diff --git a/.github/autolabeler.yml b/.github/autolabeler.yml deleted file mode 100644 index 3bca01f89950a..0000000000000 --- a/.github/autolabeler.yml +++ /dev/null @@ -1,133 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Bot page: https://github.com/apps/probot-autolabeler -# The matching patterns follow the .gitignore spec. -# See: https://git-scm.com/docs/gitignore#_pattern_format -# Also, note that the plugin uses 'ignore' package. See also -# https://github.com/kaelzhang/node-ignore -INFRA: - - ".github/" - - "appveyor.yml" - - "/tools/" - - "/dev/create-release/" - - ".asf.yaml" - - ".gitattributes" - - ".gitignore" - - "/dev/github_jira_sync.py" - - "/dev/merge_spark_pr.py" - - "/dev/run-tests-jenkins*" -BUILD: - - "/dev/" - - "!/dev/github_jira_sync.py" - - "!/dev/merge_spark_pr.py" - - "!/dev/run-tests-jenkins*" - - "!/dev/.rat-excludes" - - "/build/" - - "/project/" - - "/assembly/" - - "*pom.xml" - - "/bin/docker-image-tool.sh" - - "/bin/find-spark-home*" - - "scalastyle-config.xml" -DOCS: - - "docs/" - - "/README.md" - - "/CONTRIBUTING.md" -EXAMPLES: - - "examples/" - - "/bin/run-example*" -CORE: - - "/core/" - - "!UI.scala" - - "!ui/" - - "/common/kvstore/" - - "/common/network-common/" - - "/common/network-shuffle/" - - "/python/pyspark/*.py" - - "/python/pyspark/tests/*.py" -SPARK SUBMIT: - - "/bin/spark-submit*" -SPARK SHELL: - - "/repl/" - - "/bin/spark-shell*" -SQL: - - "sql/" - - "/common/unsafe/" - - "!/python/pyspark/sql/avro/" - - "!/python/pyspark/sql/streaming.py" - - "!/python/pyspark/sql/tests/test_streaming.py" - - "/bin/spark-sql*" - - "/bin/beeline*" - - "/sbin/*thriftserver*.sh" - - "*SQL*.R" - - "DataFrame.R" - - "WindowSpec.R" - - "catalog.R" - - "column.R" - - "functions.R" - - "group.R" - - "schema.R" - - "types.R" -AVRO: - - "/external/avro/" - - "/python/pyspark/sql/avro/" -DSTREAM: - - "/streaming/" - - "/data/streaming/" - - "/external/flume*" - - "/external/kinesis*" - - "/external/kafka*" - - "/python/pyspark/streaming/" -GRAPHX: - - "/graphx/" - - "/data/graphx/" -ML: - - "ml/" - - "*mllib_*.R" -MLLIB: - - "spark/mllib/" - - "/mllib-local/" - - "/python/pyspark/mllib/" -STRUCTURED STREAMING: - - "sql/**/streaming/" - - "/external/kafka-0-10-sql/" - - "/python/pyspark/sql/streaming.py" - - "/python/pyspark/sql/tests/test_streaming.py" - - "*streaming.R" -PYTHON: - - "/bin/pyspark*" - - "python/" -R: - - "r/" - - "R/" - - "/bin/sparkR*" -YARN: - - "/resource-managers/yarn/" -MESOS: - - "/resource-managers/mesos/" - - "/sbin/*mesos*.sh" -KUBERNETES: - - "/resource-managers/kubernetes/" -WINDOWS: - - "*.cmd" - - "/R/pkg/tests/fulltests/test_Windows.R" -WEB UI: - - "ui/" - - "UI.scala" -DEPLOY: - - "/sbin/" diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 
0000000000000..bd61902925e33
--- /dev/null
+++ b/.github/labeler.yml
@@ -0,0 +1,152 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# Pull Request Labeler GitHub Action Configuration: https://github.com/marketplace/actions/labeler
+#
+# Note that we currently cannot use the negation operator (i.e. `!`) for miniglob matches as they
+# would match any file that doesn't touch them. What's needed is the concept of `any`, which takes a
+# list of constraints / globs and then matches all of the constraints for either `any` of the files or
+# `all` of the files in the change set.
+#
+# However, `any`/`all` are not supported in a released version, and testing off of the `main` branch
+# resulted in some other errors.
+#
+# An issue has been opened upstream requesting that a release be cut that has support for all/any:
+# - https://github.com/actions/labeler/issues/111
+#
+# While we wait for this issue to be handled upstream, we can remove
+# the negated / `!` matches for now and at least have labels again.
+#
+INFRA:
+  - ".github/**/*"
+  - "appveyor.yml"
+  - "tools/**/*"
+  - "dev/create-release/**/*"
+  - ".asf.yaml"
+  - ".gitattributes"
+  - ".gitignore"
+  - "dev/github_jira_sync.py"
+  - "dev/merge_spark_pr.py"
+  - "dev/run-tests-jenkins*"
+BUILD:
+  # Can be supported when a stable release with correct all/any is released
+  #- any: ['dev/**/*', '!dev/github_jira_sync.py', '!dev/merge_spark_pr.py', '!dev/.rat-excludes']
+  - "dev/**/*"
+  - "build/**/*"
+  - "project/**/*"
+  - "assembly/**/*"
+  - "**/*pom.xml"
+  - "bin/docker-image-tool.sh"
+  - "bin/find-spark-home*"
+  - "scalastyle-config.xml"
+  # These can be added in the above `any` clause (and the /dev/**/* glob removed) when
+  # `any`/`all` support is released
+  # - "!dev/github_jira_sync.py"
+  # - "!dev/merge_spark_pr.py"
+  # - "!dev/run-tests-jenkins*"
+  # - "!dev/.rat-excludes"
+DOCS:
+  - "docs/**/*"
+  - "**/README.md"
+  - "**/CONTRIBUTING.md"
+EXAMPLES:
+  - "examples/**/*"
+  - "bin/run-example*"
+# CORE needs to be updated when all/any are released upstream.
+CORE:
+  # - any: ["core/**/*", "!**/*UI.scala", "!**/ui/**/*"] # If any file matches all of the globs defined in the list started by `any`, the label is applied.
+ - "core/**/*" + - "common/kvstore/**/*" + - "common/network-common/**/*" + - "common/network-shuffle/**/*" + - "python/pyspark/**/*.py" + - "python/pyspark/tests/**/*.py" +SPARK SUBMIT: + - "bin/spark-submit*" +SPARK SHELL: + - "repl/**/*" + - "bin/spark-shell*" +SQL: +#- any: ["**/sql/**/*", "!python/pyspark/sql/avro/**/*", "!python/pyspark/sql/streaming.py", "!python/pyspark/sql/tests/test_streaming.py"] + - "**/sql/**/*" + - "common/unsafe/**/*" + #- "!python/pyspark/sql/avro/**/*" + #- "!python/pyspark/sql/streaming.py" + #- "!python/pyspark/sql/tests/test_streaming.py" + - "bin/spark-sql*" + - "bin/beeline*" + - "sbin/*thriftserver*.sh" + - "**/*SQL*.R" + - "**/DataFrame.R" + - "**/*WindowSpec.R" + - "**/*catalog.R" + - "**/*column.R" + - "**/*functions.R" + - "**/*group.R" + - "**/*schema.R" + - "**/*types.R" +AVRO: + - "external/avro/**/*" + - "python/pyspark/sql/avro/**/*" +DSTREAM: + - "streaming/**/*" + - "data/streaming/**/*" + - "external/kinesis*" + - "external/kafka*" + - "python/pyspark/streaming/**/*" +GRAPHX: + - "graphx/**/*" + - "data/graphx/**/*" +ML: + - "**/ml/**/*" + - "**/*mllib_*.R" +MLLIB: + - "**/spark/mllib/**/*" + - "mllib-local/**/*" + - "python/pyspark/mllib/**/*" +STRUCTURED STREAMING: + - "**/sql/**/streaming/**/*" + - "external/kafka-0-10-sql/**/*" + - "python/pyspark/sql/streaming.py" + - "python/pyspark/sql/tests/test_streaming.py" + - "**/*streaming.R" +PYTHON: + - "bin/pyspark*" + - "**/python/**/*" +R: + - "**/r/**/*" + - "**/R/**/*" + - "bin/sparkR*" +YARN: + - "resource-managers/yarn/**/*" +MESOS: + - "resource-managers/mesos/**/*" + - "sbin/*mesos*.sh" +KUBERNETES: + - "resource-managers/kubernetes/**/*" +WINDOWS: + - "**/*.cmd" + - "R/pkg/tests/fulltests/test_Windows.R" +WEB UI: + - "**/ui/**/*" + - "**/*UI.scala" +DEPLOY: + - "sbin/**/*" + diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 1c0f50328ee72..2011104a19b8a 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -17,12 +17,13 @@ jobs: # Build: build Spark and run the tests for specified modules. build: name: "Build modules: ${{ matrix.modules }} ${{ matrix.comment }} (JDK ${{ matrix.java }}, ${{ matrix.hadoop }}, ${{ matrix.hive }})" - runs-on: ubuntu-latest + # Ubuntu 20.04 is the latest LTS. The next LTS is 22.04. + runs-on: ubuntu-20.04 strategy: fail-fast: false matrix: java: - - 1.8 + - 8 hadoop: - hadoop3.2 hive: @@ -41,12 +42,6 @@ jobs: streaming, sql-kafka-0-10, streaming-kafka-0-10, mllib-local, mllib, yarn, mesos, kubernetes, hadoop-cloud, spark-ganglia-lgpl - - >- - pyspark-sql, pyspark-mllib, pyspark-resource - - >- - pyspark-core, pyspark-streaming, pyspark-ml - - >- - sparkr # Here, we split Hive and SQL tests into some of slow ones and the rest of them. 
included-tags: [""] excluded-tags: [""] @@ -54,26 +49,26 @@ jobs: include: # Hive tests - modules: hive - java: 1.8 + java: 8 hadoop: hadoop3.2 hive: hive2.3 included-tags: org.apache.spark.tags.SlowHiveTest comment: "- slow tests" - modules: hive - java: 1.8 + java: 8 hadoop: hadoop3.2 hive: hive2.3 excluded-tags: org.apache.spark.tags.SlowHiveTest comment: "- other tests" # SQL tests - modules: sql - java: 1.8 + java: 8 hadoop: hadoop3.2 hive: hive2.3 included-tags: org.apache.spark.tags.ExtendedSQLTest comment: "- slow tests" - modules: sql - java: 1.8 + java: 8 hadoop: hadoop3.2 hive: hive2.3 excluded-tags: org.apache.spark.tags.ExtendedSQLTest @@ -106,90 +101,41 @@ jobs: build/zinc-* build/scala-* build/*.jar + ~/.sbt key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} restore-keys: | build- - - name: Cache Maven local repository - uses: actions/cache@v2 - with: - path: ~/.m2/repository - key: ${{ matrix.java }}-${{ matrix.hadoop }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ matrix.java }}-${{ matrix.hadoop }}-maven- - - name: Cache Ivy local repository + - name: Cache Coursier local repository uses: actions/cache@v2 with: - path: ~/.ivy2/cache - key: ${{ matrix.java }}-${{ matrix.hadoop }}-ivy-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + path: ~/.cache/coursier + key: ${{ matrix.java }}-${{ matrix.hadoop }}-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} restore-keys: | - ${{ matrix.java }}-${{ matrix.hadoop }}-ivy- - - name: Install JDK ${{ matrix.java }} + ${{ matrix.java }}-${{ matrix.hadoop }}-coursier- + - name: Install Java ${{ matrix.java }} uses: actions/setup-java@v1 with: java-version: ${{ matrix.java }} - # PySpark - - name: Install PyPy3 - # Note that order of Python installations here matters because default python3 is - # overridden by pypy3. - uses: actions/setup-python@v2 - if: contains(matrix.modules, 'pyspark') - with: - python-version: pypy3 - architecture: x64 - - name: Install Python 3.6 - uses: actions/setup-python@v2 - if: contains(matrix.modules, 'pyspark') - with: - python-version: 3.6 - architecture: x64 - name: Install Python 3.8 uses: actions/setup-python@v2 # We should install one Python that is higher then 3+ for SQL and Yarn because: # - SQL component also has Python related tests, for example, IntegratedUDFTestUtils. # - Yarn has a Python specific test too, for example, YarnClusterSuite. - if: contains(matrix.modules, 'yarn') || contains(matrix.modules, 'pyspark') || (contains(matrix.modules, 'sql') && !contains(matrix.modules, 'sql-')) + if: contains(matrix.modules, 'yarn') || (contains(matrix.modules, 'sql') && !contains(matrix.modules, 'sql-')) with: python-version: 3.8 architecture: x64 - - name: Install Python packages (Python 3.6 and PyPy3) - if: contains(matrix.modules, 'pyspark') - # PyArrow is not supported in PyPy yet, see ARROW-2651. - # TODO(SPARK-32247): scipy installation with PyPy fails for an unknown reason. 
- run: | - python3.6 -m pip install numpy pyarrow pandas scipy xmlrunner - python3.6 -m pip list - # PyPy does not have xmlrunner - pypy3 -m pip install numpy pandas - pypy3 -m pip list - name: Install Python packages (Python 3.8) - if: contains(matrix.modules, 'pyspark') || (contains(matrix.modules, 'sql') && !contains(matrix.modules, 'sql-')) + if: (contains(matrix.modules, 'sql') && !contains(matrix.modules, 'sql-')) run: | - python3.8 -m pip install numpy pyarrow pandas scipy xmlrunner + python3.8 -m pip install numpy 'pyarrow<3.0.0' pandas scipy xmlrunner python3.8 -m pip list - # SparkR - - name: Install R 4.0 - if: contains(matrix.modules, 'sparkr') - run: | - sudo sh -c "echo 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' >> /etc/apt/sources.list" - curl -sL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xE298A3A825C0D65DFD57CBB651716619E084DAB9" | sudo apt-key add - sudo apt-get update - sudo apt-get install -y r-base r-base-dev libcurl4-openssl-dev - - name: Install R packages - if: contains(matrix.modules, 'sparkr') - run: | - # qpdf is required to reduce the size of PDFs to make CRAN check pass. See SPARK-32497. - sudo apt-get install -y libcurl4-openssl-dev qpdf - sudo Rscript -e "install.packages(c('knitr', 'rmarkdown', 'testthat', 'devtools', 'e1071', 'survival', 'arrow', 'roxygen2'), repos='https://cloud.r-project.org/')" - # Show installed packages in R. - sudo Rscript -e 'pkg_list <- as.data.frame(installed.packages()[, c(1,3:4)]); pkg_list[is.na(pkg_list$Priority), 1:2, drop = FALSE]' # Run the tests. - name: Run tests run: | - # Hive tests become flaky when running in parallel as it's too intensive. - if [[ "$MODULES_TO_TEST" == "hive" ]]; then export SERIAL_SBT_TESTS=1; fi - mkdir -p ~/.m2 + # Hive and SQL tests become flaky when running in parallel as it's too intensive. + if [[ "$MODULES_TO_TEST" == "hive" ]] || [[ "$MODULES_TO_TEST" == "sql" ]]; then export SERIAL_SBT_TESTS=1; fi ./dev/run-tests --parallelism 2 --modules "$MODULES_TO_TEST" --included-tags "$INCLUDED_TAGS" --excluded-tags "$EXCLUDED_TAGS" - rm -rf ~/.m2/repository/org/apache/spark - name: Upload test results to report if: always() uses: actions/upload-artifact@v2 @@ -203,24 +149,174 @@ jobs: name: unit-tests-log-${{ matrix.modules }}-${{ matrix.comment }}-${{ matrix.java }}-${{ matrix.hadoop }}-${{ matrix.hive }} path: "**/target/unit-tests.log" + pyspark: + name: "Build modules: ${{ matrix.modules }}" + runs-on: ubuntu-20.04 + container: + image: dongjoon/apache-spark-github-action-image:20201025 + strategy: + fail-fast: false + matrix: + modules: + - >- + pyspark-sql, pyspark-mllib, pyspark-resource + - >- + pyspark-core, pyspark-streaming, pyspark-ml + env: + MODULES_TO_TEST: ${{ matrix.modules }} + HADOOP_PROFILE: hadoop3.2 + HIVE_PROFILE: hive2.3 + # GitHub Actions' default miniconda to use in pip packaging test. + CONDA_PREFIX: /usr/share/miniconda + GITHUB_PREV_SHA: ${{ github.event.before }} + GITHUB_INPUT_BRANCH: ${{ github.event.inputs.target }} + steps: + - name: Checkout Spark repository + uses: actions/checkout@v2 + # In order to fetch changed files + with: + fetch-depth: 0 + - name: Merge dispatched input branch + if: ${{ github.event.inputs.target != '' }} + run: git merge --progress --ff-only origin/${{ github.event.inputs.target }} + # Cache local repositories. Note that GitHub Actions cache has a 2G limit. 
+ - name: Cache Scala, SBT, Maven and Zinc + uses: actions/cache@v2 + with: + path: | + build/apache-maven-* + build/zinc-* + build/scala-* + build/*.jar + ~/.sbt + key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} + restore-keys: | + build- + - name: Cache Coursier local repository + uses: actions/cache@v2 + with: + path: ~/.cache/coursier + key: pyspark-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + restore-keys: | + pyspark-coursier- + - name: Install Python 3.6 + uses: actions/setup-python@v2 + with: + python-version: 3.6 + architecture: x64 + # This step takes much less time (~30s) than other Python versions so it is not included + # in the Docker image being used. There is also a technical issue to install Python 3.6 on + # Ubuntu 20.04. See also SPARK-33162. + - name: Install Python packages (Python 3.6) + run: | + python3.6 -m pip install numpy 'pyarrow<3.0.0' pandas scipy xmlrunner + python3.6 -m pip list + # Run the tests. + - name: Run tests + run: | + ./dev/run-tests --parallelism 2 --modules "$MODULES_TO_TEST" + - name: Upload test results to report + if: always() + uses: actions/upload-artifact@v2 + with: + name: test-results-${{ matrix.modules }}--8-hadoop3.2-hive2.3 + path: "**/target/test-reports/*.xml" + - name: Upload unit tests log files + if: failure() + uses: actions/upload-artifact@v2 + with: + name: unit-tests-log-${{ matrix.modules }}--8-hadoop3.2-hive2.3 + path: "**/target/unit-tests.log" + + sparkr: + name: "Build modules: sparkr" + runs-on: ubuntu-20.04 + container: + image: dongjoon/apache-spark-github-action-image:20201025 + env: + HADOOP_PROFILE: hadoop3.2 + HIVE_PROFILE: hive2.3 + GITHUB_PREV_SHA: ${{ github.event.before }} + GITHUB_INPUT_BRANCH: ${{ github.event.inputs.target }} + steps: + - name: Checkout Spark repository + uses: actions/checkout@v2 + # In order to fetch changed files + with: + fetch-depth: 0 + - name: Merge dispatched input branch + if: ${{ github.event.inputs.target != '' }} + run: git merge --progress --ff-only origin/${{ github.event.inputs.target }} + # Cache local repositories. Note that GitHub Actions cache has a 2G limit. + - name: Cache Scala, SBT, Maven and Zinc + uses: actions/cache@v2 + with: + path: | + build/apache-maven-* + build/zinc-* + build/scala-* + build/*.jar + ~/.sbt + key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} + restore-keys: | + build- + - name: Cache Coursier local repository + uses: actions/cache@v2 + with: + path: ~/.cache/coursier + key: sparkr-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + restore-keys: | + sparkr-coursier- + - name: Run tests + run: | + # The followings are also used by `r-lib/actions/setup-r` to avoid + # R issues at docker environment + export TZ=UTC + export _R_CHECK_SYSTEM_CLOCK_=FALSE + ./dev/run-tests --parallelism 2 --modules sparkr + - name: Upload test results to report + if: always() + uses: actions/upload-artifact@v2 + with: + name: test-results-sparkr--8-hadoop3.2-hive2.3 + path: "**/target/test-reports/*.xml" + # Static analysis, and documentation build lint: name: Linters, licenses, dependencies and documentation generation - runs-on: ubuntu-latest + runs-on: ubuntu-20.04 + container: + image: dongjoon/apache-spark-github-action-image:20201025 steps: - name: Checkout Spark repository uses: actions/checkout@v2 + # Cache local repositories. 
Note that GitHub Actions cache has a 2G limit. + - name: Cache Scala, SBT, Maven and Zinc + uses: actions/cache@v2 + with: + path: | + build/apache-maven-* + build/zinc-* + build/scala-* + build/*.jar + ~/.sbt + key: build-${{ hashFiles('**/pom.xml', 'project/build.properties', 'build/mvn', 'build/sbt', 'build/sbt-launch-lib.bash', 'build/spark-build-info') }} + restore-keys: | + build- + - name: Cache Coursier local repository + uses: actions/cache@v2 + with: + path: ~/.cache/coursier + key: docs-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + restore-keys: | + docs-coursier- - name: Cache Maven local repository uses: actions/cache@v2 with: path: ~/.m2/repository - key: docs-maven-repo-${{ hashFiles('**/pom.xml') }} + key: docs-maven-${{ hashFiles('**/pom.xml') }} restore-keys: | docs-maven- - - name: Install JDK 1.8 - uses: actions/setup-java@v1 - with: - java-version: 1.8 - name: Install Python 3.6 uses: actions/setup-python@v2 with: @@ -230,32 +326,24 @@ jobs: run: | # TODO(SPARK-32407): Sphinx 3.1+ does not correctly index nested classes. # See also https://github.com/sphinx-doc/sphinx/issues/7551. - pip3 install flake8 'sphinx<3.1.0' numpy pydata_sphinx_theme ipython nbsphinx - - name: Install R 4.0 - run: | - sudo sh -c "echo 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' >> /etc/apt/sources.list" - curl -sL "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xE298A3A825C0D65DFD57CBB651716619E084DAB9" | sudo apt-key add - sudo apt-get update - sudo apt-get install -y r-base r-base-dev libcurl4-openssl-dev + python3.6 -m pip install flake8 'sphinx<3.1.0' numpy pydata_sphinx_theme ipython nbsphinx mypy numpydoc - name: Install R linter dependencies and SparkR run: | - sudo apt-get install -y libcurl4-openssl-dev - sudo Rscript -e "install.packages(c('devtools'), repos='https://cloud.r-project.org/')" - sudo Rscript -e "devtools::install_github('jimhester/lintr@v2.0.0')" + apt-get install -y libcurl4-openssl-dev libgit2-dev libssl-dev libxml2-dev + Rscript -e "install.packages(c('devtools'), repos='https://cloud.r-project.org/')" + Rscript -e "devtools::install_github('jimhester/lintr@v2.0.0')" ./R/install-dev.sh - - name: Install Ruby 2.7 for documentation generation - uses: actions/setup-ruby@v1 - with: - ruby-version: 2.7 - name: Install dependencies for documentation generation run: | # pandoc is required to generate PySpark APIs as well in nbsphinx. - sudo apt-get install -y libcurl4-openssl-dev pandoc + apt-get install -y libcurl4-openssl-dev pandoc # TODO(SPARK-32407): Sphinx 3.1+ does not correctly index nested classes. # See also https://github.com/sphinx-doc/sphinx/issues/7551. 
- pip install 'sphinx<3.1.0' mkdocs numpy pydata_sphinx_theme ipython nbsphinx + python3.6 -m pip install 'sphinx<3.1.0' mkdocs numpy pydata_sphinx_theme ipython nbsphinx numpydoc + apt-get update -y + apt-get install -y ruby ruby-dev gem install jekyll jekyll-redirect-from rouge - sudo Rscript -e "install.packages(c('devtools', 'testthat', 'knitr', 'rmarkdown', 'roxygen2'), repos='https://cloud.r-project.org/')" + Rscript -e "install.packages(c('devtools', 'testthat', 'knitr', 'rmarkdown', 'roxygen2'), repos='https://cloud.r-project.org/')" - name: Scala linter run: ./dev/lint-scala - name: Java linter @@ -271,11 +359,13 @@ jobs: - name: Run documentation build run: | cd docs + export LC_ALL=C.UTF-8 + export LANG=C.UTF-8 jekyll build - java11: - name: Java 11 build - runs-on: ubuntu-latest + java-11: + name: Java 11 build with Maven + runs-on: ubuntu-20.04 steps: - name: Checkout Spark repository uses: actions/checkout@v2 @@ -294,6 +384,49 @@ jobs: run: | export MAVEN_OPTS="-Xmx2g -XX:ReservedCodeCacheSize=1g -Dorg.slf4j.simpleLogger.defaultLogLevel=WARN" export MAVEN_CLI_OPTS="--no-transfer-progress" - mkdir -p ~/.m2 + # It uses Maven's 'install' intentionally, see https://github.com/apache/spark/pull/26414. ./build/mvn $MAVEN_CLI_OPTS -DskipTests -Pyarn -Pmesos -Pkubernetes -Phive -Phive-thriftserver -Phadoop-cloud -Djava.version=11 install rm -rf ~/.m2/repository/org/apache/spark + + scala-213: + name: Scala 2.13 build with SBT + runs-on: ubuntu-20.04 + steps: + - name: Checkout Spark repository + uses: actions/checkout@v2 + - name: Cache Coursier local repository + uses: actions/cache@v2 + with: + path: ~/.cache/coursier + key: scala-213-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + restore-keys: | + scala-213-coursier- + - name: Install Java 8 + uses: actions/setup-java@v1 + with: + java-version: 8 + - name: Build with SBT + run: | + ./dev/change-scala-version.sh 2.13 + ./build/sbt -Pyarn -Pmesos -Pkubernetes -Phive -Phive-thriftserver -Phadoop-cloud -Pkinesis-asl -Pdocker-integration-tests -Pkubernetes-integration-tests -Pspark-ganglia-lgpl -Pscala-2.13 compile test:compile + + hadoop-2: + name: Hadoop 2 build with SBT + runs-on: ubuntu-20.04 + steps: + - name: Checkout Spark repository + uses: actions/checkout@v2 + - name: Cache Coursier local repository + uses: actions/cache@v2 + with: + path: ~/.cache/coursier + key: hadoop-2-coursier-${{ hashFiles('**/pom.xml', '**/plugins.sbt') }} + restore-keys: | + hadoop-2-coursier- + - name: Install Java 8 + uses: actions/setup-java@v1 + with: + java-version: 8 + - name: Build with SBT + run: | + ./build/sbt -Pyarn -Pmesos -Pkubernetes -Phive -Phive-thriftserver -Phadoop-cloud -Pkinesis-asl -Phadoop-2.7 compile test:compile diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 0000000000000..a1a5ab5b70f5b --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,43 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+name: "Pull Request Labeler"
+on: pull_request_target
+
+jobs:
+  label:
+    runs-on: ubuntu-latest
+    steps:
+    # In order to get back the negated matches like in the old config,
+    # we need the actions/labeler concept of `all` and `any` which matches
+    # all of the given constraints / glob patterns for either `all`
+    # files or `any` file in the change set.
+    #
+    # GitHub issue which requests a timeline for a release with any/all support:
+    # - https://github.com/actions/labeler/issues/111
+    # This issue also references the issue that mentions that any/all are only
+    # supported on the main branch (previously called master):
+    # - https://github.com/actions/labeler/issues/73#issuecomment-639034278
+    #
+    # However, these are not in a published release and the current `main` branch
+    # has some issues upon testing.
+    - uses: actions/labeler@2.2.0
+      with:
+        repo-token: "${{ secrets.GITHUB_TOKEN }}"
+        sync-labels: true
diff --git a/.github/workflows/publish_snapshot.yml b/.github/workflows/publish_snapshot.yml
new file mode 100644
index 0000000000000..c5dbc8d057964
--- /dev/null
+++ b/.github/workflows/publish_snapshot.yml
@@ -0,0 +1,40 @@
+name: Publish Snapshot
+
+on:
+  schedule:
+  - cron: '0 0 * * *'
+
+jobs:
+  publish-snapshot:
+    if: github.repository == 'apache/spark'
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        branch:
+        - master
+        - branch-3.1
+    steps:
+    - name: Checkout Spark repository
+      uses: actions/checkout@master
+      with:
+        ref: ${{ matrix.branch }}
+    - name: Cache Maven local repository
+      uses: actions/cache@v2
+      with:
+        path: ~/.m2/repository
+        key: snapshot-maven-${{ hashFiles('**/pom.xml') }}
+        restore-keys: |
+          snapshot-maven-
+    - name: Install Java 8
+      uses: actions/setup-java@v1
+      with:
+        java-version: 8
+    - name: Publish snapshot
+      env:
+        ASF_USERNAME: ${{ secrets.NEXUS_USER }}
+        ASF_PASSWORD: ${{ secrets.NEXUS_PW }}
+        GPG_KEY: "not_used"
+        GPG_PASSPHRASE: "not_used"
+        GIT_REF: ${{ matrix.branch }}
+      run: ./dev/create-release/release-build.sh publish-snapshot
diff --git a/.gitignore b/.gitignore
index 2e4f77ad6fb42..9c145fba1bee9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,7 @@
 *.swp
 *~
 .DS_Store
+.bsp/
 .cache
 .classpath
 .ensime
@@ -68,6 +69,7 @@ python/docs/source/reference/api/
 python/test_coverage/coverage_data
 python/test_coverage/htmlcov
 python/pyspark/python
+.mypy_cache/
 reports/
 scalastyle-on-compile.generated.xml
 scalastyle-output.xml
diff --git a/.sbtopts b/.sbtopts
new file mode 100644
index 0000000000000..9afbdca6db1c7
--- /dev/null
+++ b/.sbtopts
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +-J-Xmx4G +-J-Xss4m diff --git a/LICENSE-binary b/LICENSE-binary index d363661b1cc7e..2a5434e14a3f5 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -521,7 +521,6 @@ Common Development and Distribution License (CDDL) 1.1 ------------------------------------------------------ javax.el:javax.el-api https://javaee.github.io/uel-ri/ -javax.servlet:javax.servlet-api https://javaee.github.io/servlet-spec/ javax.servlet.jsp:jsp-api javax.transaction:jta http://www.oracle.com/technetwork/java/index.html javax.xml.bind:jaxb-api https://github.com/javaee/jaxb-v2 @@ -553,6 +552,7 @@ Eclipse Public License (EPL) 2.0 -------------------------------- jakarta.annotation:jakarta-annotation-api https://projects.eclipse.org/projects/ee4j.ca +jakarta.servlet:jakarta.servlet-api https://projects.eclipse.org/projects/ee4j.servlet jakarta.ws.rs:jakarta.ws.rs-api https://github.com/eclipse-ee4j/jaxrs-api org.glassfish.hk2.external:jakarta.inject diff --git a/R/CRAN_RELEASE.md b/R/CRAN_RELEASE.md index 4d9b6416c01cb..2f410cf8bfd94 100644 --- a/R/CRAN_RELEASE.md +++ b/R/CRAN_RELEASE.md @@ -25,7 +25,7 @@ To release SparkR as a package to CRAN, we would use the `devtools` package. Ple First, check that the `Version:` field in the `pkg/DESCRIPTION` file is updated. Also, check for stale files not under source control. -Note that while `run-tests.sh` runs `check-cran.sh` (which runs `R CMD check`), it is doing so with `--no-manual --no-vignettes`, which skips a few vignettes or PDF checks - therefore it will be preferred to run `R CMD check` on the source package built manually before uploading a release. Also note that for CRAN checks for pdf vignettes to success, `qpdf` tool must be there (to install it, eg. `yum -q -y install qpdf`). +Note that while `run-tests.sh` runs `check-cran.sh` (which runs `R CMD check`), it is doing so with `--no-manual --no-vignettes`, which skips a few vignettes or PDF checks - therefore it will be preferred to run `R CMD check` on the source package built manually before uploading a release. Also note that for CRAN checks for pdf vignettes to success, `qpdf` tool must be there (to install it, e.g. `yum -q -y install qpdf`). To upload a release, we would need to update the `cran-comments.md`. This should generally contain the results from running the `check-cran.sh` script along with comments on status of all `WARNING` (should not be any) or `NOTE`. As a part of `check-cran.sh` and the release process, the vignettes is build - make sure `SPARK_HOME` is set and Spark jars are accessible. diff --git a/R/install-dev.bat b/R/install-dev.bat index c570d93049a14..ae5aa589a19d1 100644 --- a/R/install-dev.bat +++ b/R/install-dev.bat @@ -26,7 +26,7 @@ MKDIR %SPARK_HOME%\R\lib rem When you pass the package path directly as an argument to R CMD INSTALL, rem it takes the path as 'C:\projects\spark\R\..\R\pkg"' as an example at -rem R 4.0. To work around this, directly go to the directoy and install it. +rem R 4.0. To work around this, directly go to the directory and install it. rem See also SPARK-32074 pushd %SPARK_HOME%\R\pkg\ R.exe CMD INSTALL --library="%SPARK_HOME%\R\lib" . 
diff --git a/R/pkg/DESCRIPTION b/R/pkg/DESCRIPTION index 2047f0d75ca18..c141baa51b8cb 100644 --- a/R/pkg/DESCRIPTION +++ b/R/pkg/DESCRIPTION @@ -1,6 +1,6 @@ Package: SparkR Type: Package -Version: 3.1.0 +Version: 3.2.0 Title: R Front End for 'Apache Spark' Description: Provides an R Front end for 'Apache Spark' . Authors@R: c(person("Shivaram", "Venkataraman", role = c("aut", "cre"), @@ -11,7 +11,7 @@ Authors@R: c(person("Shivaram", "Venkataraman", role = c("aut", "cre"), email = "felixcheung@apache.org"), person(family = "The Apache Software Foundation", role = c("aut", "cph"))) License: Apache License (== 2.0) -URL: https://www.apache.org/ https://spark.apache.org/ +URL: https://www.apache.org https://spark.apache.org BugReports: https://spark.apache.org/contributing.html SystemRequirements: Java (>= 8, < 12) Depends: diff --git a/R/pkg/NAMESPACE b/R/pkg/NAMESPACE index 53a0b7856567e..6ef2df5731e10 100644 --- a/R/pkg/NAMESPACE +++ b/R/pkg/NAMESPACE @@ -202,6 +202,7 @@ exportMethods("%<=>%", "%in%", "abs", "acos", + "acosh", "add_months", "alias", "approx_count_distinct", @@ -222,15 +223,21 @@ exportMethods("%<=>%", "array_remove", "array_repeat", "array_sort", + "array_to_vector", "array_transform", "arrays_overlap", "array_union", "arrays_zip", "arrays_zip_with", "asc", + "asc_nulls_first", + "asc_nulls_last", "ascii", "asin", + "asinh", + "assert_true", "atan", + "atanh", "atan2", "avg", "base64", @@ -272,6 +279,9 @@ exportMethods("%<=>%", "degrees", "dense_rank", "desc", + "desc_nulls_first", + "desc_nulls_last", + "dropFields", "element_at", "encode", "endsWith", @@ -286,6 +296,7 @@ exportMethods("%<=>%", "floor", "format_number", "format_string", + "from_avro", "from_csv", "from_json", "from_unixtime", @@ -348,6 +359,7 @@ exportMethods("%<=>%", "negate", "next_day", "not", + "nth_value", "ntile", "otherwise", "over", @@ -359,6 +371,7 @@ exportMethods("%<=>%", "posexplode_outer", "quarter", "radians", + "raise_error", "rand", "randn", "rank", @@ -405,8 +418,10 @@ exportMethods("%<=>%", "sumDistinct", "tan", "tanh", + "timestamp_seconds", "toDegrees", "toRadians", + "to_avro", "to_csv", "to_date", "to_json", @@ -425,9 +440,11 @@ exportMethods("%<=>%", "variance", "var_pop", "var_samp", + "vector_to_array", "weekofyear", "when", "window", + "withField", "xxhash64", "year") diff --git a/R/pkg/R/DataFrame.R b/R/pkg/R/DataFrame.R index 089e1f26b7d3b..72d96151f6371 100644 --- a/R/pkg/R/DataFrame.R +++ b/R/pkg/R/DataFrame.R @@ -880,7 +880,7 @@ setMethod("toJSON", #' Save the contents of SparkDataFrame as a JSON file #' -#' Save the contents of a SparkDataFrame as a JSON file (\href{http://jsonlines.org/}{ +#' Save the contents of a SparkDataFrame as a JSON file (\href{https://jsonlines.org/}{ #' JSON Lines text format or newline-delimited JSON}). Files written out #' with this method can be read back in as a SparkDataFrame using read.json(). 
#' @@ -2277,16 +2277,17 @@ setMethod("mutate", # For named arguments, use the names for arguments as the column names # For unnamed arguments, use the argument symbols as the column names - args <- sapply(substitute(list(...))[-1], deparse) ns <- names(cols) - if (!is.null(ns)) { - lapply(seq_along(args), function(i) { - if (ns[[i]] != "") { - args[[i]] <<- ns[[i]] - } + if (is.null(ns)) ns <- rep("", length(cols)) + named_idx <- nzchar(ns) + if (!all(named_idx)) { + # SPARK-31517: deparse uses width.cutoff on wide input and the + # output is length>1, so need to collapse it to scalar + colsub <- substitute(list(...))[-1L] + ns[!named_idx] <- sapply(which(!named_idx), function(ii) { + paste(gsub("^\\s*|\\s*$", "", deparse(colsub[[ii]])), collapse = " ") }) } - ns <- args # The last column of the same name in the specific columns takes effect deDupCols <- list() @@ -2772,7 +2773,7 @@ setMethod("merge", #' Creates a list of columns by replacing the intersected ones with aliases #' #' Creates a list of columns by replacing the intersected ones with aliases. -#' The name of the alias column is formed by concatanating the original column name and a suffix. +#' The name of the alias column is formed by concatenating the original column name and a suffix. #' #' @param x a SparkDataFrame #' @param intersectedColNames a list of intersected column names of the SparkDataFrame @@ -2863,11 +2864,18 @@ setMethod("unionAll", #' \code{UNION ALL} and \code{UNION DISTINCT} in SQL as column positions are not taken #' into account. Input SparkDataFrames can have different data types in the schema. #' +#' When the parameter allowMissingColumns is `TRUE`, the set of column names +#' in x and y can differ; missing columns will be filled as null. +#' Further, the missing columns of x will be added at the end +#' in the schema of the union result. +#' #' Note: This does not remove duplicate rows across the two SparkDataFrames. #' This function resolves columns by name (not by position). #' #' @param x A SparkDataFrame #' @param y A SparkDataFrame +#' @param allowMissingColumns logical +#' @param ... further arguments to be passed to or from other methods. #' @return A SparkDataFrame containing the result of the union. #' @family SparkDataFrame functions #' @rdname unionByName @@ -2880,12 +2888,15 @@ setMethod("unionAll", #' df1 <- select(createDataFrame(mtcars), "carb", "am", "gear") #' df2 <- select(createDataFrame(mtcars), "am", "gear", "carb") #' head(unionByName(df1, df2)) +#' +#' df3 <- select(createDataFrame(mtcars), "carb") +#' head(unionByName(df1, df3, allowMissingColumns = TRUE)) #' } #' @note unionByName since 2.3.0 setMethod("unionByName", signature(x = "SparkDataFrame", y = "SparkDataFrame"), - function(x, y) { - unioned <- callJMethod(x@sdf, "unionByName", y@sdf) + function(x, y, allowMissingColumns=FALSE) { + unioned <- callJMethod(x@sdf, "unionByName", y@sdf, allowMissingColumns) dataFrame(unioned) }) @@ -3221,7 +3232,7 @@ setMethod("describe", #' \item stddev #' \item min #' \item max -#' \item arbitrary approximate percentiles specified as a percentage (eg, "75\%") +#' \item arbitrary approximate percentiles specified as a percentage (e.g., "75\%") #' } #' If no statistics are given, this function computes count, mean, stddev, min, #' approximate quartiles (percentiles at 25\%, 50\%, and 75\%), and max. 
@@ -3434,7 +3445,8 @@ setMethod("as.data.frame", #' @note attach since 1.6.0 setMethod("attach", signature(what = "SparkDataFrame"), - function(what, pos = 2L, name = deparse(substitute(what), backtick = FALSE), + function(what, pos = 2L, + name = paste(deparse(substitute(what), backtick = FALSE), collapse = " "), warn.conflicts = TRUE) { args <- as.list(environment()) # capture all parameters - this must be the first line newEnv <- assignNewEnv(args$what) @@ -3733,7 +3745,7 @@ setMethod("histogram", #' #' @param x a SparkDataFrame. #' @param url JDBC database url of the form \code{jdbc:subprotocol:subname}. -#' @param tableName yhe name of the table in the external database. +#' @param tableName the name of the table in the external database. #' @param mode one of 'append', 'overwrite', 'error', 'errorifexists', 'ignore' #' save mode (it is 'error' by default) #' @param ... additional JDBC database connection properties. diff --git a/R/pkg/R/RDD.R b/R/pkg/R/RDD.R index 7a1d157bb8a36..408a3ff25b2b2 100644 --- a/R/pkg/R/RDD.R +++ b/R/pkg/R/RDD.R @@ -970,7 +970,7 @@ setMethod("takeSample", signature(x = "RDD", withReplacement = "logical", MAXINT))))) # If the first sample didn't turn out large enough, keep trying to # take samples; this shouldn't happen often because we use a big - # multiplier for thei initial size + # multiplier for the initial size while (length(samples) < total) samples <- collectRDD(sampleRDD(x, withReplacement, fraction, as.integer(ceiling(stats::runif(1, @@ -1512,7 +1512,7 @@ setMethod("glom", #' #' @param x An RDD. #' @param y An RDD. -#' @return a new RDD created by performing the simple union (witout removing +#' @return a new RDD created by performing the simple union (without removing #' duplicates) of two input RDDs. #' @examples #'\dontrun{ diff --git a/R/pkg/R/SQLContext.R b/R/pkg/R/SQLContext.R index c0ac68332ec41..14262e1a74ab0 100644 --- a/R/pkg/R/SQLContext.R +++ b/R/pkg/R/SQLContext.R @@ -203,7 +203,7 @@ getSchema <- function(schema, firstRow = NULL, rdd = NULL) { }) } - # SPAKR-SQL does not support '.' in column name, so replace it with '_' + # SPARK-SQL does not support '.' in column name, so replace it with '_' # TODO(davies): remove this once SPARK-2775 is fixed names <- lapply(names, function(n) { nn <- gsub(".", "_", n, fixed = TRUE) @@ -374,7 +374,7 @@ setMethod("toDF", signature(x = "RDD"), #' Create a SparkDataFrame from a JSON file. #' #' Loads a JSON file, returning the result as a SparkDataFrame -#' By default, (\href{http://jsonlines.org/}{JSON Lines text format or newline-delimited JSON} +#' By default, (\href{https://jsonlines.org/}{JSON Lines text format or newline-delimited JSON} #' ) is supported. For JSON (one record per file), set a named property \code{multiLine} to #' \code{TRUE}. #' It goes through the entire dataset once to determine the schema. diff --git a/R/pkg/R/WindowSpec.R b/R/pkg/R/WindowSpec.R index 037809cd0923e..be47d0117ed7f 100644 --- a/R/pkg/R/WindowSpec.R +++ b/R/pkg/R/WindowSpec.R @@ -54,7 +54,7 @@ setMethod("show", "WindowSpec", #' Defines the partitioning columns in a WindowSpec. #' #' @param x a WindowSpec. -#' @param col a column to partition on (desribed by the name or Column). +#' @param col a column to partition on (described by the name or Column). #' @param ... additional column(s) to partition on. #' @return A WindowSpec. 
#' @rdname partitionBy @@ -231,7 +231,7 @@ setMethod("rangeBetween", #' @rdname over #' @name over #' @aliases over,Column,WindowSpec-method -#' @family colum_func +#' @family column_func #' @examples #' \dontrun{ #' df <- createDataFrame(mtcars) diff --git a/R/pkg/R/column.R b/R/pkg/R/column.R index 7926a9a2467ee..9fa117ccb6281 100644 --- a/R/pkg/R/column.R +++ b/R/pkg/R/column.R @@ -67,7 +67,11 @@ operators <- list( # we can not override `&&` and `||`, so use `&` and `|` instead "&" = "and", "|" = "or", "^" = "pow" ) -column_functions1 <- c("asc", "desc", "isNaN", "isNull", "isNotNull") +column_functions1 <- c( + "asc", "asc_nulls_first", "asc_nulls_last", + "desc", "desc_nulls_first", "desc_nulls_last", + "isNaN", "isNull", "isNotNull" +) column_functions2 <- c("like", "rlike", "getField", "getItem", "contains") createOperator <- function(op) { @@ -131,7 +135,7 @@ createMethods() #' @rdname alias #' @name alias #' @aliases alias,Column-method -#' @family colum_func +#' @family column_func #' @examples #' \dontrun{ #' df <- createDataFrame(iris) @@ -157,7 +161,7 @@ setMethod("alias", #' #' @rdname substr #' @name substr -#' @family colum_func +#' @family column_func #' @aliases substr,Column-method #' #' @param x a Column. @@ -183,7 +187,7 @@ setMethod("substr", signature(x = "Column"), #' #' @rdname startsWith #' @name startsWith -#' @family colum_func +#' @family column_func #' @aliases startsWith,Column-method #' #' @param x vector of character string whose "starts" are considered @@ -202,7 +206,7 @@ setMethod("startsWith", signature(x = "Column"), #' #' @rdname endsWith #' @name endsWith -#' @family colum_func +#' @family column_func #' @aliases endsWith,Column-method #' #' @param x vector of character string whose "ends" are considered @@ -220,7 +224,7 @@ setMethod("endsWith", signature(x = "Column"), #' #' @rdname between #' @name between -#' @family colum_func +#' @family column_func #' @aliases between,Column-method #' #' @param x a Column @@ -247,7 +251,7 @@ setMethod("between", signature(x = "Column"), # nolint end #' @rdname cast #' @name cast -#' @family colum_func +#' @family column_func #' @aliases cast,Column-method #' #' @examples @@ -296,7 +300,7 @@ setMethod("%in%", #' Can be a single value or a Column. #' @rdname otherwise #' @name otherwise -#' @family colum_func +#' @family column_func #' @aliases otherwise,Column-method #' @note otherwise since 1.5.0 setMethod("otherwise", @@ -356,3 +360,103 @@ setMethod("%<=>%", #' } #' @note ! since 2.3.0 setMethod("!", signature(x = "Column"), function(x) not(x)) + +#' withField +#' +#' Adds/replaces field in a struct \code{Column} by name. +#' +#' @param x a Column +#' @param fieldName a character +#' @param col a Column expression +#' +#' @rdname withField +#' @aliases withField withField,Column-method +#' @examples +#' \dontrun{ +#' df <- withColumn( +#' createDataFrame(iris), +#' "sepal", +#' struct(column("Sepal_Width"), column("Sepal_Length")) +#' ) +#' +#' head(select( +#' df, +#' withField(df$sepal, "product", df$Sepal_Length * df$Sepal_Width) +#' )) +#' } +#' @note withField since 3.1.0 +setMethod("withField", + signature(x = "Column", fieldName = "character", col = "Column"), + function(x, fieldName, col) { + jc <- callJMethod(x@jc, "withField", fieldName, col@jc) + column(jc) + }) + +#' dropFields +#' +#' Drops fields in a struct \code{Column} by name. +#' +#' @param x a Column +#' @param ... names of the fields to be dropped. 
+#' +#' @rdname dropFields +#' @aliases dropFields dropFields,Column-method +#' @examples +#' \dontrun{ +#' df <- select( +#' createDataFrame(iris), +#' alias( +#' struct( +#' column("Sepal_Width"), column("Sepal_Length"), +#' alias( +#' struct( +#' column("Petal_Width"), column("Petal_Length"), +#' alias( +#' column("Petal_Width") * column("Petal_Length"), +#' "Petal_Product" +#' ) +#' ), +#' "Petal" +#' ) +#' ), +#' "dimensions" +#' ) +#' ) +#' head(withColumn(df, "dimensions", dropFields(df$dimensions, "Petal"))) +#' +#' head( +#' withColumn( +#' df, "dimensions", +#' dropFields(df$dimensions, "Sepal_Width", "Sepal_Length") +#' ) +#' ) +#' +#' # This method supports dropping multiple nested fields directly e.g. +#' head( +#' withColumn( +#' df, "dimensions", +#' dropFields(df$dimensions, "Petal.Petal_Width", "Petal.Petal_Length") +#' ) +#' ) +#' +#' # However, if you are going to add/replace multiple nested fields, +#' # it is preferred to extract out the nested struct before +#' # adding/replacing multiple fields e.g. +#' head( +#' withColumn( +#' df, "dimensions", +#' withField( +#' column("dimensions"), +#' "Petal", +#' dropFields(column("dimensions.Petal"), "Petal_Width", "Petal_Length") +#' ) +#' ) +#' ) +#' } +#' @note dropFields since 3.1.0 +setMethod("dropFields", + signature(x = "Column"), + function(x, ...) { + jc <- callJMethod(x@jc, "dropFields", list(...)) + column(jc) + }) diff --git a/R/pkg/R/context.R b/R/pkg/R/context.R index e3c9d9f8793d6..cca6c2c817de9 100644 --- a/R/pkg/R/context.R +++ b/R/pkg/R/context.R @@ -86,7 +86,7 @@ makeSplits <- function(numSerializedSlices, length) { # For instance, for numSerializedSlices of 22, length of 50 # [1] 0 0 2 2 4 4 6 6 6 9 9 11 11 13 13 15 15 15 18 18 20 20 22 22 22 # [26] 25 25 27 27 29 29 31 31 31 34 34 36 36 38 38 40 40 40 43 43 45 45 47 47 47 - # Notice the slice group with 3 slices (ie. 6, 15, 22) are roughly evenly spaced. + # Notice the slice group with 3 slices (i.e. 6, 15, 22) are roughly evenly spaced. # We are trying to reimplement the calculation in the positions method in ParallelCollectionRDD if (numSerializedSlices > 0) { unlist(lapply(0: (numSerializedSlices - 1), function(x) { @@ -116,7 +116,7 @@ makeSplits <- function(numSerializedSlices, length) { #' This change affects both createDataFrame and spark.lapply. #' In the specific one case that it is used to convert R native object into SparkDataFrame, it has #' always been kept at the default of 1. In the case the object is large, we are explicitly setting -#' the parallism to numSlices (which is still 1). +#' the parallelism to numSlices (which is still 1). #' #' Specifically, we are changing to split positions to match the calculation in positions() of #' ParallelCollectionRDD in Spark. diff --git a/R/pkg/R/deserialize.R b/R/pkg/R/deserialize.R index 5d22340fb62a0..89a8fbecd36b0 100644 --- a/R/pkg/R/deserialize.R +++ b/R/pkg/R/deserialize.R @@ -250,7 +250,7 @@ readDeserializeWithKeysInArrow <- function(inputCon) { keys <- readMultipleObjects(inputCon) - # Read keys to map with each groupped batch later. + # Read keys to map with each grouped batch later. list(keys = keys, data = data) } diff --git a/R/pkg/R/functions.R b/R/pkg/R/functions.R index 5d9c8e8124d9a..58d07a8d8fc2f 100644 --- a/R/pkg/R/functions.R +++ b/R/pkg/R/functions.R @@ -144,7 +144,7 @@ NULL #' @param y Column to compute on. #' @param pos In \itemize{ #' \item \code{locate}: a start position of search. -#' \item \code{overlay}: a start postiton for replacement. 
+#' \item \code{overlay}: a start position for replacement. #' } #' @param len In \itemize{ #' \item \code{lpad} the maximum length of each output result. @@ -338,12 +338,79 @@ NULL #' tmp <- mutate(df, dist = over(cume_dist(), ws), dense_rank = over(dense_rank(), ws), #' lag = over(lag(df$mpg), ws), lead = over(lead(df$mpg, 1), ws), #' percent_rank = over(percent_rank(), ws), -#' rank = over(rank(), ws), row_number = over(row_number(), ws)) +#' rank = over(rank(), ws), row_number = over(row_number(), ws), +#' nth_value = over(nth_value(df$mpg, 3), ws)) #' # Get ntile group id (1-4) for hp #' tmp <- mutate(tmp, ntile = over(ntile(4), ws)) #' head(tmp)} NULL +#' ML functions for Column operations +#' +#' ML functions defined for \code{Column}. +#' +#' @param x Column to compute on. +#' @param ... additional argument(s). +#' @name column_ml_functions +#' @rdname column_ml_functions +#' @family ml functions +#' @examples +#' \dontrun{ +#' df <- read.df("data/mllib/sample_libsvm_data.txt", source = "libsvm") +#' head( +#' withColumn( +#' withColumn(df, "array", vector_to_array(df$features)), +#' "vector", +#' array_to_vector(column("array")) +#' ) +#' ) +#' } +NULL + +#' Avro processing functions for Column operations +#' +#' Avro processing functions defined for \code{Column}. +#' +#' @param x Column to compute on. +#' @param jsonFormatSchema character Avro schema in JSON string format +#' @param ... additional argument(s) passed as parser options. +#' @name column_avro_functions +#' @rdname column_avro_functions +#' @family avro functions +#' @note Avro is built-in but external data source module since Spark 2.4. +#' Please deploy the application as per +#' \href{https://spark.apache.org/docs/latest/sql-data-sources-avro.html#deploying}{ +#' the deployment section +#' } of "Apache Avro Data Source Guide". +#' @examples +#' \dontrun{ +#' df <- createDataFrame(iris) +#' schema <- paste( +#' c( +#' '{"type": "record", "namespace": "example.avro", "name": "Iris", "fields": [', +#' '{"type": ["double", "null"], "name": "Sepal_Length"},', +#' '{"type": ["double", "null"], "name": "Sepal_Width"},', +#' '{"type": ["double", "null"], "name": "Petal_Length"},', +#' '{"type": ["double", "null"], "name": "Petal_Width"},', +#' '{"type": ["string", "null"], "name": "Species"}]}' +#' ), +#' collapse="\\n" +#' ) +#' +#' df_serialized <- select( +#' df, +#' alias(to_avro(alias(struct(column("*")), "fields")), "payload") +#' ) +#' +#' df_deserialized <- select( +#' df_serialized, +#' from_avro(df_serialized$payload, schema) +#' ) +#' +#' head(df_deserialized) +#' } +NULL + #' @details #' \code{lit}: A new Column is created to represent the literal value. #' If the parameter is a Column, it is returned unchanged. @@ -394,6 +461,19 @@ setMethod("acos", column(jc) }) +#' @details +#' \code{acosh}: Computes inverse hyperbolic cosine of the input column. +#' +#' @rdname column_math_functions +#' @aliases acosh acosh,Column-method +#' @note acosh since 3.1.0 +setMethod("acosh", + signature(x = "Column"), + function(x) { + jc <- callJStatic("org.apache.spark.sql.functions", "acosh", x@jc) + column(jc) + }) + #' @details #' \code{approx_count_distinct}: Returns the approximate number of distinct items in a group. #' @@ -461,6 +541,19 @@ setMethod("asin", column(jc) }) +#' @details +#' \code{asinh}: Computes inverse hyperbolic sine of the input column. 
+#' +#' @rdname column_math_functions +#' @aliases asinh asinh,Column-method +#' @note asinh since 3.1.0 +setMethod("asinh", + signature(x = "Column"), + function(x) { + jc <- callJStatic("org.apache.spark.sql.functions", "asinh", x@jc) + column(jc) + }) + #' @details #' \code{atan}: Returns the inverse tangent of the given value, #' as if computed by \code{java.lang.Math.atan()} @@ -475,6 +568,19 @@ setMethod("atan", column(jc) }) +#' @details +#' \code{atanh}: Computes inverse hyperbolic tangent of the input column. +#' +#' @rdname column_math_functions +#' @aliases atanh atanh,Column-method +#' @note atanh since 3.1.0 +setMethod("atanh", + signature(x = "Column"), + function(x) { + jc <- callJStatic("org.apache.spark.sql.functions", "atanh", x@jc) + column(jc) + }) + #' avg #' #' Aggregate function: returns the average of the values in a group. @@ -809,6 +915,57 @@ setMethod("xxhash64", column(jc) }) +#' @details +#' \code{assert_true}: Returns null if the input column is true; throws an exception +#' with the provided error message otherwise. +#' +#' @param errMsg (optional) The error message to be thrown. +#' +#' @rdname column_misc_functions +#' @aliases assert_true assert_true,Column-method +#' @examples +#' \dontrun{ +#' tmp <- mutate(df, v1 = assert_true(df$vs < 2), +#' v2 = assert_true(df$vs < 2, "custom error message"), +#' v3 = assert_true(df$vs < 2, df$vs)) +#' head(tmp)} +#' @note assert_true since 3.1.0 +setMethod("assert_true", + signature(x = "Column"), + function(x, errMsg = NULL) { + jc <- if (is.null(errMsg)) { + callJStatic("org.apache.spark.sql.functions", "assert_true", x@jc) + } else { + if (is.character(errMsg)) { + stopifnot(length(errMsg) == 1) + errMsg <- lit(errMsg) + } + callJStatic("org.apache.spark.sql.functions", "assert_true", x@jc, errMsg@jc) + } + column(jc) + }) + +#' @details +#' \code{raise_error}: Throws an exception with the provided error message. +#' +#' @rdname column_misc_functions +#' @aliases raise_error raise_error,characterOrColumn-method +#' @examples +#' \dontrun{ +#' tmp <- mutate(df, v1 = raise_error("error message")) +#' head(tmp)} +#' @note raise_error since 3.1.0 +setMethod("raise_error", + signature(x = "characterOrColumn"), + function(x) { + if (is.character(x)) { + stopifnot(length(x) == 1) + x <- lit(x) + } + jc <- callJStatic("org.apache.spark.sql.functions", "raise_error", x@jc) + column(jc) + }) + #' @details #' \code{dayofmonth}: Extracts the day of the month as an integer from a #' given date/timestamp/string. @@ -1417,8 +1574,10 @@ setMethod("quarter", }) #' @details -#' \code{percentile_approx} Returns the approximate percentile value of -#' numeric column at the given percentage. +#' \code{percentile_approx} Returns the approximate \code{percentile} of the numeric column +#' \code{col} which is the smallest value in the ordered \code{col} values (sorted from least to +#' greatest) such that no more than \code{percentage} of \code{col} values is less than the value +#' or equal to that value. #' #' @param percentage Numeric percentage at which percentile should be computed #' All values should be between 0 and 1. @@ -2765,7 +2924,7 @@ setMethod("shiftRight", signature(y = "Column", x = "numeric"), }) #' @details -#' \code{shiftRightUnsigned}: (Unigned) shifts the given value numBits right. If the given value is +#' \code{shiftRightUnsigned}: (Unsigned) shifts the given value numBits right. If the given value is #' a long value, it will return a long value else it will return an integer value. 
#' #' @rdname column_math_functions @@ -3296,6 +3455,37 @@ setMethod("lead", column(jc) }) +#' @details +#' \code{nth_value}: Window function: returns the value that is the \code{offset}th +#' row of the window frame# (counting from 1), and \code{null} if the size of window +#' frame is less than \code{offset} rows. +#' +#' @param offset a numeric indicating number of row to use as the value +#' @param na.rm a logical which indicates that the Nth value should skip null in the +#' determination of which row to use +#' +#' @rdname column_window_functions +#' @aliases nth_value nth_value,characterOrColumn-method +#' @note nth_value since 3.1.0 +setMethod("nth_value", + signature(x = "characterOrColumn", offset = "numeric"), + function(x, offset, na.rm = FALSE) { + x <- if (is.character(x)) { + column(x) + } else { + x + } + offset <- as.integer(offset) + jc <- callJStatic( + "org.apache.spark.sql.functions", + "nth_value", + x@jc, + offset, + na.rm + ) + column(jc) + }) + #' @details #' \code{ntile}: Returns the ntile group id (from 1 to n inclusive) in an ordered window #' partition. For example, if n is 4, the first quarter of the rows will get value 1, the second @@ -4380,7 +4570,8 @@ setMethod("date_trunc", }) #' @details -#' \code{current_date}: Returns the current date as a date column. +#' \code{current_date}: Returns the current date at the start of query evaluation as a date column. +#' All calls of current_date within the same query return the same value. #' #' @rdname column_datetime_functions #' @aliases current_date current_date,missing-method @@ -4396,7 +4587,8 @@ setMethod("current_date", }) #' @details -#' \code{current_timestamp}: Returns the current timestamp as a timestamp column. +#' \code{current_timestamp}: Returns the current timestamp at the start of query evaluation as +#' a timestamp column. All calls of current_timestamp within the same query return the same value. #' #' @rdname column_datetime_functions #' @aliases current_timestamp current_timestamp,missing-method @@ -4407,3 +4599,115 @@ setMethod("current_timestamp", jc <- callJStatic("org.apache.spark.sql.functions", "current_timestamp") column(jc) }) + +#' @details +#' \code{timestamp_seconds}: Creates timestamp from the number of seconds since UTC epoch. +#' +#' @rdname column_datetime_functions +#' @aliases timestamp_seconds timestamp_seconds,Column-method +#' @note timestamp_seconds since 3.1.0 +setMethod("timestamp_seconds", + signature(x = "Column"), + function(x) { + jc <- callJStatic( + "org.apache.spark.sql.functions", "timestamp_seconds", x@jc + ) + column(jc) + }) + +#' @details +#' \code{array_to_vector} Converts a column of array of numeric type into +#' a column of dense vectors in MLlib +#' +#' @rdname column_ml_functions +#' @aliases array_to_vector array_to_vector,Column-method +#' @note array_to_vector since 3.1.0 +setMethod("array_to_vector", + signature(x = "Column"), + function(x) { + jc <- callJStatic( + "org.apache.spark.ml.functions", + "array_to_vector", + x@jc + ) + column(jc) + }) + +#' @details +#' \code{vector_to_array} Converts a column of MLlib sparse/dense vectors into +#' a column of dense arrays. +#' +#' @param dtype The data type of the output array. Valid values: "float64" or "float32". 
+#' +#' @rdname column_ml_functions +#' @aliases vector_to_array vector_to_array,Column-method +#' @note vector_to_array since 3.1.0 +setMethod("vector_to_array", + signature(x = "Column"), + function(x, dtype = c("float64", "float32")) { + dtype <- match.arg(dtype) + jc <- callJStatic( + "org.apache.spark.ml.functions", + "vector_to_array", + x@jc, + dtype + ) + column(jc) + }) + +#' @details +#' \code{from_avro} Converts a binary column of Avro format into its corresponding catalyst value. +#' The specified schema must match the read data, otherwise the behavior is undefined: +#' it may fail or return arbitrary result. +#' To deserialize the data with a compatible and evolved schema, the expected Avro schema can be +#' set via the option avroSchema. +#' +#' @rdname column_avro_functions +#' @aliases from_avro from_avro,Column-method +#' @note from_avro since 3.1.0 +setMethod("from_avro", + signature(x = "characterOrColumn"), + function(x, jsonFormatSchema, ...) { + x <- if (is.character(x)) { + column(x) + } else { + x + } + + options <- varargsToStrEnv(...) + jc <- callJStatic( + "org.apache.spark.sql.avro.functions", "from_avro", + x@jc, + jsonFormatSchema, + options + ) + column(jc) + }) + +#' @details +#' \code{to_avro} Converts a column into binary of Avro format. +#' +#' @rdname column_avro_functions +#' @aliases to_avro to_avro,Column-method +#' @note to_avro since 3.1.0 +setMethod("to_avro", + signature(x = "characterOrColumn"), + function(x, jsonFormatSchema = NULL) { + x <- if (is.character(x)) { + column(x) + } else { + x + } + + jc <- if (is.null(jsonFormatSchema)) { + callJStatic("org.apache.spark.sql.avro.functions", "to_avro", x@jc) + } else { + callJStatic( + "org.apache.spark.sql.avro.functions", + "to_avro", + x@jc, + jsonFormatSchema + ) + } + column(jc) + }) diff --git a/R/pkg/R/generics.R b/R/pkg/R/generics.R index 839c00cf21aeb..fb830aa686f72 100644 --- a/R/pkg/R/generics.R +++ b/R/pkg/R/generics.R @@ -638,7 +638,7 @@ setGeneric("union", function(x, y) { standardGeneric("union") }) setGeneric("unionAll", function(x, y) { standardGeneric("unionAll") }) #' @rdname unionByName -setGeneric("unionByName", function(x, y) { standardGeneric("unionByName") }) +setGeneric("unionByName", function(x, y, ...) { standardGeneric("unionByName") }) #' @rdname unpersist setGeneric("unpersist", function(x, ...) { standardGeneric("unpersist") }) @@ -675,6 +675,12 @@ setGeneric("broadcast", function(x) { standardGeneric("broadcast") }) #' @rdname columnfunctions setGeneric("asc", function(x) { standardGeneric("asc") }) +#' @rdname columnfunctions +setGeneric("asc_nulls_first", function(x) { standardGeneric("asc_nulls_first") }) + +#' @rdname columnfunctions +setGeneric("asc_nulls_last", function(x) { standardGeneric("asc_nulls_last") }) + #' @rdname between setGeneric("between", function(x, bounds) { standardGeneric("between") }) @@ -689,6 +695,12 @@ setGeneric("contains", function(x, ...) 
{ standardGeneric("contains") }) #' @rdname columnfunctions setGeneric("desc", function(x) { standardGeneric("desc") }) +#' @rdname columnfunctions +setGeneric("desc_nulls_first", function(x) { standardGeneric("desc_nulls_first") }) + +#' @rdname columnfunctions +setGeneric("desc_nulls_last", function(x) { standardGeneric("desc_nulls_last") }) + #' @rdname endsWith setGeneric("endsWith", function(x, suffix) { standardGeneric("endsWith") }) @@ -729,6 +741,12 @@ setGeneric("over", function(x, window) { standardGeneric("over") }) #' @rdname eq_null_safe setGeneric("%<=>%", function(x, value) { standardGeneric("%<=>%") }) +#' @rdname withField +setGeneric("withField", function(x, fieldName, col) { standardGeneric("withField") }) + +#' @rdname dropFields +setGeneric("dropFields", function(x, ...) { standardGeneric("dropFields") }) + ###################### WindowSpec Methods ########################## #' @rdname partitionBy @@ -820,6 +838,10 @@ setGeneric("array_repeat", function(x, count) { standardGeneric("array_repeat") #' @name NULL setGeneric("array_sort", function(x) { standardGeneric("array_sort") }) +#' @rdname column_ml_functions +#' @name NULL +setGeneric("array_to_vector", function(x) { standardGeneric("array_to_vector") }) + #' @rdname column_collection_functions #' @name NULL setGeneric("array_transform", function(x, f) { standardGeneric("array_transform") }) @@ -844,6 +866,10 @@ setGeneric("arrays_zip_with", function(x, y, f) { standardGeneric("arrays_zip_wi #' @name NULL setGeneric("ascii", function(x) { standardGeneric("ascii") }) +#' @rdname column_misc_functions +#' @name NULL +setGeneric("assert_true", function(x, errMsg = NULL) { standardGeneric("assert_true") }) + #' @param x Column to compute on or a GroupedData object. #' @param ... additional argument(s) when \code{x} is a GroupedData object. #' @rdname avg @@ -928,7 +954,6 @@ setGeneric("current_date", function(x = "missing") { standardGeneric("current_da #' @name NULL setGeneric("current_timestamp", function(x = "missing") { standardGeneric("current_timestamp") }) - #' @rdname column_datetime_diff_functions #' @name NULL setGeneric("datediff", function(y, x) { standardGeneric("datediff") }) @@ -993,6 +1018,10 @@ setGeneric("expr", function(x) { standardGeneric("expr") }) #' @name NULL setGeneric("flatten", function(x) { standardGeneric("flatten") }) +#' @rdname column_avro_functions +#' @name NULL +setGeneric("from_avro", function(x, ...) { standardGeneric("from_avro") }) + #' @rdname column_datetime_diff_functions #' @name NULL setGeneric("from_utc_timestamp", function(y, x) { standardGeneric("from_utc_timestamp") }) @@ -1161,6 +1190,10 @@ setGeneric("months_between", function(y, x, ...) { standardGeneric("months_betwe #' @rdname count setGeneric("n", function(x) { standardGeneric("n") }) +#' @rdname column_window_functions +#' @name NULL +setGeneric("nth_value", function(x, offset, ...) 
{ standardGeneric("nth_value") }) + #' @rdname column_nonaggregate_functions #' @name NULL setGeneric("nanvl", function(y, x) { standardGeneric("nanvl") }) @@ -1213,6 +1246,10 @@ setGeneric("posexplode_outer", function(x) { standardGeneric("posexplode_outer") #' @name NULL setGeneric("quarter", function(x) { standardGeneric("quarter") }) +#' @rdname column_misc_functions +#' @name NULL +setGeneric("raise_error", function(x) { standardGeneric("raise_error") }) + #' @rdname column_nonaggregate_functions #' @name NULL setGeneric("rand", function(seed) { standardGeneric("rand") }) @@ -1354,6 +1391,14 @@ setGeneric("substring_index", function(x, delim, count) { standardGeneric("subst #' @name NULL setGeneric("sumDistinct", function(x) { standardGeneric("sumDistinct") }) +#' @rdname column_datetime_functions +#' @name timestamp_seconds +setGeneric("timestamp_seconds", function(x) { standardGeneric("timestamp_seconds") }) + +#' @rdname column_avro_functions +#' @name NULL +setGeneric("to_avro", function(x, ...) { standardGeneric("to_avro") }) + #' @rdname column_collection_functions #' @name NULL setGeneric("transform_keys", function(x, f) { standardGeneric("transform_keys") }) @@ -1438,6 +1483,10 @@ setGeneric("var_pop", function(x) { standardGeneric("var_pop") }) #' @name NULL setGeneric("var_samp", function(x) { standardGeneric("var_samp") }) +#' @rdname column_ml_functions +#' @name NULL +setGeneric("vector_to_array", function(x, ...) { standardGeneric("vector_to_array") }) + #' @rdname column_datetime_functions #' @name NULL setGeneric("weekofyear", function(x) { standardGeneric("weekofyear") }) diff --git a/R/pkg/R/install.R b/R/pkg/R/install.R index ea2c0b4c0f42f..bbb9188cd083f 100644 --- a/R/pkg/R/install.R +++ b/R/pkg/R/install.R @@ -39,11 +39,11 @@ #' version number in the format of "x.y" where x and y are integer. #' If \code{hadoopVersion = "without"}, "Hadoop free" build is installed. #' See -#' \href{http://spark.apache.org/docs/latest/hadoop-provided.html}{ +#' \href{https://spark.apache.org/docs/latest/hadoop-provided.html}{ #' "Hadoop Free" Build} for more information. #' Other patched version names can also be used, e.g. \code{"cdh4"} #' @param mirrorUrl base URL of the repositories to use. The directory layout should follow -#' \href{http://www.apache.org/dyn/closer.lua/spark/}{Apache mirrors}. +#' \href{https://www.apache.org/dyn/closer.lua/spark/}{Apache mirrors}. #' @param localDir a local directory where Spark is installed. The directory contains #' version-specific folders of Spark packages. Default is path to #' the cache directory: @@ -64,7 +64,7 @@ #'} #' @note install.spark since 2.1.0 #' @seealso See available Hadoop versions: -#' \href{http://spark.apache.org/downloads.html}{Apache Spark} +#' \href{https://spark.apache.org/downloads.html}{Apache Spark} install.spark <- function(hadoopVersion = "2.7", mirrorUrl = NULL, localDir = NULL, overwrite = FALSE) { sparkHome <- Sys.getenv("SPARK_HOME") @@ -289,7 +289,7 @@ sparkCachePath <- function() { } # Length of the Spark cache specific relative path segments for each platform -# eg. "Apache\Spark\Cache" is 3 in Windows, or "spark" is 1 in unix +# e.g. "Apache\Spark\Cache" is 3 in Windows, or "spark" is 1 in unix # Must match sparkCachePath() exactly. 
sparkCacheRelPathLength <- function() { if (is_windows()) { diff --git a/R/pkg/R/mllib_classification.R b/R/pkg/R/mllib_classification.R index ec83b6bd406a7..71ebe4e26ef63 100644 --- a/R/pkg/R/mllib_classification.R +++ b/R/pkg/R/mllib_classification.R @@ -425,7 +425,7 @@ setMethod("write.ml", signature(object = "LogisticRegressionModel", path = "char #' predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models. #' Only categorical data is supported. #' For more details, see -#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html}{ +#' \href{https://spark.apache.org/docs/latest/ml-classification-regression.html}{ #' Multilayer Perceptron} #' #' @param data a \code{SparkDataFrame} of observations and labels for model fitting. @@ -574,7 +574,7 @@ setMethod("write.ml", signature(object = "MultilayerPerceptronClassificationMode #' @rdname spark.naiveBayes #' @aliases spark.naiveBayes,SparkDataFrame,formula-method #' @name spark.naiveBayes -#' @seealso e1071: \url{https://cran.r-project.org/package=e1071} +#' @seealso e1071: \url{https://cran.r-project.org/web/packages/e1071/index.html} #' @examples #' \dontrun{ #' data <- as.data.frame(UCBAdmissions) diff --git a/R/pkg/R/mllib_clustering.R b/R/pkg/R/mllib_clustering.R index 8bc15353465d8..ff7cbd8fc9b74 100644 --- a/R/pkg/R/mllib_clustering.R +++ b/R/pkg/R/mllib_clustering.R @@ -204,7 +204,7 @@ setMethod("write.ml", signature(object = "BisectingKMeansModel", path = "charact #' @return \code{spark.gaussianMixture} returns a fitted multivariate gaussian mixture model. #' @rdname spark.gaussianMixture #' @name spark.gaussianMixture -#' @seealso mixtools: \url{https://cran.r-project.org/package=mixtools} +#' @seealso mixtools: \url{https://cran.r-project.org/web/packages/mixtools/index.html} #' @examples #' \dontrun{ #' sparkR.session() @@ -483,7 +483,7 @@ setMethod("write.ml", signature(object = "KMeansModel", path = "character"), #' @return \code{spark.lda} returns a fitted Latent Dirichlet Allocation model. #' @rdname spark.lda #' @aliases spark.lda,SparkDataFrame-method -#' @seealso topicmodels: \url{https://cran.r-project.org/package=topicmodels} +#' @seealso topicmodels: \url{https://cran.r-project.org/web/packages/topicmodels/index.html} #' @examples #' \dontrun{ #' text <- read.df("data/mllib/sample_lda_libsvm_data.txt", source = "libsvm") diff --git a/R/pkg/R/mllib_fpm.R b/R/pkg/R/mllib_fpm.R index 30bc51b932041..65a43514930f0 100644 --- a/R/pkg/R/mllib_fpm.R +++ b/R/pkg/R/mllib_fpm.R @@ -125,7 +125,7 @@ setMethod("spark.freqItemsets", signature(object = "FPGrowthModel"), #' The \code{SparkDataFrame} contains five columns: #' \code{antecedent} (an array of the same type as the input column), #' \code{consequent} (an array of the same type as the input column), -#' \code{condfidence} (confidence for the rule) +#' \code{confidence} (confidence for the rule) #' \code{lift} (lift for the rule) #' and \code{support} (support for the rule) #' @rdname spark.fpGrowth diff --git a/R/pkg/R/mllib_recommendation.R b/R/pkg/R/mllib_recommendation.R index d238ff93ed245..87a1bc991f812 100644 --- a/R/pkg/R/mllib_recommendation.R +++ b/R/pkg/R/mllib_recommendation.R @@ -30,7 +30,7 @@ setClass("ALSModel", representation(jobj = "jobj")) #' to make predictions on new data, and \code{write.ml}/\code{read.ml} to save/load fitted models. 
#' #' For more details, see -#' \href{http://spark.apache.org/docs/latest/ml-collaborative-filtering.html}{MLlib: +#' \href{https://spark.apache.org/docs/latest/ml-collaborative-filtering.html}{MLlib: #' Collaborative Filtering}. #' #' @param data a SparkDataFrame for training. diff --git a/R/pkg/R/mllib_regression.R b/R/pkg/R/mllib_regression.R index b2228a141689b..db9f367407df3 100644 --- a/R/pkg/R/mllib_regression.R +++ b/R/pkg/R/mllib_regression.R @@ -475,7 +475,7 @@ setMethod("write.ml", signature(object = "IsotonicRegressionModel", path = "char #' @param ... additional arguments passed to the method. #' @return \code{spark.survreg} returns a fitted AFT survival regression model. #' @rdname spark.survreg -#' @seealso survival: \url{https://cran.r-project.org/package=survival} +#' @seealso survival: \url{https://cran.r-project.org/web/packages/survival/index.html} #' @examples #' \dontrun{ #' df <- createDataFrame(ovarian) diff --git a/R/pkg/R/mllib_stat.R b/R/pkg/R/mllib_stat.R index 6db4d5d4831dd..f82fb589bb5a5 100644 --- a/R/pkg/R/mllib_stat.R +++ b/R/pkg/R/mllib_stat.R @@ -49,7 +49,7 @@ setClass("KSTest", representation(jobj = "jobj")) #' @rdname spark.kstest #' @aliases spark.kstest,SparkDataFrame-method #' @name spark.kstest -#' @seealso \href{http://spark.apache.org/docs/latest/mllib-statistics.html#hypothesis-testing}{ +#' @seealso \href{https://spark.apache.org/docs/latest/mllib-statistics.html#hypothesis-testing}{ #' MLlib: Hypothesis Testing} #' @examples #' \dontrun{ diff --git a/R/pkg/R/mllib_tree.R b/R/pkg/R/mllib_tree.R index f6aa48f5fa04a..f3192ee9b1382 100644 --- a/R/pkg/R/mllib_tree.R +++ b/R/pkg/R/mllib_tree.R @@ -53,7 +53,7 @@ setClass("DecisionTreeRegressionModel", representation(jobj = "jobj")) #' @note DecisionTreeClassificationModel since 2.3.0 setClass("DecisionTreeClassificationModel", representation(jobj = "jobj")) -# Create the summary of a tree ensemble model (eg. Random Forest, GBT) +# Create the summary of a tree ensemble model (e.g. Random Forest, GBT) summary.treeEnsemble <- function(model) { jobj <- model@jobj formula <- callJMethod(jobj, "formula") @@ -73,7 +73,7 @@ summary.treeEnsemble <- function(model) { jobj = jobj) } -# Prints the summary of tree ensemble models (eg. Random Forest, GBT) +# Prints the summary of tree ensemble models (e.g. Random Forest, GBT) print.summary.treeEnsemble <- function(x) { jobj <- x$jobj cat("Formula: ", x$formula) @@ -127,9 +127,9 @@ print.summary.decisionTree <- function(x) { #' \code{write.ml}/\code{read.ml} to save/load fitted models. #' For more details, see # nolint start -#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html#gradient-boosted-tree-regression}{ +#' \href{https://spark.apache.org/docs/latest/ml-classification-regression.html#gradient-boosted-tree-regression}{ #' GBT Regression} and -#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html#gradient-boosted-tree-classifier}{ +#' \href{https://spark.apache.org/docs/latest/ml-classification-regression.html#gradient-boosted-tree-classifier}{ #' GBT Classification} # nolint end #' @@ -343,9 +343,9 @@ setMethod("write.ml", signature(object = "GBTClassificationModel", path = "chara #' save/load fitted models. 
#' For more details, see # nolint start -#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html#random-forest-regression}{ +#' \href{https://spark.apache.org/docs/latest/ml-classification-regression.html#random-forest-regression}{ #' Random Forest Regression} and -#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html#random-forest-classifier}{ +#' \href{https://spark.apache.org/docs/latest/ml-classification-regression.html#random-forest-classifier}{ #' Random Forest Classification} # nolint end #' @@ -568,9 +568,9 @@ setMethod("write.ml", signature(object = "RandomForestClassificationModel", path #' save/load fitted models. #' For more details, see # nolint start -#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html#decision-tree-regression}{ +#' \href{https://spark.apache.org/docs/latest/ml-classification-regression.html#decision-tree-regression}{ #' Decision Tree Regression} and -#' \href{http://spark.apache.org/docs/latest/ml-classification-regression.html#decision-tree-classifier}{ +#' \href{https://spark.apache.org/docs/latest/ml-classification-regression.html#decision-tree-classifier}{ #' Decision Tree Classification} # nolint end #' diff --git a/R/pkg/R/mllib_utils.R b/R/pkg/R/mllib_utils.R index f38f1ac3a6b4c..d943d8d0ab4c0 100644 --- a/R/pkg/R/mllib_utils.R +++ b/R/pkg/R/mllib_utils.R @@ -18,7 +18,7 @@ # mllib_utils.R: Utilities for MLlib integration # Integration with R's standard functions. -# Most of MLlib's argorithms are provided in two flavours: +# Most of MLlib's algorithms are provided in two flavours: # - a specialization of the default R methods (glm). These methods try to respect # the inputs and the outputs of R's method to the largest extent, but some small differences # may exist. diff --git a/R/pkg/R/pairRDD.R b/R/pkg/R/pairRDD.R index b29381bb900fb..41676be03e951 100644 --- a/R/pkg/R/pairRDD.R +++ b/R/pkg/R/pairRDD.R @@ -239,7 +239,7 @@ setMethod("partitionByRDD", javaPairRDD <- callJMethod(javaPairRDD, "partitionBy", rPartitioner) # Call .values() on the result to get back the final result, the - # shuffled acutal content key-val pairs. + # shuffled actual content key-val pairs. r <- callJMethod(javaPairRDD, "values") RDD(r, serializedMode = "byte") @@ -411,7 +411,7 @@ setMethod("reduceByKeyLocally", #' \itemize{ #' \item createCombiner, which turns a V into a C (e.g., creates a one-element list) #' \item mergeValue, to merge a V into a C (e.g., adds it to the end of a list) - -#' \item mergeCombiners, to combine two C's into a single one (e.g., concatentates +#' \item mergeCombiners, to combine two C's into a single one (e.g., concatenates #' two lists). #' } #' diff --git a/R/pkg/R/stats.R b/R/pkg/R/stats.R index 7252351ebebb2..0aabceef226e3 100644 --- a/R/pkg/R/stats.R +++ b/R/pkg/R/stats.R @@ -109,7 +109,8 @@ setMethod("corr", #' #' Finding frequent items for columns, possibly with false positives. #' Using the frequent element count algorithm described in -#' \url{https://doi.org/10.1145/762471.762473}, proposed by Karp, Schenker, and Papadimitriou. +#' \url{https://dl.acm.org/doi/10.1145/762471.762473}, proposed by Karp, Schenker, +#' and Papadimitriou. #' #' @param x A SparkDataFrame. #' @param cols A vector column names to search frequent items in. 
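The `freqItems` documentation touched above refers to the Karp, Schenker, and Papadimitriou sketch, which by design may report false positives. A minimal hedged usage sketch with an illustrative frame:

```r
library(SparkR)
sparkR.session()

df <- createDataFrame(data.frame(
  a = c(1, 1, 1, 2, 3),
  b = c("x", "x", "y", "x", "z"),
  stringsAsFactors = FALSE
))

# Items occurring in at least ~40% of rows; the result can include false positives.
collect(freqItems(df, c("a", "b"), support = 0.4))
```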
diff --git a/R/pkg/R/streaming.R b/R/pkg/R/streaming.R index 5eccbdc9d3818..2bcfb363f9d24 100644 --- a/R/pkg/R/streaming.R +++ b/R/pkg/R/streaming.R @@ -93,7 +93,7 @@ setMethod("explain", #' lastProgress #' -#' Prints the most recent progess update of this streaming query in JSON format. +#' Prints the most recent progress update of this streaming query in JSON format. #' #' @param x a StreamingQuery. #' @rdname lastProgress diff --git a/R/pkg/R/types.R b/R/pkg/R/types.R index 5d48a9eee2799..dfa83c35665ce 100644 --- a/R/pkg/R/types.R +++ b/R/pkg/R/types.R @@ -68,7 +68,7 @@ rToSQLTypes <- as.environment(list( "character" = "string", "logical" = "boolean")) -# Helper function of coverting decimal type. When backend returns column type in the +# Helper function of converting decimal type. When backend returns column type in the # format of decimal(,) (e.g., decimal(10, 0)), this function coverts the column type # as double type. This function converts backend returned types that are not the key # of PRIMITIVE_TYPES, but should be treated as PRIMITIVE_TYPES. diff --git a/R/pkg/R/utils.R b/R/pkg/R/utils.R index d6f9f927d5cdc..264cbfc9ba929 100644 --- a/R/pkg/R/utils.R +++ b/R/pkg/R/utils.R @@ -930,7 +930,7 @@ getOne <- function(x, envir, inherits = TRUE, ifnotfound = NULL) { } # Returns a vector of parent directories, traversing up count times, starting with a full path -# eg. traverseParentDirs("/Users/user/Library/Caches/spark/spark2.2", 1) should return +# e.g. traverseParentDirs("/Users/user/Library/Caches/spark/spark2.2", 1) should return # this "/Users/user/Library/Caches/spark/spark2.2" # and "/Users/user/Library/Caches/spark" traverseParentDirs <- function(x, count) { diff --git a/R/pkg/inst/profile/shell.R b/R/pkg/inst/profile/shell.R index f6c20e1a5ebc3..ffedb3038fd53 100644 --- a/R/pkg/inst/profile/shell.R +++ b/R/pkg/inst/profile/shell.R @@ -43,5 +43,7 @@ cat(" /_/", "\n") cat("\n") - cat("\nSparkSession available as 'spark'.\n") + cat("\nSparkSession Web UI available at", SparkR::sparkR.uiWebUrl()) + cat("\nSparkSession available as 'spark'(master = ", unlist(SparkR::sparkR.conf("spark.master")), + ", app id = ", unlist(SparkR::sparkR.conf("spark.app.id")), ").", "\n", sep = "") } diff --git a/R/pkg/inst/worker/daemon.R b/R/pkg/inst/worker/daemon.R index fb9db63b07cd0..4589bb9c6ad1b 100644 --- a/R/pkg/inst/worker/daemon.R +++ b/R/pkg/inst/worker/daemon.R @@ -32,7 +32,7 @@ inputCon <- socketConnection( SparkR:::doServerAuth(inputCon, Sys.getenv("SPARKR_WORKER_SECRET")) -# Waits indefinitely for a socket connecion by default. +# Waits indefinitely for a socket connection by default. selectTimeout <- NULL while (TRUE) { @@ -72,7 +72,7 @@ while (TRUE) { } }) } else if (is.null(children)) { - # If it is NULL, there are no children. Waits indefinitely for a socket connecion. + # If it is NULL, there are no children. Waits indefinitely for a socket connection. 
selectTimeout <- NULL } diff --git a/R/pkg/inst/worker/worker.R b/R/pkg/inst/worker/worker.R index 1ef05ea621e83..7fc4680bad10e 100644 --- a/R/pkg/inst/worker/worker.R +++ b/R/pkg/inst/worker/worker.R @@ -85,7 +85,7 @@ outputResult <- function(serializer, output, outputCon) { } # Constants -specialLengths <- list(END_OF_STERAM = 0L, TIMING_DATA = -1L) +specialLengths <- list(END_OF_STREAM = 0L, TIMING_DATA = -1L) # Timing R process boot bootTime <- currentTimeSecs() @@ -180,7 +180,7 @@ if (isEmpty != 0) { } else if (deserializer == "arrow" && mode == 1) { data <- SparkR:::readDeserializeInArrow(inputCon) # See https://stat.ethz.ch/pipermail/r-help/2010-September/252046.html - # rbind.fill might be an anternative to make it faster if plyr is installed. + # rbind.fill might be an alternative to make it faster if plyr is installed. # Also, note that, 'dapply' applies a function to each partition. data <- do.call("rbind", data) } @@ -196,7 +196,7 @@ if (isEmpty != 0) { outputs <- list() for (i in seq_len(length(data))) { # Timing reading input data for execution - inputElap <- elapsedSecs() + computeStart <- elapsedSecs() output <- compute(mode, partition, serializer, deserializer, keys[[i]], colNames, computeFunc, data[[i]]) computeElap <- elapsedSecs() @@ -204,17 +204,18 @@ if (isEmpty != 0) { outputs[[length(outputs) + 1L]] <- output } else { outputResult(serializer, output, outputCon) + outputComputeElapsDiff <- outputComputeElapsDiff + (elapsedSecs() - computeElap) } - outputElap <- elapsedSecs() - computeInputElapsDiff <- computeInputElapsDiff + (computeElap - inputElap) - outputComputeElapsDiff <- outputComputeElapsDiff + (outputElap - computeElap) + computeInputElapsDiff <- computeInputElapsDiff + (computeElap - computeStart) } if (serializer == "arrow") { # See https://stat.ethz.ch/pipermail/r-help/2010-September/252046.html - # rbind.fill might be an anternative to make it faster if plyr is installed. + # rbind.fill might be an alternative to make it faster if plyr is installed. + outputStart <- elapsedSecs() combined <- do.call("rbind", outputs) SparkR:::writeSerializeInArrow(outputCon, combined) + outputComputeElapsDiff <- elapsedSecs() - outputStart } } } else { @@ -285,7 +286,7 @@ SparkR:::writeDouble(outputCon, computeInputElapsDiff) # compute SparkR:::writeDouble(outputCon, outputComputeElapsDiff) # output # End of output -SparkR:::writeInt(outputCon, specialLengths$END_OF_STERAM) +SparkR:::writeInt(outputCon, specialLengths$END_OF_STREAM) close(outputCon) close(inputCon) diff --git a/R/pkg/tests/fulltests/test_Serde.R b/R/pkg/tests/fulltests/test_Serde.R index e01f6ee005218..a52289e43ca5e 100644 --- a/R/pkg/tests/fulltests/test_Serde.R +++ b/R/pkg/tests/fulltests/test_Serde.R @@ -125,7 +125,7 @@ test_that("SerDe of list of lists", { sparkR.session.stop() -# Note that this test should be at the end of tests since the configruations used here are not +# Note that this test should be at the end of tests since the configurations used here are not # specific to sessions, and the Spark context is restarted. 
test_that("createDataFrame large objects", { for (encryptionEnabled in list("true", "false")) { diff --git a/R/pkg/tests/fulltests/test_jvm_api.R b/R/pkg/tests/fulltests/test_jvm_api.R index 8b3b4f73de170..3bf6ae556c079 100644 --- a/R/pkg/tests/fulltests/test_jvm_api.R +++ b/R/pkg/tests/fulltests/test_jvm_api.R @@ -20,11 +20,11 @@ context("JVM API") sparkSession <- sparkR.session(master = sparkRTestMaster, enableHiveSupport = FALSE) test_that("Create and call methods on object", { - jarr <- sparkR.newJObject("java.util.ArrayList") + jarray <- sparkR.newJObject("java.util.ArrayList") # Add an element to the array - sparkR.callJMethod(jarr, "add", 1L) + sparkR.callJMethod(jarray, "add", 1L) # Check if get returns the same element - expect_equal(sparkR.callJMethod(jarr, "get", 0L), 1L) + expect_equal(sparkR.callJMethod(jarray, "get", 0L), 1L) }) test_that("Call static methods", { diff --git a/R/pkg/tests/fulltests/test_sparkSQL.R b/R/pkg/tests/fulltests/test_sparkSQL.R index e008bc5bbd7d9..ebf08b9559379 100644 --- a/R/pkg/tests/fulltests/test_sparkSQL.R +++ b/R/pkg/tests/fulltests/test_sparkSQL.R @@ -1424,6 +1424,14 @@ test_that("column functions", { date_trunc("quarter", c) + current_date() + current_timestamp() c25 <- overlay(c1, c2, c3, c3) + overlay(c1, c2, c3) + overlay(c1, c2, 1) + overlay(c1, c2, 3, 4) + c26 <- timestamp_seconds(c1) + vector_to_array(c) + + vector_to_array(c, "float32") + vector_to_array(c, "float64") + + array_to_vector(c) + c27 <- nth_value("x", 1L) + nth_value("y", 2, TRUE) + + nth_value(column("v"), 3) + nth_value(column("z"), 4L, FALSE) + c28 <- asc_nulls_first(c1) + asc_nulls_last(c1) + + desc_nulls_first(c1) + desc_nulls_last(c1) + c29 <- acosh(c1) + asinh(c1) + atanh(c1) # Test if base::is.nan() is exposed expect_equal(is.nan(c("a", "b")), c(FALSE, FALSE)) @@ -1676,9 +1684,9 @@ test_that("column functions", { df <- as.DataFrame(list(list("col" = "1"))) c <- collect(select(df, schema_of_csv("Amsterdam,2018"))) - expect_equal(c[[1]], "struct<_c0:string,_c1:int>") + expect_equal(c[[1]], "STRUCT<`_c0`: STRING, `_c1`: INT>") c <- collect(select(df, schema_of_csv(lit("Amsterdam,2018")))) - expect_equal(c[[1]], "struct<_c0:string,_c1:int>") + expect_equal(c[[1]], "STRUCT<`_c0`: STRING, `_c1`: INT>") # Test to_json(), from_json(), schema_of_json() df <- sql("SELECT array(named_struct('name', 'Bob'), named_struct('name', 'Alice')) as people") @@ -1711,9 +1719,9 @@ test_that("column functions", { df <- as.DataFrame(list(list("col" = "1"))) c <- collect(select(df, schema_of_json('{"name":"Bob"}'))) - expect_equal(c[[1]], "struct") + expect_equal(c[[1]], "STRUCT<`name`: STRING>") c <- collect(select(df, schema_of_json(lit('{"name":"Bob"}')))) - expect_equal(c[[1]], "struct") + expect_equal(c[[1]], "STRUCT<`name`: STRING>") # Test to_json() supports arrays of primitive types and arrays df <- sql("SELECT array(19, 42, 70) as age") @@ -1803,6 +1811,62 @@ test_that("column functions", { ) expect_equal(actual, expected) + + # Test withField + lines <- c("{\"Person\": {\"name\":\"Bob\", \"age\":24, \"height\": 170}}") + jsonPath <- tempfile(pattern = "sparkr-test", fileext = ".tmp") + writeLines(lines, jsonPath) + df <- read.df(jsonPath, "json") + result <- collect( + select( + select(df, alias(withField(df$Person, "dummy", lit(42)), "Person")), + "Person.dummy" + ) + ) + expect_equal(result, data.frame(dummy = 42)) + + # Test dropFields + expect_setequal( + colnames(select( + withColumn(df, "Person", dropFields(df$Person, "age")), + column("Person.*") + )), + c("name", 
"height") + ) + + expect_equal( + colnames(select( + withColumn(df, "Person", dropFields(df$Person, "height", "name")), + column("Person.*") + )), + "age" + ) +}) + +test_that("avro column functions", { + skip_if_not( + grepl("spark-avro", sparkR.conf("spark.jars", "")), + "spark-avro jar not present" + ) + + schema <- '{"namespace": "example.avro", + "type": "record", + "name": "User", + "fields": [ + {"name": "name", "type": "string"}, + {"name": "favorite_color", "type": ["string", "null"]} + ] + }' + + c0 <- column("foo") + c1 <- from_avro(c0, schema) + expect_s4_class(c1, "Column") + c2 <- from_avro("foo", schema) + expect_s4_class(c2, "Column") + c3 <- to_avro(c1) + expect_s4_class(c3, "Column") + c4 <- to_avro(c1, schema) + expect_s4_class(c4, "Column") }) test_that("column binary mathfunctions", { @@ -2030,7 +2094,7 @@ test_that("higher order functions", { createDataFrame(data.frame(id = 1)), expr("CAST(array(1.0, 2.0, -3.0, -4.0) AS array) xs"), expr("CAST(array(0.0, 3.0, 48.0) AS array) ys"), - expr("array('FAILED', 'SUCCEDED') as vs"), + expr("array('FAILED', 'SUCCEEDED') as vs"), expr("map('foo', 1, 'bar', 2) as mx"), expr("map('foo', 42, 'bar', -1, 'baz', 0) as my") ) @@ -2113,7 +2177,7 @@ test_that("group by, agg functions", { df3 <- agg(gd, age = "stddev") expect_is(df3, "SparkDataFrame") df3_local <- collect(df3) - expect_true(is.nan(df3_local[df3_local$name == "Andy", ][1, 2])) + expect_true(is.na(df3_local[df3_local$name == "Andy", ][1, 2])) df4 <- agg(gd, sumAge = sum(df$age)) expect_is(df4, "SparkDataFrame") @@ -2144,7 +2208,7 @@ test_that("group by, agg functions", { df7 <- agg(gd2, value = "stddev") df7_local <- collect(df7) expect_true(abs(df7_local[df7_local$name == "ID1", ][1, 2] - 6.928203) < 1e-6) - expect_true(is.nan(df7_local[df7_local$name == "ID2", ][1, 2])) + expect_true(is.na(df7_local[df7_local$name == "ID2", ][1, 2])) mockLines3 <- c("{\"name\":\"Andy\", \"age\":30}", "{\"name\":\"Andy\", \"age\":30}", @@ -2696,6 +2760,19 @@ test_that("union(), unionByName(), rbind(), except(), and intersect() on a DataF expect_error(rbind(df, df2, df3), "Names of input data frames are different.") + + df4 <- unionByName(df2, select(df2, "age"), TRUE) + + expect_equal( + sum(collect( + select(df4, alias(isNull(df4$name), "missing_name") + ))$missing_name), + 3 + ) + + testthat::expect_error(unionByName(df2, select(df2, "age"), FALSE)) + testthat::expect_error(unionByName(df2, select(df2, "age"))) + excepted <- arrange(except(df, df2), desc(df$age)) expect_is(unioned, "SparkDataFrame") expect_equal(count(excepted), 2) @@ -2807,6 +2884,15 @@ test_that("mutate(), transform(), rename() and names()", { expect_equal(nrow(result), 153) expect_equal(ncol(result), 2) detach(airquality) + + # ensure long inferred names are handled without error (SPARK-26199) + # test implicitly assumes eval(formals(deparse)$width.cutoff) = 60 + # (which has always been true as of 2020-11-15) + newDF <- mutate( + df, + df$age + 12345678901234567890 + 12345678901234567890 + 12345678901234 + ) + expect_match(tail(columns(newDF), 1L), "234567890", fixed = TRUE) }) test_that("read/write ORC files", { @@ -3196,6 +3282,12 @@ test_that("attach() on a DataFrame", { stat3 <- summary(df[, "age", drop = F]) expect_equal(collect(stat3)[8, "age"], "30") expect_error(age) + + # attach method uses deparse(); ensure no errors from a very long input + abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnop <- df # nolint + attach(abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnop) + 
expect_true(any(grepl("abcdefghijklmnopqrstuvwxyz", search()))) + detach("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnop") }) test_that("with() on a DataFrame", { @@ -3591,7 +3683,7 @@ test_that("gapply() and gapplyCollect() on a DataFrame", { } # Computes the arithmetic mean of the second column by grouping - # on the first and third columns. Output the groupping value and the average. + # on the first and third columns. Output the grouping value and the average. schema <- structType(structField("a", "integer"), structField("c", "string"), structField("avg", "double")) df3 <- gapply( @@ -3889,7 +3981,7 @@ test_that("catalog APIs, listTables, listColumns, listFunctions", { paste("Error in listFunctions : analysis error - Database", "'zxwtyswklpf_db' does not exist")) - # recoverPartitions does not work with tempory view + # recoverPartitions does not work with temporary view expect_error(recoverPartitions("cars"), "no such table - Table or view 'cars' not found in database 'default'") expect_error(refreshTable("cars"), NA) @@ -3898,6 +3990,24 @@ test_that("catalog APIs, listTables, listColumns, listFunctions", { dropTempView("cars") }) +test_that("assert_true, raise_error", { + df <- read.json(jsonPath) + filtered <- filter(df, "age < 20") + + expect_equal(collect(select(filtered, assert_true(filtered$age < 20)))$age, c(NULL)) + expect_equal(collect(select(filtered, assert_true(filtered$age < 20, "error message")))$age, + c(NULL)) + expect_equal(collect(select(filtered, assert_true(filtered$age < 20, filtered$name)))$age, + c(NULL)) + expect_error(collect(select(df, assert_true(df$age < 20))), "is not true!") + expect_error(collect(select(df, assert_true(df$age < 20, "error message"))), + "error message") + expect_error(collect(select(df, assert_true(df$age < 20, df$name))), "Michael") + + expect_error(collect(select(filtered, raise_error("error message"))), "error message") + expect_error(collect(select(filtered, raise_error(filtered$name))), "Justin") +}) + compare_list <- function(list1, list2) { # get testthat to show the diff by first making the 2 lists equal in length expect_equal(length(list1), length(list2)) diff --git a/R/pkg/tests/fulltests/test_utils.R b/R/pkg/tests/fulltests/test_utils.R index c3fb9046fcda4..6c83a137cfb7b 100644 --- a/R/pkg/tests/fulltests/test_utils.R +++ b/R/pkg/tests/fulltests/test_utils.R @@ -116,7 +116,7 @@ test_that("cleanClosure on R functions", { actual <- get("y", envir = env, inherits = FALSE) expect_equal(actual, y) - # Test for combination for nested and sequenctial functions in a closure + # Test for combination for nested and sequential functions in a closure f1 <- function(x) x + 1 f2 <- function(x) f1(x) + 2 userFunc <- function(x) { f1(x); f2(x) } diff --git a/R/pkg/tests/run-all.R b/R/pkg/tests/run-all.R index 3043df0f12075..f9e266eb4e014 100644 --- a/R/pkg/tests/run-all.R +++ b/R/pkg/tests/run-all.R @@ -60,25 +60,37 @@ if (identical(Sys.getenv("NOT_CRAN"), "true")) { # set random seed for predictable results. 
mostly for base's sample() in tree and classification set.seed(42) - # TODO (SPARK-30663) To be removed once testthat 1.x is removed from all builds - if (packageVersion("testthat")$major <= 1) { - # testthat 1.x - test_runner <- testthat:::run_tests - reporter <- "summary" + if (packageVersion("testthat")$major <= 1) stop("testhat 1.x is not supported") + + test_runner <- if (packageVersion("testthat")$major == 2) { + # testthat >= 2.0.0, < 3.0.0 + function(path, package, reporter, filter) { + testthat:::test_package_dir( + test_path = path, + package = package, + filter = filter, + reporter = reporter + ) + } } else { - # testthat >= 2.0.0 - test_runner <- testthat:::test_package_dir - dir.create("target/test-reports", showWarnings = FALSE) - reporter <- MultiReporter$new(list( - SummaryReporter$new(), - JunitReporter$new(file = "target/test-reports/test-results.xml") - )) + # testthat >= 3.0.0 + testthat::test_dir } - test_runner("SparkR", - file.path(sparkRDir, "pkg", "tests", "fulltests"), - NULL, - reporter) + dir.create("target/test-reports", showWarnings = FALSE) + reporter <- MultiReporter$new(list( + SummaryReporter$new(), + JunitReporter$new( + file = file.path(getwd(), "target/test-reports/test-results.xml") + ) + )) + + test_runner( + path = file.path(sparkRDir, "pkg", "tests", "fulltests"), + package = "SparkR", + reporter = reporter, + filter = NULL + ) } SparkR:::uninstallDownloadedSpark() diff --git a/R/pkg/vignettes/sparkr-vignettes.Rmd b/R/pkg/vignettes/sparkr-vignettes.Rmd index 3713e6c784855..3177b54dc5fac 100644 --- a/R/pkg/vignettes/sparkr-vignettes.Rmd +++ b/R/pkg/vignettes/sparkr-vignettes.Rmd @@ -146,7 +146,7 @@ sparkR.session.stop() Different from many other R packages, to use SparkR, you need an additional installation of Apache Spark. The Spark installation will be used to run a backend process that will compile and execute SparkR programs. -After installing the SparkR package, you can call `sparkR.session` as explained in the previous section to start and it will check for the Spark installation. If you are working with SparkR from an interactive shell (eg. R, RStudio) then Spark is downloaded and cached automatically if it is not found. Alternatively, we provide an easy-to-use function `install.spark` for running this manually. If you don't have Spark installed on the computer, you may download it from [Apache Spark Website](https://spark.apache.org/downloads.html). +After installing the SparkR package, you can call `sparkR.session` as explained in the previous section to start and it will check for the Spark installation. If you are working with SparkR from an interactive shell (e.g. R, RStudio) then Spark is downloaded and cached automatically if it is not found. Alternatively, we provide an easy-to-use function `install.spark` for running this manually. If you don't have Spark installed on the computer, you may download it from [Apache Spark Website](https://spark.apache.org/downloads.html). ```{r, eval=FALSE} install.spark() @@ -1007,7 +1007,7 @@ perplexity #### Alternating Least Squares -`spark.als` learns latent factors in [collaborative filtering](https://en.wikipedia.org/wiki/Recommender_system#Collaborative_filtering) via [alternating least squares](https://dl.acm.org/citation.cfm?id=1608614). +`spark.als` learns latent factors in [collaborative filtering](https://en.wikipedia.org/wiki/Recommender_system#Collaborative_filtering) via [alternating least squares](https://dl.acm.org/doi/10.1109/MC.2009.263). 
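For orientation before the options discussed next, a minimal hedged fitting sketch for `spark.als`; the tiny ratings frame and parameter values are illustrative only:

```r
library(SparkR)
sparkR.session()

ratings <- createDataFrame(data.frame(
  user = c(0, 0, 1, 1, 2),
  item = c(0, 1, 1, 2, 2),
  rating = c(4.0, 2.0, 3.0, 4.0, 5.0)
))

# Fit the latent-factor model and score the training data.
model <- spark.als(ratings, "rating", "user", "item",
                   rank = 10, reg = 0.1, nonnegative = TRUE)
head(predict(model, ratings))
```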
There are multiple options that can be configured in `spark.als`, including `rank`, `reg`, and `nonnegative`. For a complete list, refer to the help file. diff --git a/R/run-tests.sh b/R/run-tests.sh index 51ca7d600caf0..edc2b2b60b60e 100755 --- a/R/run-tests.sh +++ b/R/run-tests.sh @@ -23,7 +23,18 @@ FAILED=0 LOGFILE=$FWDIR/unit-tests.out rm -f $LOGFILE -SPARK_TESTING=1 NOT_CRAN=true $FWDIR/../bin/spark-submit --driver-java-options "-Dlog4j.configuration=file:$FWDIR/log4j.properties" --conf spark.hadoop.fs.defaultFS="file:///" --conf spark.driver.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" --conf spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" $FWDIR/pkg/tests/run-all.R 2>&1 | tee -a $LOGFILE +SPARK_AVRO_JAR_PATH=$(find $FWDIR/../external/avro/ -name "spark-avro*jar" -print | egrep -v "tests.jar|test-sources.jar|sources.jar|javadoc.jar") + +if [[ $(echo $SPARK_AVRO_JAR_PATH | wc -l) -eq 1 ]]; then + SPARK_JARS=$SPARK_AVRO_JAR_PATH +fi + +if [ -z "$SPARK_JARS" ]; then + SPARK_TESTING=1 NOT_CRAN=true $FWDIR/../bin/spark-submit --driver-java-options "-Dlog4j.configuration=file:$FWDIR/log4j.properties" --conf spark.hadoop.fs.defaultFS="file:///" --conf spark.driver.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" --conf spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" $FWDIR/pkg/tests/run-all.R 2>&1 | tee -a $LOGFILE +else + SPARK_TESTING=1 NOT_CRAN=true $FWDIR/../bin/spark-submit --jars $SPARK_JARS --driver-java-options "-Dlog4j.configuration=file:$FWDIR/log4j.properties" --conf spark.hadoop.fs.defaultFS="file:///" --conf spark.driver.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" --conf spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" $FWDIR/pkg/tests/run-all.R 2>&1 | tee -a $LOGFILE +fi + FAILED=$((PIPESTATUS[0]||$FAILED)) NUM_TEST_WARNING="$(grep -c -e 'Warnings ----------------' $LOGFILE)" diff --git a/README.md b/README.md index d7931263b0fc7..aa7d1dd338be0 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ and Structured Streaming for stream processing. 
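Since the test-harness change above only exercises the Avro column functions when a spark-avro jar is found, here is a hedged round-trip sketch for `from_avro`/`to_avro`; the package coordinates and the schema are assumptions for illustration, not part of the patch:

```r
library(SparkR)
# spark-avro is an external module; the session must be started with it available, e.g.:
# sparkR.session(sparkPackages = "org.apache.spark:spark-avro_2.12:3.1.0")

avroSchema <- '{"type": "record", "name": "User",
                "fields": [{"name": "name", "type": "string"}]}'

df <- createDataFrame(data.frame(name = c("Bob", "Alice"), stringsAsFactors = FALSE))

# Round-trip a struct column through Avro binary and back.
encoded <- select(df, alias(to_avro(struct(df$name)), "avro"))
decoded <- select(encoded, alias(from_avro(encoded$avro, avroSchema), "decoded"))
head(decoded)
```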
-[![Jenkins Build](https://amplab.cs.berkeley.edu/jenkins/job/spark-master-test-sbt-hadoop-2.7-hive-2.3/badge/icon)](https://amplab.cs.berkeley.edu/jenkins/job/spark-master-test-sbt-hadoop-2.7-hive-2.3) +[![Jenkins Build](https://amplab.cs.berkeley.edu/jenkins/job/spark-master-test-sbt-hadoop-3.2/badge/icon)](https://amplab.cs.berkeley.edu/jenkins/job/spark-master-test-sbt-hadoop-3.2) [![AppVeyor Build](https://img.shields.io/appveyor/ci/ApacheSoftwareFoundation/spark/master.svg?style=plastic&logo=appveyor)](https://ci.appveyor.com/project/ApacheSoftwareFoundation/spark) [![PySpark Coverage](https://img.shields.io/badge/dynamic/xml.svg?label=pyspark%20coverage&url=https%3A%2F%2Fspark-test.github.io%2Fpyspark-coverage-site&query=%2Fhtml%2Fbody%2Fdiv%5B1%5D%2Fdiv%2Fh1%2Fspan&colorB=brightgreen&style=plastic)](https://spark-test.github.io/pyspark-coverage-site) diff --git a/assembly/pom.xml b/assembly/pom.xml index d17abe857ade5..6aa97710f7307 100644 --- a/assembly/pom.xml +++ b/assembly/pom.xml @@ -21,7 +21,7 @@ org.apache.spark spark-parent_2.12 - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT ../pom.xml diff --git a/bin/docker-image-tool.sh b/bin/docker-image-tool.sh index 6d74f8328aea2..2ec1ab8861798 100755 --- a/bin/docker-image-tool.sh +++ b/bin/docker-image-tool.sh @@ -274,7 +274,7 @@ Examples: - Build and push JDK11-based image for multiple archs to docker.io/myrepo $0 -r docker.io/myrepo -t v3.0.0 -X -b java_image_tag=11-jre-slim build # Note: buildx, which does cross building, needs to do the push during build - # So there is no seperate push step with -X + # So there is no separate push step with -X EOF } diff --git a/bin/find-spark-home.cmd b/bin/find-spark-home.cmd index f795d146d49c7..3149d05039ba4 100644 --- a/bin/find-spark-home.cmd +++ b/bin/find-spark-home.cmd @@ -55,6 +55,6 @@ if "x%SPARK_HOME%"=="x" ( set SPARK_HOME=%~dp0.. ) else ( rem We are pip installed, use the Python script to resolve a reasonable SPARK_HOME - for /f "delims=" %%i in ('%PYTHON_RUNNER% %FIND_SPARK_HOME_PYTHON_SCRIPT%') do set SPARK_HOME=%%i + for /f "delims=" %%i in ('%PYTHON_RUNNER% "%FIND_SPARK_HOME_PYTHON_SCRIPT%"') do set SPARK_HOME=%%i ) ) diff --git a/bin/load-spark-env.cmd b/bin/load-spark-env.cmd index fe725a4e1a368..5692af529fb66 100644 --- a/bin/load-spark-env.cmd +++ b/bin/load-spark-env.cmd @@ -24,7 +24,7 @@ rem conf\ subdirectory. if not defined SPARK_ENV_LOADED ( set SPARK_ENV_LOADED=1 - if [%SPARK_CONF_DIR%] == [] ( + if not defined SPARK_CONF_DIR ( set SPARK_CONF_DIR=%~dp0..\conf ) @@ -36,8 +36,8 @@ rem Setting SPARK_SCALA_VERSION if not already set. 
set SCALA_VERSION_1=2.13 set SCALA_VERSION_2=2.12 -set ASSEMBLY_DIR1=%SPARK_HOME%\assembly\target\scala-%SCALA_VERSION_1% -set ASSEMBLY_DIR2=%SPARK_HOME%\assembly\target\scala-%SCALA_VERSION_2% +set ASSEMBLY_DIR1="%SPARK_HOME%\assembly\target\scala-%SCALA_VERSION_1%" +set ASSEMBLY_DIR2="%SPARK_HOME%\assembly\target\scala-%SCALA_VERSION_2%" set ENV_VARIABLE_DOC=https://spark.apache.org/docs/latest/configuration.html#environment-variables if not defined SPARK_SCALA_VERSION ( diff --git a/bin/pyspark b/bin/pyspark index 463a2dcfc7e6c..251bfef5c80a8 100755 --- a/bin/pyspark +++ b/bin/pyspark @@ -50,7 +50,7 @@ export PYSPARK_DRIVER_PYTHON_OPTS # Add the PySpark classes to the Python path: export PYTHONPATH="${SPARK_HOME}/python/:$PYTHONPATH" -export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.9-src.zip:$PYTHONPATH" +export PYTHONPATH="${SPARK_HOME}/python/lib/py4j-0.10.9.1-src.zip:$PYTHONPATH" # Load the PySpark shell.py script when ./pyspark is used interactively: export OLD_PYTHONSTARTUP="$PYTHONSTARTUP" diff --git a/bin/pyspark2.cmd b/bin/pyspark2.cmd index dc34be1a41706..5741480fe5501 100644 --- a/bin/pyspark2.cmd +++ b/bin/pyspark2.cmd @@ -30,7 +30,7 @@ if "x%PYSPARK_DRIVER_PYTHON%"=="x" ( ) set PYTHONPATH=%SPARK_HOME%\python;%PYTHONPATH% -set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.10.9-src.zip;%PYTHONPATH% +set PYTHONPATH=%SPARK_HOME%\python\lib\py4j-0.10.9.1-src.zip;%PYTHONPATH% set OLD_PYTHONSTARTUP=%PYTHONSTARTUP% set PYTHONSTARTUP=%SPARK_HOME%\python\pyspark\shell.py diff --git a/bin/spark-class2.cmd b/bin/spark-class2.cmd old mode 100644 new mode 100755 index 34d04c9856d2c..68b271d1d05d9 --- a/bin/spark-class2.cmd +++ b/bin/spark-class2.cmd @@ -30,12 +30,12 @@ if "x%1"=="x" ( rem Find Spark jars. if exist "%SPARK_HOME%\jars" ( - set SPARK_JARS_DIR="%SPARK_HOME%\jars" + set SPARK_JARS_DIR=%SPARK_HOME%\jars ) else ( - set SPARK_JARS_DIR="%SPARK_HOME%\assembly\target\scala-%SPARK_SCALA_VERSION%\jars" + set SPARK_JARS_DIR=%SPARK_HOME%\assembly\target\scala-%SPARK_SCALA_VERSION%\jars ) -if not exist "%SPARK_JARS_DIR%"\ ( +if not exist "%SPARK_JARS_DIR%" ( echo Failed to find Spark jars directory. echo You need to build Spark before running this program. exit /b 1 diff --git a/build/sbt-launch-lib.bash b/build/sbt-launch-lib.bash index 162bfbf2257c7..1d79989f3c3c3 100755 --- a/build/sbt-launch-lib.bash +++ b/build/sbt-launch-lib.bash @@ -39,7 +39,11 @@ dlog () { acquire_sbt_jar () { SBT_VERSION=`awk -F "=" '/sbt\.version/ {print $2}' ./project/build.properties` - URL1=https://dl.bintray.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch/${SBT_VERSION}/sbt-launch.jar + # DEFAULT_ARTIFACT_REPOSITORY env variable can be used to only fetch + # artifacts from internal repos only. 
+ # Ex: + # DEFAULT_ARTIFACT_REPOSITORY=https://artifacts.internal.com/libs-release/ + URL1=${DEFAULT_ARTIFACT_REPOSITORY:-https://repo1.maven.org/maven2/}org/scala-sbt/sbt-launch/${SBT_VERSION}/sbt-launch-${SBT_VERSION}.jar JAR=build/sbt-launch-${SBT_VERSION}.jar sbt_jar=$JAR diff --git a/common/kvstore/pom.xml b/common/kvstore/pom.xml index 39cdc6d6d6cd3..4ade8c2032b24 100644 --- a/common/kvstore/pom.xml +++ b/common/kvstore/pom.xml @@ -22,7 +22,7 @@ org.apache.spark spark-parent_2.12 - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT ../../pom.xml diff --git a/common/kvstore/src/main/java/org/apache/spark/util/kvstore/InMemoryStore.java b/common/kvstore/src/main/java/org/apache/spark/util/kvstore/InMemoryStore.java index 42e090bc83ed1..431c7e42774e4 100644 --- a/common/kvstore/src/main/java/org/apache/spark/util/kvstore/InMemoryStore.java +++ b/common/kvstore/src/main/java/org/apache/spark/util/kvstore/InMemoryStore.java @@ -164,8 +164,9 @@ public void clear() { } /** - * An alias class for the type "ConcurrentHashMap, Boolean>", which is used - * as a concurrent hashset for storing natural keys and the boolean value doesn't matter. + * An alias class for the type "{@literal ConcurrentHashMap, Boolean>}", + * which is used as a concurrent hashset for storing natural keys + * and the boolean value doesn't matter. */ private static class NaturalKeys extends ConcurrentHashMap, Boolean> {} diff --git a/common/kvstore/src/main/java/org/apache/spark/util/kvstore/LevelDBTypeInfo.java b/common/kvstore/src/main/java/org/apache/spark/util/kvstore/LevelDBTypeInfo.java index d7423537ddfcf..4d7f76f673865 100644 --- a/common/kvstore/src/main/java/org/apache/spark/util/kvstore/LevelDBTypeInfo.java +++ b/common/kvstore/src/main/java/org/apache/spark/util/kvstore/LevelDBTypeInfo.java @@ -133,7 +133,7 @@ class LevelDBTypeInfo { // First create the parent indices, then the child indices. ti.indices().forEach(idx -> { - // In LevelDB, there is no parent index for the NUTURAL INDEX. + // In LevelDB, there is no parent index for the NATURAL INDEX. 
if (idx.parent().isEmpty() || idx.value().equals(KVIndex.NATURAL_INDEX_NAME)) { indices.put(idx.value(), new Index(idx, ti.getAccessor(idx.value()), null)); } diff --git a/common/network-common/pom.xml b/common/network-common/pom.xml index 9d5bc9aae0719..0318f60d546e7 100644 --- a/common/network-common/pom.xml +++ b/common/network-common/pom.xml @@ -22,7 +22,7 @@ org.apache.spark spark-parent_2.12 - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT ../../pom.xml @@ -91,6 +91,10 @@ org.apache.commons commons-crypto + + org.roaringbitmap + RoaringBitmap + diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java index 6dcc703e92669..eb2882074d7c7 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClient.java @@ -303,7 +303,7 @@ public void close() { @Override public String toString() { return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("remoteAdress", channel.remoteAddress()) + .append("remoteAddress", channel.remoteAddress()) .append("clientId", clientId) .append("isActive", isActive()) .toString(); diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java index 24c436a504fa8..43408d43e577e 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java @@ -254,7 +254,7 @@ TransportClient createClient(InetSocketAddress address) // Disable Nagle's Algorithm since we don't want packets to wait .option(ChannelOption.TCP_NODELAY, true) .option(ChannelOption.SO_KEEPALIVE, true) - .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs()) + .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionCreationTimeoutMs()) .option(ChannelOption.ALLOCATOR, pooledAllocator); if (conf.receiveBuf() > 0) { @@ -280,9 +280,10 @@ public void initChannel(SocketChannel ch) { // Connect to the remote server long preConnect = System.nanoTime(); ChannelFuture cf = bootstrap.connect(address); - if (!cf.await(conf.connectionTimeoutMs())) { + if (!cf.await(conf.connectionCreationTimeoutMs())) { throw new IOException( - String.format("Connecting to %s timed out (%s ms)", address, conf.connectionTimeoutMs())); + String.format("Connecting to %s timed out (%s ms)", + address, conf.connectionCreationTimeoutMs())); } else if (cf.cause() != null) { throw new IOException(String.format("Failed to connect to %s", address), cf.cause()); } diff --git a/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthEngine.java b/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthEngine.java index 64fdb32a67ada..c2b2edc7f07d5 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthEngine.java +++ b/common/network-common/src/main/java/org/apache/spark/network/crypto/AuthEngine.java @@ -287,7 +287,7 @@ private byte[] doCipherOp(int mode, byte[] in, boolean isFinal) } } } catch (InternalError ie) { - // SPARK-25535. The commons-cryto library will throw InternalError if something goes wrong, + // SPARK-25535. 
The commons-crypto library will throw InternalError if something goes wrong, // and leave bad state behind in the Java wrappers, so it's not safe to use them afterwards. if (mode == Cipher.ENCRYPT_MODE) { this.encryptor = null; diff --git a/common/network-common/src/main/java/org/apache/spark/network/protocol/Encoders.java b/common/network-common/src/main/java/org/apache/spark/network/protocol/Encoders.java index 490915f6de4b3..8bab808ad6864 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/protocol/Encoders.java +++ b/common/network-common/src/main/java/org/apache/spark/network/protocol/Encoders.java @@ -17,9 +17,12 @@ package org.apache.spark.network.protocol; +import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import io.netty.buffer.ByteBuf; +import org.roaringbitmap.RoaringBitmap; /** Provides a canonical set of Encoders for simple types. */ public class Encoders { @@ -44,6 +47,51 @@ public static String decode(ByteBuf buf) { } } + /** + * Bitmaps are encoded with their serialization length followed by the serialization bytes. + * + * @since 3.1.0 + */ + public static class Bitmaps { + public static int encodedLength(RoaringBitmap b) { + // Compress the bitmap before serializing it. Note that since BlockTransferMessage + // needs to invoke encodedLength first to figure out the length for the ByteBuf, it + // guarantees that the bitmap will always be compressed before being serialized. + b.trim(); + b.runOptimize(); + return b.serializedSizeInBytes(); + } + + /** + * The input ByteBuf for this encoder should have enough write capacity to fit the serialized + * bitmap. Other encoders which use {@link io.netty.buffer.AbstractByteBuf#writeBytes(byte[])} + * to write can expand the buf as writeBytes calls {@link ByteBuf#ensureWritable} internally. + * However, this encoder doesn't rely on netty's writeBytes and will fail if the input buf + * doesn't have enough write capacity. + */ + public static void encode(ByteBuf buf, RoaringBitmap b) { + // RoaringBitmap requires nio ByteBuffer for serde. We expose the netty ByteBuf as a nio + // ByteBuffer. Here, we need to explicitly manage the index so we can write into the + // ByteBuffer, and the write is reflected in the underneath ByteBuf. + ByteBuffer byteBuffer = buf.nioBuffer(buf.writerIndex(), buf.writableBytes()); + b.serialize(byteBuffer); + buf.writerIndex(buf.writerIndex() + byteBuffer.position()); + } + + public static RoaringBitmap decode(ByteBuf buf) { + RoaringBitmap bitmap = new RoaringBitmap(); + try { + bitmap.deserialize(buf.nioBuffer()); + // RoaringBitmap deserialize does not advance the reader index of the underlying ByteBuf. + // Manually update the index here. + buf.readerIndex(buf.readerIndex() + bitmap.serializedSizeInBytes()); + } catch (IOException e) { + throw new RuntimeException("Exception while decoding bitmap", e); + } + return bitmap; + } + } + /** Byte arrays are encoded with their length followed by bytes. */ public static class ByteArrays { public static int encodedLength(byte[] arr) { @@ -135,4 +183,35 @@ public static long[] decode(ByteBuf buf) { return longs; } } + + /** + * Bitmap arrays are encoded with the number of bitmaps followed by per-Bitmap encoding. 
+ * + * @since 3.1.0 + */ + public static class BitmapArrays { + public static int encodedLength(RoaringBitmap[] bitmaps) { + int totalLength = 4; + for (RoaringBitmap b : bitmaps) { + totalLength += Bitmaps.encodedLength(b); + } + return totalLength; + } + + public static void encode(ByteBuf buf, RoaringBitmap[] bitmaps) { + buf.writeInt(bitmaps.length); + for (RoaringBitmap b : bitmaps) { + Bitmaps.encode(buf, b); + } + } + + public static RoaringBitmap[] decode(ByteBuf buf) { + int numBitmaps = buf.readInt(); + RoaringBitmap[] bitmaps = new RoaringBitmap[numBitmaps]; + for (int i = 0; i < bitmaps.length; i ++) { + bitmaps[i] = Bitmaps.decode(buf); + } + return bitmaps; + } + } } diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/ChunkFetchRequestHandler.java b/common/network-common/src/main/java/org/apache/spark/network/server/ChunkFetchRequestHandler.java index 82810dacdad84..9a71cf593e28c 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/ChunkFetchRequestHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/ChunkFetchRequestHandler.java @@ -88,12 +88,14 @@ public void processFetchRequest( logger.trace("Received req from {} to fetch block {}", getRemoteAddress(channel), msg.streamChunkId); } - long chunksBeingTransferred = streamManager.chunksBeingTransferred(); - if (chunksBeingTransferred >= maxChunksBeingTransferred) { - logger.warn("The number of chunks being transferred {} is above {}, close the connection.", - chunksBeingTransferred, maxChunksBeingTransferred); - channel.close(); - return; + if (maxChunksBeingTransferred < Long.MAX_VALUE) { + long chunksBeingTransferred = streamManager.chunksBeingTransferred(); + if (chunksBeingTransferred >= maxChunksBeingTransferred) { + logger.warn("The number of chunks being transferred {} is above {}, close the connection.", + chunksBeingTransferred, maxChunksBeingTransferred); + channel.close(); + return; + } } ManagedBuffer buf; try { diff --git a/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java b/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java index f178928006902..4a30f8de07827 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java +++ b/common/network-common/src/main/java/org/apache/spark/network/server/TransportRequestHandler.java @@ -124,12 +124,14 @@ private void processStreamRequest(final StreamRequest req) { req.streamId); } - long chunksBeingTransferred = streamManager.chunksBeingTransferred(); - if (chunksBeingTransferred >= maxChunksBeingTransferred) { - logger.warn("The number of chunks being transferred {} is above {}, close the connection.", - chunksBeingTransferred, maxChunksBeingTransferred); - channel.close(); - return; + if (maxChunksBeingTransferred < Long.MAX_VALUE) { + long chunksBeingTransferred = streamManager.chunksBeingTransferred(); + if (chunksBeingTransferred >= maxChunksBeingTransferred) { + logger.warn("The number of chunks being transferred {} is above {}, close the connection.", + chunksBeingTransferred, maxChunksBeingTransferred); + channel.close(); + return; + } } ManagedBuffer buf; try { diff --git a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java index 646e4278811f4..f051042a7adb4 100644 --- 
a/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java +++ b/common/network-common/src/main/java/org/apache/spark/network/util/TransportConf.java @@ -19,6 +19,7 @@ import java.util.Locale; import java.util.Properties; +import java.util.concurrent.TimeUnit; import com.google.common.primitives.Ints; import io.netty.util.NettyRuntime; @@ -31,6 +32,7 @@ public class TransportConf { private final String SPARK_NETWORK_IO_MODE_KEY; private final String SPARK_NETWORK_IO_PREFERDIRECTBUFS_KEY; private final String SPARK_NETWORK_IO_CONNECTIONTIMEOUT_KEY; + private final String SPARK_NETWORK_IO_CONNECTIONCREATIONTIMEOUT_KEY; private final String SPARK_NETWORK_IO_BACKLOG_KEY; private final String SPARK_NETWORK_IO_NUMCONNECTIONSPERPEER_KEY; private final String SPARK_NETWORK_IO_SERVERTHREADS_KEY; @@ -54,6 +56,7 @@ public TransportConf(String module, ConfigProvider conf) { SPARK_NETWORK_IO_MODE_KEY = getConfKey("io.mode"); SPARK_NETWORK_IO_PREFERDIRECTBUFS_KEY = getConfKey("io.preferDirectBufs"); SPARK_NETWORK_IO_CONNECTIONTIMEOUT_KEY = getConfKey("io.connectionTimeout"); + SPARK_NETWORK_IO_CONNECTIONCREATIONTIMEOUT_KEY = getConfKey("io.connectionCreationTimeout"); SPARK_NETWORK_IO_BACKLOG_KEY = getConfKey("io.backLog"); SPARK_NETWORK_IO_NUMCONNECTIONSPERPEER_KEY = getConfKey("io.numConnectionsPerPeer"); SPARK_NETWORK_IO_SERVERTHREADS_KEY = getConfKey("io.serverThreads"); @@ -94,7 +97,7 @@ public boolean preferDirectBufs() { return conf.getBoolean(SPARK_NETWORK_IO_PREFERDIRECTBUFS_KEY, true); } - /** Connect timeout in milliseconds. Default 120 secs. */ + /** Connection idle timeout in milliseconds. Default 120 secs. */ public int connectionTimeoutMs() { long defaultNetworkTimeoutS = JavaUtils.timeStringAsSec( conf.get("spark.network.timeout", "120s")); @@ -103,6 +106,14 @@ public int connectionTimeoutMs() { return (int) defaultTimeoutMs; } + /** Connect creation timeout in milliseconds. Default 30 secs. */ + public int connectionCreationTimeoutMs() { + long connectionTimeoutS = TimeUnit.MILLISECONDS.toSeconds(connectionTimeoutMs()); + long defaultTimeoutMs = JavaUtils.timeStringAsSec( + conf.get(SPARK_NETWORK_IO_CONNECTIONCREATIONTIMEOUT_KEY, connectionTimeoutS + "s")) * 1000; + return (int) defaultTimeoutMs; + } + /** Number of concurrent connections between two nodes for fetching data. */ public int numConnectionsPerPeer() { return conf.getInt(SPARK_NETWORK_IO_NUMCONNECTIONSPERPEER_KEY, 1); @@ -363,4 +374,49 @@ public boolean useOldFetchProtocol() { return conf.getBoolean("spark.shuffle.useOldFetchProtocol", false); } + /** + * Class name of the implementation of MergedShuffleFileManager that merges the blocks + * pushed to it when push-based shuffle is enabled. By default, push-based shuffle is disabled at + * a cluster level because this configuration is set to + * 'org.apache.spark.network.shuffle.ExternalBlockHandler$NoOpMergedShuffleFileManager'. + * To turn on push-based shuffle at a cluster level, set the configuration to + * 'org.apache.spark.network.shuffle.RemoteBlockPushResolver'. + */ + public String mergedShuffleFileManagerImpl() { + return conf.get("spark.shuffle.server.mergedShuffleFileManagerImpl", + "org.apache.spark.network.shuffle.ExternalBlockHandler$NoOpMergedShuffleFileManager"); + } + + /** + * The minimum size of a chunk when dividing a merged shuffle file into multiple chunks during + * push-based shuffle. + * A merged shuffle file consists of multiple small shuffle blocks. 
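To make the new TransportConf getters concrete, a minimal resolution sketch follows; the MapConfigProvider helper from this module is used for illustration and the values are assumptions, not defaults. With the "shuffle" module, io.* keys resolve as spark.shuffle.io.*, while the merged-shuffle settings are read under their full spark.shuffle.server.* names:

    TransportConf conf = new TransportConf("shuffle", new MapConfigProvider(ImmutableMap.of(
        "spark.shuffle.io.connectionCreationTimeout", "30s",
        "spark.shuffle.server.minChunkSizeInMergedShuffleFile", "4m")));
    int creationTimeoutMs = conf.connectionCreationTimeoutMs();   // 30000, instead of falling back to io.connectionTimeout
    int minChunkBytes = conf.minChunkSizeInMergedShuffleFile();   // 4 * 1024 * 1024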
Fetching the + * complete merged shuffle file in a single response increases the memory requirements for the + * clients. Instead of serving the entire merged file, the shuffle service serves the + * merged file in `chunks`. A `chunk` constitutes few shuffle blocks in entirety and this + * configuration controls how big a chunk can get. A corresponding index file for each merged + * shuffle file will be generated indicating chunk boundaries. + */ + public int minChunkSizeInMergedShuffleFile() { + return Ints.checkedCast(JavaUtils.byteStringAsBytes( + conf.get("spark.shuffle.server.minChunkSizeInMergedShuffleFile", "2m"))); + } + + /** + * The size of cache in memory which is used in push-based shuffle for storing merged index files. + */ + public long mergedIndexCacheSize() { + return JavaUtils.byteStringAsBytes( + conf.get("spark.shuffle.server.mergedIndexCacheSize", "100m")); + } + + /** + * The threshold for number of IOExceptions while merging shuffle blocks to a shuffle partition. + * When the number of IOExceptions while writing to merged shuffle data/index/meta file exceed + * this threshold then the shuffle server will respond back to client to stop pushing shuffle + * blocks for this shuffle partition. + */ + public int ioExceptionsThresholdDuringMerge() { + return conf.getInt("spark.shuffle.server.ioExceptionsThresholdDuringMerge", 4); + } } diff --git a/common/network-common/src/test/java/org/apache/spark/network/crypto/AuthEngineSuite.java b/common/network-common/src/test/java/org/apache/spark/network/crypto/AuthEngineSuite.java index 0790f0079c2bd..1c2061699a128 100644 --- a/common/network-common/src/test/java/org/apache/spark/network/crypto/AuthEngineSuite.java +++ b/common/network-common/src/test/java/org/apache/spark/network/crypto/AuthEngineSuite.java @@ -150,8 +150,8 @@ public void testEncryptedMessage() throws Exception { ByteArrayWritableChannel channel = new ByteArrayWritableChannel(data.length); TransportCipher.EncryptedMessage emsg = handler.createEncryptedMessage(buf); - while (emsg.transfered() < emsg.count()) { - emsg.transferTo(channel, emsg.transfered()); + while (emsg.transferred() < emsg.count()) { + emsg.transferTo(channel, emsg.transferred()); } assertEquals(data.length, channel.length()); } finally { @@ -196,9 +196,9 @@ public Long answer(InvocationOnMock invocationOnMock) throws Throwable { TransportCipher.EncryptedMessage emsg = handler.createEncryptedMessage(region); ByteArrayWritableChannel channel = new ByteArrayWritableChannel(testDataLength); // "transferTo" should act correctly when the underlying FileRegion transfers 0 bytes. - assertEquals(0L, emsg.transferTo(channel, emsg.transfered())); - assertEquals(testDataLength, emsg.transferTo(channel, emsg.transfered())); - assertEquals(emsg.transfered(), emsg.count()); + assertEquals(0L, emsg.transferTo(channel, emsg.transferred())); + assertEquals(testDataLength, emsg.transferTo(channel, emsg.transferred())); + assertEquals(emsg.transferred(), emsg.count()); assertEquals(4, channel.length()); } finally { client.close(); diff --git a/common/network-common/src/test/java/org/apache/spark/network/protocol/EncodersSuite.java b/common/network-common/src/test/java/org/apache/spark/network/protocol/EncodersSuite.java new file mode 100644 index 0000000000000..6e89702c04396 --- /dev/null +++ b/common/network-common/src/test/java/org/apache/spark/network/protocol/EncodersSuite.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.network.protocol; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import org.junit.Test; +import org.roaringbitmap.RoaringBitmap; + +import static org.junit.Assert.*; + +/** + * Tests for {@link Encoders}. + */ +public class EncodersSuite { + + @Test + public void testRoaringBitmapEncodeDecode() { + RoaringBitmap bitmap = new RoaringBitmap(); + bitmap.add(1, 2, 3); + ByteBuf buf = Unpooled.buffer(Encoders.Bitmaps.encodedLength(bitmap)); + Encoders.Bitmaps.encode(buf, bitmap); + RoaringBitmap decodedBitmap = Encoders.Bitmaps.decode(buf); + assertEquals(bitmap, decodedBitmap); + } + + @Test (expected = java.nio.BufferOverflowException.class) + public void testRoaringBitmapEncodeShouldFailWhenBufferIsSmall() { + RoaringBitmap bitmap = new RoaringBitmap(); + bitmap.add(1, 2, 3); + ByteBuf buf = Unpooled.buffer(4); + Encoders.Bitmaps.encode(buf, bitmap); + } + + @Test + public void testBitmapArraysEncodeDecode() { + RoaringBitmap[] bitmaps = new RoaringBitmap[] { + new RoaringBitmap(), + new RoaringBitmap(), + new RoaringBitmap(), // empty + new RoaringBitmap(), + new RoaringBitmap() + }; + bitmaps[0].add(1, 2, 3); + bitmaps[1].add(1, 2, 4); + bitmaps[3].add(7L, 9L); + bitmaps[4].add(1L, 100L); + ByteBuf buf = Unpooled.buffer(Encoders.BitmapArrays.encodedLength(bitmaps)); + Encoders.BitmapArrays.encode(buf, bitmaps); + RoaringBitmap[] decodedBitmaps = Encoders.BitmapArrays.decode(buf); + assertArrayEquals(bitmaps, decodedBitmaps); + } +} diff --git a/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java b/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java index 3bff34e210e3c..af1c2878672c0 100644 --- a/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java +++ b/common/network-common/src/test/java/org/apache/spark/network/protocol/MessageWithHeaderSuite.java @@ -129,8 +129,8 @@ private void testFileRegionBody(int totalWrites, int writesPerCall) throws Excep private ByteBuf doWrite(MessageWithHeader msg, int minExpectedWrites) throws Exception { int writes = 0; ByteArrayWritableChannel channel = new ByteArrayWritableChannel((int) msg.count()); - while (msg.transfered() < msg.count()) { - msg.transferTo(channel, msg.transfered()); + while (msg.transferred() < msg.count()) { + msg.transferTo(channel, msg.transferred()); writes++; } assertTrue("Not enough writes!", minExpectedWrites <= writes); diff --git a/common/network-common/src/test/java/org/apache/spark/network/sasl/SparkSaslSuite.java b/common/network-common/src/test/java/org/apache/spark/network/sasl/SparkSaslSuite.java index ecaeec98da182..32c9acd327213 100644 --- a/common/network-common/src/test/java/org/apache/spark/network/sasl/SparkSaslSuite.java +++ 
b/common/network-common/src/test/java/org/apache/spark/network/sasl/SparkSaslSuite.java @@ -191,28 +191,28 @@ public void testEncryptedMessage() throws Exception { SaslEncryption.EncryptedMessage emsg = new SaslEncryption.EncryptedMessage(backend, msg, 1024); - long count = emsg.transferTo(channel, emsg.transfered()); + long count = emsg.transferTo(channel, emsg.transferred()); assertTrue(count < data.length); assertTrue(count > 0); // Here, the output buffer is full so nothing should be transferred. - assertEquals(0, emsg.transferTo(channel, emsg.transfered())); + assertEquals(0, emsg.transferTo(channel, emsg.transferred())); // Now there's room in the buffer, but not enough to transfer all the remaining data, // so the dummy count should be returned. channel.reset(); - assertEquals(1, emsg.transferTo(channel, emsg.transfered())); + assertEquals(1, emsg.transferTo(channel, emsg.transferred())); // Eventually, the whole message should be transferred. for (int i = 0; i < data.length / 32 - 2; i++) { channel.reset(); - assertEquals(1, emsg.transferTo(channel, emsg.transfered())); + assertEquals(1, emsg.transferTo(channel, emsg.transferred())); } channel.reset(); - count = emsg.transferTo(channel, emsg.transfered()); + count = emsg.transferTo(channel, emsg.transferred()); assertTrue("Unexpected count: " + count, count > 1 && count < data.length); - assertEquals(data.length, emsg.transfered()); + assertEquals(data.length, emsg.transferred()); } finally { msg.release(); } @@ -237,9 +237,9 @@ public void testEncryptedMessageChunking() throws Exception { new SaslEncryption.EncryptedMessage(backend, msg.convertToNetty(), data.length / 8); ByteArrayWritableChannel channel = new ByteArrayWritableChannel(data.length); - while (emsg.transfered() < emsg.count()) { + while (emsg.transferred() < emsg.count()) { channel.reset(); - emsg.transferTo(channel, emsg.transfered()); + emsg.transferTo(channel, emsg.transferred()); } verify(backend, times(8)).wrap(any(byte[].class), anyInt(), anyInt()); diff --git a/common/network-common/src/test/java/org/apache/spark/network/server/OneForOneStreamManagerSuite.java b/common/network-common/src/test/java/org/apache/spark/network/server/OneForOneStreamManagerSuite.java index 45e1836da641f..634b40ed450ee 100644 --- a/common/network-common/src/test/java/org/apache/spark/network/server/OneForOneStreamManagerSuite.java +++ b/common/network-common/src/test/java/org/apache/spark/network/server/OneForOneStreamManagerSuite.java @@ -72,7 +72,7 @@ public void testMissingChunk() { Assert.assertNotNull(getChunk(manager, streamId, 2)); manager.connectionTerminated(dummyChannel); - // loaded buffers are not released yet as in production a MangedBuffer returned by getChunk() + // loaded buffers are not released yet as in production a ManagedBuffer returned by getChunk() // would only be released by Netty after it is written to the network Mockito.verify(buffer1, Mockito.never()).release(); Mockito.verify(buffer2, Mockito.never()).release(); diff --git a/common/network-common/src/test/java/org/apache/spark/network/util/TransportFrameDecoderSuite.java b/common/network-common/src/test/java/org/apache/spark/network/util/TransportFrameDecoderSuite.java index 4b67aa80351d2..163c52b023822 100644 --- a/common/network-common/src/test/java/org/apache/spark/network/util/TransportFrameDecoderSuite.java +++ b/common/network-common/src/test/java/org/apache/spark/network/util/TransportFrameDecoderSuite.java @@ -98,7 +98,7 @@ public void testConsolidationPerf() throws Exception { writtenBytes += 
pieceBytes; } logger.info("Writing 300MiB frame buf with consolidation of threshold " + threshold - + " took " + totalTime + " milis"); + + " took " + totalTime + " millis"); } finally { for (ByteBuf buf : retained) { release(buf); diff --git a/common/network-shuffle/pom.xml b/common/network-shuffle/pom.xml index 00f1defbb0093..6be6df993478d 100644 --- a/common/network-shuffle/pom.xml +++ b/common/network-shuffle/pom.xml @@ -22,7 +22,7 @@ org.apache.spark spark-parent_2.12 - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT ../../pom.xml @@ -47,6 +47,11 @@ metrics-core + + org.apache.spark + spark-tags_${scala.binary.version} + + org.slf4j @@ -57,6 +62,10 @@ com.google.guava guava + + org.roaringbitmap + RoaringBitmap + @@ -66,11 +75,6 @@ test-jar test - - org.apache.spark - spark-tags_${scala.binary.version} - test - + + com.amazonaws + aws-java-sdk + + + org.apache.commons commons-crypto diff --git a/core/src/main/java/org/apache/spark/SparkFirehoseListener.java b/core/src/main/java/org/apache/spark/SparkFirehoseListener.java index c0e72b57d48bd..7cb2455affe48 100644 --- a/core/src/main/java/org/apache/spark/SparkFirehoseListener.java +++ b/core/src/main/java/org/apache/spark/SparkFirehoseListener.java @@ -17,6 +17,7 @@ package org.apache.spark; +import org.apache.spark.annotation.DeveloperApi; import org.apache.spark.scheduler.*; /** @@ -27,7 +28,11 @@ * new methods to SparkListener: forgetting to add a method will result in a compilation error (if * this was a concrete Scala class, default implementations of new event handlers would be inherited * from the SparkListener trait). + * + * Please note until Spark 3.1.0 this was missing the DevelopApi annotation, this needs to be + * taken into account if changing this API before a major release. */ +@DeveloperApi public class SparkFirehoseListener implements SparkListenerInterface { public void onEvent(SparkListenerEvent event) { } @@ -124,34 +129,67 @@ public final void onExecutorBlacklisted(SparkListenerExecutorBlacklisted executo onEvent(executorBlacklisted); } + @Override + public final void onExecutorExcluded(SparkListenerExecutorExcluded executorExcluded) { + onEvent(executorExcluded); + } + @Override public void onExecutorBlacklistedForStage( SparkListenerExecutorBlacklistedForStage executorBlacklistedForStage) { onEvent(executorBlacklistedForStage); } + @Override + public void onExecutorExcludedForStage( + SparkListenerExecutorExcludedForStage executorExcludedForStage) { + onEvent(executorExcludedForStage); + } + @Override public void onNodeBlacklistedForStage( SparkListenerNodeBlacklistedForStage nodeBlacklistedForStage) { onEvent(nodeBlacklistedForStage); } + @Override + public void onNodeExcludedForStage( + SparkListenerNodeExcludedForStage nodeExcludedForStage) { + onEvent(nodeExcludedForStage); + } + @Override public final void onExecutorUnblacklisted( SparkListenerExecutorUnblacklisted executorUnblacklisted) { onEvent(executorUnblacklisted); } + @Override + public final void onExecutorUnexcluded( + SparkListenerExecutorUnexcluded executorUnexcluded) { + onEvent(executorUnexcluded); + } + @Override public final void onNodeBlacklisted(SparkListenerNodeBlacklisted nodeBlacklisted) { onEvent(nodeBlacklisted); } + @Override + public final void onNodeExcluded(SparkListenerNodeExcluded nodeExcluded) { + onEvent(nodeExcluded); + } + @Override public final void onNodeUnblacklisted(SparkListenerNodeUnblacklisted nodeUnblacklisted) { onEvent(nodeUnblacklisted); } + @Override + public final void onNodeUnexcluded(SparkListenerNodeUnexcluded nodeUnexcluded) 
{ + onEvent(nodeUnexcluded); + } + @Override public void onBlockUpdated(SparkListenerBlockUpdated blockUpdated) { onEvent(blockUpdated); diff --git a/core/src/main/java/org/apache/spark/api/plugin/ExecutorPlugin.java b/core/src/main/java/org/apache/spark/api/plugin/ExecutorPlugin.java index 4961308035163..481bf985f1c6c 100644 --- a/core/src/main/java/org/apache/spark/api/plugin/ExecutorPlugin.java +++ b/core/src/main/java/org/apache/spark/api/plugin/ExecutorPlugin.java @@ -19,6 +19,7 @@ import java.util.Map; +import org.apache.spark.TaskFailedReason; import org.apache.spark.annotation.DeveloperApi; /** @@ -54,4 +55,45 @@ default void init(PluginContext ctx, Map extraConf) {} */ default void shutdown() {} + /** + * Perform any action before the task is run. + *
+ * This method is invoked from the same thread in which the task will be executed. + * Task-specific information can be accessed via {@link org.apache.spark.TaskContext#get}. + *
+ * Plugin authors should avoid expensive operations here, as this method will be called + * on every task, and doing something expensive can significantly slow down a job. + * It is not recommended for a user to call a remote service, for example. + *
+ * Exceptions thrown from this method do not propagate - they're caught, + * logged, and suppressed. Therefore, exceptions thrown while executing this method won't + * make the job fail. + * + * @since 3.1.0 + */ + default void onTaskStart() {} + + /** + * Perform an action after a task completes without exceptions. + *
+ * As exceptions thrown from {@link #onTaskStart() onTaskStart} are suppressed, this method + * will still be invoked even if the corresponding {@link #onTaskStart} call for this + * task failed. + *
+ * Same warnings of {@link #onTaskStart() onTaskStart} apply here. + * + * @since 3.1.0 + */ + default void onTaskSucceeded() {} + + /** + * Perform an action after a task completes with exceptions. + *
+ * Same warnings of {@link #onTaskStart() onTaskStart} apply here. + * + * @param failureReason the exception thrown from the failed task. + * + * @since 3.1.0 + */ + default void onTaskFailed(TaskFailedReason failureReason) {} } diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java b/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java index 256789b8c7827..3dbee1b13d287 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/BypassMergeSortShuffleWriter.java @@ -31,7 +31,6 @@ import scala.Tuple2; import scala.collection.Iterator; -import com.google.common.annotations.VisibleForTesting; import com.google.common.io.Closeables; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -178,8 +177,8 @@ public void write(Iterator> records) throws IOException { } } - @VisibleForTesting - long[] getPartitionLengths() { + @Override + public long[] getPartitionLengths() { return partitionLengths; } diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java b/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java index 79e38a824fea4..e8f94ba8ffeee 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java @@ -88,6 +88,7 @@ public class UnsafeShuffleWriter extends ShuffleWriter { @Nullable private MapStatus mapStatus; @Nullable private ShuffleExternalSorter sorter; + @Nullable private long[] partitionLengths; private long peakMemoryUsedBytes = 0; /** Subclass of ByteArrayOutputStream that exposes `buf` directly. */ @@ -219,7 +220,6 @@ void closeAndWriteOutput() throws IOException { serOutputStream = null; final SpillInfo[] spills = sorter.closeAndGetSpills(); sorter = null; - final long[] partitionLengths; try { partitionLengths = mergeSpills(spills); } finally { @@ -543,4 +543,9 @@ public void close() throws IOException { channel.close(); } } + + @Override + public long[] getPartitionLengths() { + return partitionLengths; + } } diff --git a/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java b/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java index 8eea9db393aff..f474c30b8b3d8 100644 --- a/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java +++ b/core/src/main/java/org/apache/spark/unsafe/map/BytesToBytesMap.java @@ -393,10 +393,12 @@ public void remove() { } private void handleFailedDelete() { - // remove the spill file from disk - File file = spillWriters.removeFirst().getFile(); - if (file != null && file.exists() && !file.delete()) { - logger.error("Was unable to delete spill file {}", file.getAbsolutePath()); + if (spillWriters.size() > 0) { + // remove the spill file from disk + File file = spillWriters.removeFirst().getFile(); + if (file != null && file.exists() && !file.delete()) { + logger.error("Was unable to delete spill file {}", file.getAbsolutePath()); + } } } } @@ -808,12 +810,21 @@ public boolean append(Object kbase, long koff, int klen, Object vbase, long voff longArray.set(pos * 2 + 1, keyHashcode); isDefined = true; - // We use two array entries per key, so the array size is twice the capacity. - // We should compare the current capacity of the array, instead of its size. 
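Stepping back to the ExecutorPlugin hooks added above: a small, hypothetical implementation may make the lifecycle easier to read. The class name and counter are invented; only the hook signatures and TaskFailedReason come from the interface. Such a plugin would typically be returned from a SparkPlugin's executorPlugin() and enabled through spark.plugins.

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.spark.TaskFailedReason;
    import org.apache.spark.api.plugin.ExecutorPlugin;

    public class TaskOutcomeCounterPlugin implements ExecutorPlugin {
      private final AtomicLong failedTasks = new AtomicLong();

      @Override
      public void onTaskStart() {
        // Keep this cheap: it runs on the task's thread for every task, and any exception
        // it throws is caught and logged rather than failing the task.
      }

      @Override
      public void onTaskSucceeded() { }

      @Override
      public void onTaskFailed(TaskFailedReason failureReason) {
        failedTasks.incrementAndGet();
      }
    }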
- if (numKeys >= growthThreshold && longArray.size() / 2 < MAX_CAPACITY) { - try { - growAndRehash(); - } catch (SparkOutOfMemoryError oom) { + // If the map has reached its growth threshold, try to grow it. + if (numKeys >= growthThreshold) { + // We use two array entries per key, so the array size is twice the capacity. + // We should compare the current capacity of the array, instead of its size. + if (longArray.size() / 2 < MAX_CAPACITY) { + try { + growAndRehash(); + } catch (SparkOutOfMemoryError oom) { + canGrowArray = false; + } + } else { + // The map is already at MAX_CAPACITY and cannot grow. Instead, we prevent it from + // accepting any more new elements to make sure we don't exceed the load factor. If we + // need to spill later, this allows UnsafeKVExternalSorter to reuse the array for + // sorting. canGrowArray = false; } } diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java index 55e4e609c3c7b..dda8ed4c239ae 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorter.java @@ -203,6 +203,10 @@ public long spill(long size, MemoryConsumer trigger) throws IOException { } if (inMemSorter == null || inMemSorter.numRecords() <= 0) { + // There could still be some memory allocated when there are no records in the in-memory + // sorter. We will not spill it however, to ensure that we can always process at least one + // record before spilling. See the comments in `allocateMemoryForRecordIfNecessary` for why + // this is necessary. return 0L; } @@ -224,7 +228,7 @@ public long spill(long size, MemoryConsumer trigger) throws IOException { // Note that this is more-or-less going to be a multiple of the page size, so wasted space in // pages will currently be counted as memory spilled even though that space isn't actually // written to disk. This also counts the space needed to store the sorter's pointer array. - inMemSorter.reset(); + inMemSorter.freeMemory(); // Reset the in-memory sorter's pointer array only after freeing up the memory pages holding the // records. Otherwise, if the task is over allocated memory, then without freeing the memory // pages, we might not be able to get memory for the pointer array. @@ -325,7 +329,7 @@ public void cleanupResources() { deleteSpillFiles(); freeMemory(); if (inMemSorter != null) { - inMemSorter.free(); + inMemSorter.freeMemory(); inMemSorter = null; } } @@ -339,40 +343,53 @@ public void cleanupResources() { private void growPointerArrayIfNecessary() throws IOException { assert(inMemSorter != null); if (!inMemSorter.hasSpaceForAnotherRecord()) { + if (inMemSorter.numRecords() <= 0) { + // Spilling was triggered just before this method was called. The pointer array was freed + // during the spill, so a new pointer array needs to be allocated here. + LongArray array = allocateArray(inMemSorter.getInitialSize()); + inMemSorter.expandPointerArray(array); + return; + } + long used = inMemSorter.getMemoryUsage(); - LongArray array; + LongArray array = null; try { // could trigger spilling array = allocateArray(used / 8 * 2); } catch (TooLargePageException e) { // The pointer array is too big to fix in a single page, spill. 
spill(); - return; } catch (SparkOutOfMemoryError e) { - // should have trigger spilling - if (!inMemSorter.hasSpaceForAnotherRecord()) { + if (inMemSorter.numRecords() > 0) { logger.error("Unable to grow the pointer array"); throw e; } - return; + // The new array could not be allocated, but that is not an issue as it is longer needed, + // as all records were spilled. } - // check if spilling is triggered or not - if (inMemSorter.hasSpaceForAnotherRecord()) { - freeArray(array); - } else { - inMemSorter.expandPointerArray(array); + + if (inMemSorter.numRecords() <= 0) { + // Spilling was triggered while trying to allocate the new array. + if (array != null) { + // We succeeded in allocating the new array, but, since all records were spilled, a + // smaller array would also suffice. + freeArray(array); + } + // The pointer array was freed during the spill, so a new pointer array needs to be + // allocated here. + array = allocateArray(inMemSorter.getInitialSize()); } + inMemSorter.expandPointerArray(array); } } /** - * Allocates more memory in order to insert an additional record. This will request additional - * memory from the memory manager and spill if the requested memory can not be obtained. + * Allocates an additional page in order to insert an additional record. This will request + * additional memory from the memory manager and spill if the requested memory can not be + * obtained. * * @param required the required space in the data page, in bytes, including space for storing - * the record size. This must be less than or equal to the page size (records - * that exceed the page size are handled via a different code path which uses - * special overflow pages). + * the record size. */ private void acquireNewPageIfNecessary(int required) { if (currentPage == null || @@ -384,6 +401,37 @@ private void acquireNewPageIfNecessary(int required) { } } + /** + * Allocates more memory in order to insert an additional record. This will request additional + * memory from the memory manager and spill if the requested memory can not be obtained. + * + * @param required the required space in the data page, in bytes, including space for storing + * the record size. + */ + private void allocateMemoryForRecordIfNecessary(int required) throws IOException { + // Step 1: + // Ensure that the pointer array has space for another record. This may cause a spill. + growPointerArrayIfNecessary(); + // Step 2: + // Ensure that the last page has space for another record. This may cause a spill. + acquireNewPageIfNecessary(required); + // Step 3: + // The allocation in step 2 could have caused a spill, which would have freed the pointer + // array allocated in step 1. Therefore we need to check again whether we have to allocate + // a new pointer array. + // + // If the allocation in this step causes a spill event then it will not cause the page + // allocated in the previous step to be freed. The function `spill` only frees memory if at + // least one record has been inserted in the in-memory sorter. This will not be the case if + // we have spilled in the previous step. + // + // If we did not spill in the previous step then `growPointerArrayIfNecessary` will be a + // no-op that does not allocate any memory, and therefore can't cause a spill event. + // + // Thus there is no need to call `acquireNewPageIfNecessary` again after this step. + growPointerArrayIfNecessary(); + } + /** * Write a record to the sorter. 
*/ @@ -398,11 +446,10 @@ public void insertRecord( spill(); } - growPointerArrayIfNecessary(); - int uaoSize = UnsafeAlignedOffset.getUaoSize(); + final int uaoSize = UnsafeAlignedOffset.getUaoSize(); // Need 4 or 8 bytes to store the record length. final int required = length + uaoSize; - acquireNewPageIfNecessary(required); + allocateMemoryForRecordIfNecessary(required); final Object base = currentPage.getBaseObject(); final long recordAddress = taskMemoryManager.encodePageNumberAndOffset(currentPage, pageCursor); @@ -425,10 +472,9 @@ public void insertKVRecord(Object keyBase, long keyOffset, int keyLen, Object valueBase, long valueOffset, int valueLen, long prefix, boolean prefixIsNull) throws IOException { - growPointerArrayIfNecessary(); - int uaoSize = UnsafeAlignedOffset.getUaoSize(); + final int uaoSize = UnsafeAlignedOffset.getUaoSize(); final int required = keyLen + valueLen + (2 * uaoSize); - acquireNewPageIfNecessary(required); + allocateMemoryForRecordIfNecessary(required); final Object base = currentPage.getBaseObject(); final long recordAddress = taskMemoryManager.encodePageNumberAndOffset(currentPage, pageCursor); @@ -501,10 +547,14 @@ private static void spillIterator(UnsafeSorterIterator inMemIterator, */ class SpillableIterator extends UnsafeSorterIterator { private UnsafeSorterIterator upstream; - private UnsafeSorterIterator nextUpstream = null; private MemoryBlock lastPage = null; private boolean loaded = false; - private int numRecords = 0; + private int numRecords; + + private Object currentBaseObject; + private long currentBaseOffset; + private int currentRecordLength; + private long currentKeyPrefix; SpillableIterator(UnsafeSorterIterator inMemIterator) { this.upstream = inMemIterator; @@ -516,23 +566,32 @@ public int getNumRecords() { return numRecords; } + @Override + public long getCurrentPageNumber() { + throw new UnsupportedOperationException(); + } + public long spill() throws IOException { synchronized (this) { - if (!(upstream instanceof UnsafeInMemorySorter.SortedIterator && nextUpstream == null - && numRecords > 0)) { + if (inMemSorter == null) { return 0L; } - UnsafeInMemorySorter.SortedIterator inMemIterator = - ((UnsafeInMemorySorter.SortedIterator) upstream).clone(); + long currentPageNumber = upstream.getCurrentPageNumber(); - ShuffleWriteMetrics writeMetrics = new ShuffleWriteMetrics(); - // Iterate over the records that have not been returned and spill them. - final UnsafeSorterSpillWriter spillWriter = - new UnsafeSorterSpillWriter(blockManager, fileBufferSizeBytes, writeMetrics, numRecords); - spillIterator(inMemIterator, spillWriter); - spillWriters.add(spillWriter); - nextUpstream = spillWriter.getReader(serializerManager); + ShuffleWriteMetrics writeMetrics = new ShuffleWriteMetrics(); + if (numRecords > 0) { + // Iterate over the records that have not been returned and spill them. + final UnsafeSorterSpillWriter spillWriter = new UnsafeSorterSpillWriter( + blockManager, fileBufferSizeBytes, writeMetrics, numRecords); + spillIterator(upstream, spillWriter); + spillWriters.add(spillWriter); + upstream = spillWriter.getReader(serializerManager); + } else { + // Nothing to spill as all records have been read already, but do not return yet, as the + // memory still has to be freed. + upstream = null; + } long released = 0L; synchronized (UnsafeExternalSorter.this) { @@ -540,8 +599,7 @@ public long spill() throws IOException { // is accessing the current record. We free this page in that caller's next loadNext() // call. 
for (MemoryBlock page : allocatedPages) { - if (!loaded || page.pageNumber != - ((UnsafeInMemorySorter.SortedIterator)upstream).getCurrentPageNumber()) { + if (!loaded || page.pageNumber != currentPageNumber) { released += page.size(); freePage(page); } else { @@ -549,13 +607,18 @@ public long spill() throws IOException { } } allocatedPages.clear(); + if (lastPage != null) { + // Add the last page back to the list of allocated pages to make sure it gets freed in + // case loadNext() never gets called again. + allocatedPages.add(lastPage); + } } // in-memory sorter will not be used after spilling assert(inMemSorter != null); released += inMemSorter.getMemoryUsage(); totalSortTimeNanos += inMemSorter.getSortTimeNanos(); - inMemSorter.free(); + inMemSorter.freeMemory(); inMemSorter = null; taskContext.taskMetrics().incMemoryBytesSpilled(released); taskContext.taskMetrics().incDiskBytesSpilled(writeMetrics.bytesWritten()); @@ -571,26 +634,32 @@ public boolean hasNext() { @Override public void loadNext() throws IOException { + assert upstream != null; MemoryBlock pageToFree = null; try { synchronized (this) { loaded = true; - if (nextUpstream != null) { - // Just consumed the last record from in memory iterator - if(lastPage != null) { - // Do not free the page here, while we are locking `SpillableIterator`. The `freePage` - // method locks the `TaskMemoryManager`, and it's a bad idea to lock 2 objects in - // sequence. We may hit dead lock if another thread locks `TaskMemoryManager` and - // `SpillableIterator` in sequence, which may happen in - // `TaskMemoryManager.acquireExecutionMemory`. - pageToFree = lastPage; - lastPage = null; - } - upstream = nextUpstream; - nextUpstream = null; + // Just consumed the last record from the in-memory iterator. + if (lastPage != null) { + // Do not free the page here, while we are locking `SpillableIterator`. The `freePage` + // method locks the `TaskMemoryManager`, and it's a bad idea to lock 2 objects in + // sequence. We may hit dead lock if another thread locks `TaskMemoryManager` and + // `SpillableIterator` in sequence, which may happen in + // `TaskMemoryManager.acquireExecutionMemory`. + pageToFree = lastPage; + allocatedPages.clear(); + lastPage = null; } numRecords--; upstream.loadNext(); + + // Keep track of the current base object, base offset, record length, and key prefix, + // so that the current record can still be read in case a spill is triggered and we + // switch to the spill writer's iterator. 
+ currentBaseObject = upstream.getBaseObject(); + currentBaseOffset = upstream.getBaseOffset(); + currentRecordLength = upstream.getRecordLength(); + currentKeyPrefix = upstream.getKeyPrefix(); } } finally { if (pageToFree != null) { @@ -601,22 +670,22 @@ public void loadNext() throws IOException { @Override public Object getBaseObject() { - return upstream.getBaseObject(); + return currentBaseObject; } @Override public long getBaseOffset() { - return upstream.getBaseOffset(); + return currentBaseOffset; } @Override public int getRecordLength() { - return upstream.getRecordLength(); + return currentRecordLength; } @Override public long getKeyPrefix() { - return upstream.getKeyPrefix(); + return currentKeyPrefix; } } @@ -646,7 +715,7 @@ public UnsafeSorterIterator getIterator(int startIndex) throws IOException { } i += spillWriter.recordsSpilled(); } - if (inMemSorter != null) { + if (inMemSorter != null && inMemSorter.numRecords() > 0) { UnsafeSorterIterator iter = inMemSorter.getSortedIterator(); moveOver(iter, startIndex - i); queue.add(iter); @@ -693,6 +762,11 @@ public int getNumRecords() { return numRecords; } + @Override + public long getCurrentPageNumber() { + return current.getCurrentPageNumber(); + } + @Override public boolean hasNext() { while (!current.hasNext() && !iterators.isEmpty()) { diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java index 660eb790a550b..33be899b6b438 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorter.java @@ -159,32 +159,26 @@ private int getUsableCapacity() { return (int) (array.size() / (radixSortSupport != null ? 2 : 1.5)); } + public long getInitialSize() { + return initialSize; + } + /** * Free the memory used by pointer array. */ - public void free() { + public void freeMemory() { if (consumer != null) { if (array != null) { consumer.freeArray(array); } - array = null; - } - } - public void reset() { - if (consumer != null) { - consumer.freeArray(array); - // the call to consumer.allocateArray may trigger a spill which in turn access this instance - // and eventually re-enter this method and try to free the array again. by setting the array - // to null and its length to 0 we effectively make the spill code-path a no-op. setting the - // array to null also indicates that it has already been de-allocated which prevents a double - // de-allocation in free(). + // Set the array to null instead of allocating a new array. Allocating an array could have + // triggered another spill and this method already is called from UnsafeExternalSorter when + // spilling. Attempting to allocate while spilling is dangerous, as we could be holding onto + // a large partially complete allocation, which may prevent other memory from being allocated. + // Instead we will allocate the new array when it is necessary. 
array = null; usableCapacity = 0; - pos = 0; - nullBoundaryPos = 0; - array = consumer.allocateArray(initialSize); - usableCapacity = getUsableCapacity(); } pos = 0; nullBoundaryPos = 0; @@ -217,18 +211,20 @@ public boolean hasSpaceForAnotherRecord() { } public void expandPointerArray(LongArray newArray) { - if (newArray.size() < array.size()) { - // checkstyle.off: RegexpSinglelineJava - throw new SparkOutOfMemoryError("Not enough memory to grow pointer array"); - // checkstyle.on: RegexpSinglelineJava + if (array != null) { + if (newArray.size() < array.size()) { + // checkstyle.off: RegexpSinglelineJava + throw new SparkOutOfMemoryError("Not enough memory to grow pointer array"); + // checkstyle.on: RegexpSinglelineJava + } + Platform.copyMemory( + array.getBaseObject(), + array.getBaseOffset(), + newArray.getBaseObject(), + newArray.getBaseOffset(), + pos * 8L); + consumer.freeArray(array); } - Platform.copyMemory( - array.getBaseObject(), - array.getBaseOffset(), - newArray.getBaseObject(), - newArray.getBaseOffset(), - pos * 8L); - consumer.freeArray(array); array = newArray; usableCapacity = getUsableCapacity(); } @@ -330,6 +326,7 @@ public void loadNext() { @Override public long getBaseOffset() { return baseOffset; } + @Override public long getCurrentPageNumber() { return currentPageNumber; } @@ -346,6 +343,11 @@ public long getCurrentPageNumber() { * {@code next()} will return the same mutable object. */ public UnsafeSorterIterator getSortedIterator() { + if (numRecords() == 0) { + // `array` might be null, so make sure that it is not accessed by returning early. + return new SortedIterator(0, 0); + } + int offset = 0; long start = System.nanoTime(); if (sortComparator != null) { diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterIterator.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterIterator.java index 1b3167fcc250c..d9f22311d07c2 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterIterator.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterIterator.java @@ -34,4 +34,6 @@ public abstract class UnsafeSorterIterator { public abstract long getKeyPrefix(); public abstract int getNumRecords(); + + public abstract long getCurrentPageNumber(); } diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java index ab800288dcb43..f8603c5799e9b 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillMerger.java @@ -70,6 +70,11 @@ public int getNumRecords() { return numRecords; } + @Override + public long getCurrentPageNumber() { + throw new UnsupportedOperationException(); + } + @Override public boolean hasNext() { return !priorityQueue.isEmpty() || (spillReader != null && spillReader.hasNext()); diff --git a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java index a524c4790407d..db79efd008530 100644 --- a/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java +++ b/core/src/main/java/org/apache/spark/util/collection/unsafe/sort/UnsafeSorterSpillReader.java @@ -89,6 +89,11 @@ public int getNumRecords() 
{ return numRecords; } + @Override + public long getCurrentPageNumber() { + throw new UnsupportedOperationException(); + } + @Override public boolean hasNext() { return (numRecordsRemaining > 0); diff --git a/core/src/main/resources/org/apache/spark/ui/static/executorspage-template.html b/core/src/main/resources/org/apache/spark/ui/static/executorspage-template.html index 0729dfe1cef72..ec3cb5bb8ae5e 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/executorspage-template.html +++ b/core/src/main/resources/org/apache/spark/ui/static/executorspage-template.html @@ -56,8 +56,8 @@
Summary
- Blacklisted + title="Number of executors excluded by the scheduler due to task failures."> + Excluded @@ -86,6 +86,22 @@
Executors
Off Heap Storage Memory + + + Peak JVM Memory OnHeap / OffHeap + + + Peak Execution Memory OnHeap / OffHeap + + + Peak Storage Memory OnHeap / OffHeap + + + Peak Pool Memory Direct / Mapped Disk Used Cores Resources diff --git a/core/src/main/resources/org/apache/spark/ui/static/executorspage.js b/core/src/main/resources/org/apache/spark/ui/static/executorspage.js index 520edb9cc3e34..c8dc61991114a 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/executorspage.js +++ b/core/src/main/resources/org/apache/spark/ui/static/executorspage.js @@ -26,15 +26,15 @@ function getThreadDumpEnabled() { } function formatStatus(status, type, row) { - if (row.isBlacklisted) { - return "Blacklisted"; + if (row.isExcluded) { + return "Excluded"; } if (status) { - if (row.blacklistedInStages.length == 0) { + if (row.excludedInStages.length == 0) { return "Active" } - return "Active (Blacklisted in Stages: [" + row.blacklistedInStages.join(", ") + "])"; + return "Active (Excluded in Stages: [" + row.excludedInStages.join(", ") + "])"; } return "Dead" } @@ -119,7 +119,7 @@ function totalDurationColor(totalGCTime, totalDuration) { } var sumOptionalColumns = [3, 4]; -var execOptionalColumns = [5, 6, 9, 10]; +var execOptionalColumns = [5, 6, 7, 8, 9, 10, 13, 14]; var execDataTable; var sumDataTable; @@ -168,7 +168,7 @@ $(document).ready(function () { var allTotalInputBytes = 0; var allTotalShuffleRead = 0; var allTotalShuffleWrite = 0; - var allTotalBlacklisted = 0; + var allTotalExcluded = 0; var activeExecCnt = 0; var activeRDDBlocks = 0; @@ -190,7 +190,7 @@ $(document).ready(function () { var activeTotalInputBytes = 0; var activeTotalShuffleRead = 0; var activeTotalShuffleWrite = 0; - var activeTotalBlacklisted = 0; + var activeTotalExcluded = 0; var deadExecCnt = 0; var deadRDDBlocks = 0; @@ -212,7 +212,7 @@ $(document).ready(function () { var deadTotalInputBytes = 0; var deadTotalShuffleRead = 0; var deadTotalShuffleWrite = 0; - var deadTotalBlacklisted = 0; + var deadTotalExcluded = 0; response.forEach(function (exec) { var memoryMetrics = { @@ -246,7 +246,7 @@ $(document).ready(function () { allTotalInputBytes += exec.totalInputBytes; allTotalShuffleRead += exec.totalShuffleRead; allTotalShuffleWrite += exec.totalShuffleWrite; - allTotalBlacklisted += exec.isBlacklisted ? 1 : 0; + allTotalExcluded += exec.isExcluded ? 1 : 0; if (exec.isActive) { activeExecCnt += 1; activeRDDBlocks += exec.rddBlocks; @@ -268,7 +268,7 @@ $(document).ready(function () { activeTotalInputBytes += exec.totalInputBytes; activeTotalShuffleRead += exec.totalShuffleRead; activeTotalShuffleWrite += exec.totalShuffleWrite; - activeTotalBlacklisted += exec.isBlacklisted ? 1 : 0; + activeTotalExcluded += exec.isExcluded ? 1 : 0; } else { deadExecCnt += 1; deadRDDBlocks += exec.rddBlocks; @@ -290,7 +290,7 @@ $(document).ready(function () { deadTotalInputBytes += exec.totalInputBytes; deadTotalShuffleRead += exec.totalShuffleRead; deadTotalShuffleWrite += exec.totalShuffleWrite; - deadTotalBlacklisted += exec.isBlacklisted ? 1 : 0; + deadTotalExcluded += exec.isExcluded ? 1 : 0; // todo - TEST BACKWARDS compatibility history? 
} }); @@ -315,7 +315,7 @@ $(document).ready(function () { "allTotalInputBytes": allTotalInputBytes, "allTotalShuffleRead": allTotalShuffleRead, "allTotalShuffleWrite": allTotalShuffleWrite, - "allTotalBlacklisted": allTotalBlacklisted + "allTotalExcluded": allTotalExcluded }; var activeSummary = { "execCnt": ( "Active(" + activeExecCnt + ")"), @@ -338,7 +338,7 @@ $(document).ready(function () { "allTotalInputBytes": activeTotalInputBytes, "allTotalShuffleRead": activeTotalShuffleRead, "allTotalShuffleWrite": activeTotalShuffleWrite, - "allTotalBlacklisted": activeTotalBlacklisted + "allTotalExcluded": activeTotalExcluded }; var deadSummary = { "execCnt": ( "Dead(" + deadExecCnt + ")" ), @@ -361,7 +361,7 @@ $(document).ready(function () { "allTotalInputBytes": deadTotalInputBytes, "allTotalShuffleRead": deadTotalShuffleRead, "allTotalShuffleWrite": deadTotalShuffleWrite, - "allTotalBlacklisted": deadTotalBlacklisted + "allTotalExcluded": deadTotalExcluded }; var data = {executors: response, "execSummary": [activeSummary, deadSummary, totalSummary]}; @@ -412,6 +412,78 @@ $(document).ready(function () { formatBytes(row.memoryMetrics.totalOffHeapStorageMemory, type)); } }, + { + data: function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics; + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.JVMHeapMemory; + else + return (formatBytes(peakMemoryMetrics.JVMHeapMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.JVMOffHeapMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + } + }, + { + data: function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics; + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.OnHeapExecutionMemory; + else + return (formatBytes(peakMemoryMetrics.OnHeapExecutionMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.OffHeapExecutionMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + } + }, + { + data: function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics; + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.OnHeapStorageMemory; + else + return (formatBytes(peakMemoryMetrics.OnHeapStorageMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.OffHeapStorageMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + } + }, + { + data: function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics; + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.DirectPoolMemory; + else + return (formatBytes(peakMemoryMetrics.DirectPoolMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.MappedPoolMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + } + }, {data: 'diskUsed', render: formatBytes}, {data: 'totalCores'}, {name: 'resourcesCol', data: 'resources', render: formatResourceCells, orderable: false}, @@ -462,8 +534,12 @@ $(document).ready(function () { "columnDefs": [ {"visible": false, "targets": 5}, {"visible": false, "targets": 6}, + {"visible": false, "targets": 7}, + {"visible": false, "targets": 8}, {"visible": false, "targets": 9}, - {"visible": false, "targets": 10} + {"visible": false, "targets": 10}, + {"visible": false, "targets": 13}, + {"visible": false, "targets": 14} ], 
"deferRender": true }; @@ -547,7 +623,7 @@ $(document).ready(function () { {data: 'allTotalInputBytes', render: formatBytes}, {data: 'allTotalShuffleRead', render: formatBytes}, {data: 'allTotalShuffleWrite', render: formatBytes}, - {data: 'allTotalBlacklisted'} + {data: 'allTotalExcluded'} ], "paging": false, "searching": false, @@ -568,11 +644,15 @@ $(document).ready(function () { "Show Additional Metrics" + "" + "
" + - "
Select All
" + - "
On Heap Memory
" + - "
Off Heap Memory
" + - "
Resources
" + - "
Resource Profile Id
" + + "
Select All
" + + "
On Heap Memory
" + + "
Off Heap Memory
" + + "
Peak JVM Memory OnHeap / OffHeap
" + + "
Peak Execution Memory OnHeap / OffHeap
" + + "
Peak Storage Memory OnHeap / OffHeap
" + + "
Peak Pool Memory Direct / Mapped
" + + "
Resources
" + + "
Resource Profile Id
" + "
"); reselectCheckboxesBasedOnTaskTableState(); diff --git a/core/src/main/resources/org/apache/spark/ui/static/jquery-3.4.1.min.js b/core/src/main/resources/org/apache/spark/ui/static/jquery-3.4.1.min.js deleted file mode 100644 index 07c00cd227da0..0000000000000 --- a/core/src/main/resources/org/apache/spark/ui/static/jquery-3.4.1.min.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */ -!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" 
"]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return 
[the remainder of the deleted one-line minified jQuery 3.4.1 body, and the hunk that adds the replacement minified jQuery bundle in its place, are omitted: markup embedded in their string literals was stripped when this diff was extracted, so the minified sources are not recoverable]
[two hunks for core/src/main/resources/org/apache/spark/ui/static/sorttable.js follow; their "diff --git" header and most of the first hunk were lost the same way -- the surviving tail of that hunk and the two intact hunks are shown below with the diff line structure restored]
...5' : ' ▾';
       this.appendChild(sortrevind);
       return;
@@ -117,12 +117,12 @@ sorttable = {
                                          'sorttable_sorted');
       rowlists = this.parentNode.getElementsByTagName("span");
       for (var j=0; j < rowlists.length; j++) {
-        if (rowlists[j].className.search(/\sorttable_sortrevind\b/)) {
+        if (rowlists[j].className.search(/\bsorttable_sortrevind\b/) != -1) {
           rowlists[j].parentNode.removeChild(rowlists[j]);
         }
       }
       sortfwdind = document.createElement('span');
-      sortfwdind.class = "sorttable_sortfwdind";
+      sortfwdind.className = "sorttable_sortfwdind";
       sortfwdind.innerHTML = stIsIE ? ' 6' : ' ▴';
       this.appendChild(sortfwdind);
       return;
@@ -138,15 +138,15 @@ sorttable = {
       });
       rowlists = this.parentNode.getElementsByTagName("span");
       for (var j=0; j < rowlists.length; j++) {
-        if (rowlists[j].className.search(/\bsorttable_sortfwdind\b/)
-            || rowlists[j].className.search(/\sorttable_sortrevind\b/) ) {
+        if (rowlists[j].className.search(/\bsorttable_sortfwdind\b/) != -1
+            || rowlists[j].className.search(/\bsorttable_sortrevind\b/) != -1) {
           rowlists[j].parentNode.removeChild(rowlists[j]);
         }
       }
       this.className += ' sorttable_sorted';
       sortfwdind = document.createElement('span');
-      sortfwdind.class = "sorttable_sortfwdind";
+      sortfwdind.className = "sorttable_sortfwdind";
       sortfwdind.innerHTML = stIsIE ? ' 6' : ' ▴';
       this.appendChild(sortfwdind);
diff --git a/core/src/main/resources/org/apache/spark/ui/static/spark-dag-viz.js b/core/src/main/resources/org/apache/spark/ui/static/spark-dag-viz.js
index 474c453643365..1fc1fb4b4513b 100644
--- a/core/src/main/resources/org/apache/spark/ui/static/spark-dag-viz.js
+++ b/core/src/main/resources/org/apache/spark/ui/static/spark-dag-viz.js
@@ -334,7 +334,7 @@ function preprocessGraphLayout(g, forJob) {
 }
 
 /*
- * Helper function to size the SVG appropriately such that all elements are displyed.
+ * Helper function to size the SVG appropriately such that all elements are displayed.
  * This assumes that all outermost elements are clusters (rectangles).
*/ function resizeSvg(svg) { diff --git a/core/src/main/resources/org/apache/spark/ui/static/stagepage.js b/core/src/main/resources/org/apache/spark/ui/static/stagepage.js index 93b37c296271b..ebb79f542168d 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/stagepage.js +++ b/core/src/main/resources/org/apache/spark/ui/static/stagepage.js @@ -70,7 +70,7 @@ function stageEndPoint(appId) { return newBaseURI + "/api/v1/applications/" + appId + "/" + appAttemptId + "/stages/" + stageId; } } - return location.origin + "/api/v1/applications/" + appId + "/stages/" + stageId; + return uiRoot + "/api/v1/applications/" + appId + "/stages/" + stageId; } function getColumnNameForTaskMetricSummary(columnKey) { @@ -243,23 +243,39 @@ function createRowMetadataForColumn(colKey, data, checkboxId) { } function reselectCheckboxesBasedOnTaskTableState() { - var allChecked = true; + var taskSummaryHasSelected = false; + var executorSummaryHasSelected = false; + var allTaskSummaryChecked = true; + var allExecutorSummaryChecked = true; var taskSummaryMetricsTableCurrentFilteredArray = taskSummaryMetricsTableCurrentStateArray.slice(); if (typeof taskTableSelector !== 'undefined' && taskSummaryMetricsTableCurrentStateArray.length > 0) { for (var k = 0; k < optionalColumns.length; k++) { if (taskTableSelector.column(optionalColumns[k]).visible()) { + taskSummaryHasSelected = true; $("#box-"+optionalColumns[k]).prop('checked', true); taskSummaryMetricsTableCurrentStateArray.push(taskSummaryMetricsTableArray.filter(row => (row.checkboxId).toString() == optionalColumns[k])[0]); taskSummaryMetricsTableCurrentFilteredArray = taskSummaryMetricsTableCurrentStateArray.slice(); } else { - allChecked = false; + allTaskSummaryChecked = false; } } - if (allChecked) { - $("#box-0").prop('checked', true); - } createDataTableForTaskSummaryMetricsTable(taskSummaryMetricsTableCurrentFilteredArray); } + + if (typeof executorSummaryTableSelector !== 'undefined') { + for (var k = 0; k < executorOptionalColumns.length; k++) { + if (executorSummaryTableSelector.column(executorOptionalColumns[k]).visible()) { + executorSummaryHasSelected = true; + $("#executor-box-"+executorOptionalColumns[k]).prop('checked', true); + } else { + allExecutorSummaryChecked = false; + } + } + } + + if ((taskSummaryHasSelected || executorSummaryHasSelected) && allTaskSummaryChecked && allExecutorSummaryChecked) { + $("#box-0").prop('checked', true); + } } function getStageAttemptId() { @@ -278,6 +294,9 @@ var taskSummaryMetricsDataTable; var optionalColumns = [11, 12, 13, 14, 15, 16, 17, 21]; var taskTableSelector; +var executorOptionalColumns = [15, 16, 17, 18]; +var executorSummaryTableSelector; + $(document).ready(function () { setDataTableDefaults(); @@ -288,14 +307,18 @@ $(document).ready(function () { "" + "
" + "
Select All
[checkbox markup garbled in extraction; recoverable content follows] The removed ("-") lines built the original row of task-metric checkboxes: Scheduler Delay, Task Deserialization Time, Shuffle Read Blocked Time, Shuffle Remote Reads, Shuffle Write Time, Result Serialization Time, Getting Result Time and Peak Execution Memory. The added ("+") lines rebuild those eight checkboxes and append four new ones used by the executor summary table: Peak JVM Memory OnHeap / OffHeap, Peak Execution Memory OnHeap / OffHeap, Peak Storage Memory OnHeap / OffHeap and Peak Pool Memory Direct / Mapped. (The surrounding HTML tags, element ids and data attributes were stripped when this diff was extracted; only the label text survived.)
"); $('#scheduler_delay').attr("data-toggle", "tooltip") @@ -433,7 +456,7 @@ $(document).ready(function () { {data : "failedTasks"}, {data : "killedTasks"}, {data : "succeededTasks"}, - {data : "isBlacklistedForStage"}, + {data : "isExcludedForStage"}, { data : function (row, type) { return row.inputRecords != 0 ? formatBytes(row.inputBytes, type) + " / " + row.inputRecords : ""; @@ -463,15 +486,95 @@ $(document).ready(function () { data : function (row, type) { return typeof row.diskBytesSpilled != 'undefined' ? formatBytes(row.diskBytesSpilled, type) : ""; } + }, + { + data : function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics; + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.JVMHeapMemory; + else + return (formatBytes(peakMemoryMetrics.JVMHeapMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.JVMOffHeapMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + + } + }, + { + data : function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.OnHeapExecutionMemory; + else + return (formatBytes(peakMemoryMetrics.OnHeapExecutionMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.OffHeapExecutionMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + } + }, + { + data : function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.OnHeapStorageMemory; + else + return (formatBytes(peakMemoryMetrics.OnHeapStorageMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.OffHeapStorageMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + } + }, + { + data : function (row, type) { + var peakMemoryMetrics = row.peakMemoryMetrics + if (typeof peakMemoryMetrics !== 'undefined') { + if (type !== 'display') + return peakMemoryMetrics.DirectPoolMemory; + else + return (formatBytes(peakMemoryMetrics.DirectPoolMemory, type) + ' / ' + + formatBytes(peakMemoryMetrics.MappedPoolMemory, type)); + } else { + if (type !== 'display') { + return 0; + } else { + return '0.0 B / 0.0 B'; + } + } + } } ], + "columnDefs": [ + { "visible": false, "targets": 15 }, + { "visible": false, "targets": 16 }, + { "visible": false, "targets": 17 }, + { "visible": false, "targets": 18 } + ], + "deferRender": true, "order": [[0, "asc"]], "bAutoWidth": false, "oLanguage": { "sEmptyTable": "No data to show yet" } }; - var executorSummaryTableSelector = + executorSummaryTableSelector = $("#summary-executor-table").DataTable(executorSummaryConf); $('#parent-container [data-toggle="tooltip"]').tooltip(); @@ -843,7 +946,8 @@ $(document).ready(function () { }, { data : function (row, type) { - if (row.taskMetrics && row.taskMetrics.shuffleReadMetrics && row.taskMetrics.shuffleReadMetrics.localBytesRead > 0) { + if (row.taskMetrics && row.taskMetrics.shuffleReadMetrics && + (row.taskMetrics.shuffleReadMetrics.localBytesRead > 0 || row.taskMetrics.shuffleReadMetrics.remoteBytesRead > 0)) { var totalBytesRead = parseInt(row.taskMetrics.shuffleReadMetrics.localBytesRead) + parseInt(row.taskMetrics.shuffleReadMetrics.remoteBytesRead); if (type === 'display') { return formatBytes(totalBytesRead, type) + " / " + row.taskMetrics.shuffleReadMetrics.recordsRead; @@ -923,30 
+1027,40 @@ $(document).ready(function () { var para = $(this).attr('data-column'); if (para == "0") { var allColumns = taskTableSelector.columns(optionalColumns); + var executorAllColumns = executorSummaryTableSelector.columns(executorOptionalColumns); if ($(this).is(":checked")) { $(".toggle-vis").prop('checked', true); allColumns.visible(true); + executorAllColumns.visible(true); createDataTableForTaskSummaryMetricsTable(taskSummaryMetricsTableArray); } else { $(".toggle-vis").prop('checked', false); allColumns.visible(false); + executorAllColumns.visible(false); var taskSummaryMetricsTableFilteredArray = taskSummaryMetricsTableArray.filter(row => row.checkboxId < 11); createDataTableForTaskSummaryMetricsTable(taskSummaryMetricsTableFilteredArray); } } else { - var column = taskTableSelector.column(para); - // Toggle the visibility - column.visible(!column.visible()); - var taskSummaryMetricsTableFilteredArray = []; - if ($(this).is(":checked")) { - taskSummaryMetricsTableCurrentStateArray.push(taskSummaryMetricsTableArray.filter(row => (row.checkboxId).toString() == para)[0]); - taskSummaryMetricsTableFilteredArray = taskSummaryMetricsTableCurrentStateArray.slice(); - } else { - taskSummaryMetricsTableFilteredArray = - taskSummaryMetricsTableCurrentStateArray.filter(row => (row.checkboxId).toString() != para); + var dataMetricsType = $(this).attr("data-metrics-type"); + if (dataMetricsType === 'task') { + var column = taskTableSelector.column(para); + // Toggle the visibility + column.visible(!column.visible()); + var taskSummaryMetricsTableFilteredArray = []; + if ($(this).is(":checked")) { + taskSummaryMetricsTableCurrentStateArray.push(taskSummaryMetricsTableArray.filter(row => (row.checkboxId).toString() == para)[0]); + taskSummaryMetricsTableFilteredArray = taskSummaryMetricsTableCurrentStateArray.slice(); + } else { + taskSummaryMetricsTableFilteredArray = + taskSummaryMetricsTableCurrentStateArray.filter(row => (row.checkboxId).toString() != para); + } + createDataTableForTaskSummaryMetricsTable(taskSummaryMetricsTableFilteredArray); + } + if (dataMetricsType === "executor") { + var column = executorSummaryTableSelector.column(para); + column.visible(!column.visible()); } - createDataTableForTaskSummaryMetricsTable(taskSummaryMetricsTableFilteredArray); } }); diff --git a/core/src/main/resources/org/apache/spark/ui/static/stagespage-template.html b/core/src/main/resources/org/apache/spark/ui/static/stagespage-template.html index 77ea70e4ad966..b938158b77027 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/stagespage-template.html +++ b/core/src/main/resources/org/apache/spark/ui/static/stagespage-template.html @@ -50,8 +50,8 @@

Aggregated Metrics by Executor

Succeeded Tasks
[table-header markup garbled in extraction; recoverable content follows] In this hunk the header cell previously labelled "Blacklisted" becomes "Excluded", and its tooltip is changed to "Shows if this executor has been excluded by the scheduler due to task failures."; the neighbouring "Succeeded Tasks", "Input Size / Records" and "Output Size / Records" headers are unchanged context.
@@ -59,6 +59,10 @@
Aggregated Metrics by Executor
[same table, second hunk: four new header cells are appended after the existing columns listed on the next line]
Shuffle Write Size / Records Spill (Memory) Spill (Disk) + Peak JVM Memory OnHeap / OffHeap + Peak Execution Memory OnHeap / OffHeap + Peak Storage Memory OnHeap / OffHeap + Peak Pool Memory Direct / Mapped diff --git a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js index 5be8cffd1f8db..220b76a0f1b27 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js +++ b/core/src/main/resources/org/apache/spark/ui/static/timeline-view.js @@ -42,26 +42,31 @@ function drawApplicationTimeline(groupArray, eventObjArray, startTime, offset) { setupZoomable("#application-timeline-zoom-lock", applicationTimeline); setupExecutorEventAction(); + function getIdForJobEntry(baseElem) { + var jobIdText = $($(baseElem).find(".application-timeline-content")[0]).text(); + var jobId = jobIdText.match("\\(Job (\\d+)\\)$")[1]; + return jobId; + } + + function getSelectorForJobEntry(jobId) { + return "#job-" + jobId; + } + function setupJobEventAction() { $(".vis-item.vis-range.job.application-timeline-object").each(function() { - var getSelectorForJobEntry = function(baseElem) { - var jobIdText = $($(baseElem).find(".application-timeline-content")[0]).text(); - var jobId = jobIdText.match("\\(Job (\\d+)\\)$")[1]; - return "#job-" + jobId; - }; - $(this).click(function() { - var jobPagePath = $(getSelectorForJobEntry(this)).find("a.name-link").attr("href"); - window.location.href = jobPagePath + var jobId = getIdForJobEntry(this); + var jobPagePath = uiRoot + appBasePath + "/jobs/job/?id=" + jobId; + window.location.href = jobPagePath; }); $(this).hover( function() { - $(getSelectorForJobEntry(this)).addClass("corresponding-item-hover"); + $(getSelectorForJobEntry(getIdForJobEntry(this))).addClass("corresponding-item-hover"); $($(this).find("div.application-timeline-content")[0]).tooltip("show"); }, function() { - $(getSelectorForJobEntry(this)).removeClass("corresponding-item-hover"); + $(getSelectorForJobEntry(getIdForJobEntry(this))).removeClass("corresponding-item-hover"); $($(this).find("div.application-timeline-content")[0]).tooltip("hide"); } ); @@ -125,26 +130,34 @@ function drawJobTimeline(groupArray, eventObjArray, startTime, offset) { setupZoomable("#job-timeline-zoom-lock", jobTimeline); setupExecutorEventAction(); + function getStageIdAndAttemptForStageEntry(baseElem) { + var stageIdText = $($(baseElem).find(".job-timeline-content")[0]).text(); + var stageIdAndAttempt = stageIdText.match("\\(Stage (\\d+\\.\\d+)\\)$")[1].split("."); + return stageIdAndAttempt; + } + + function getSelectorForStageEntry(stageIdAndAttempt) { + return "#stage-" + stageIdAndAttempt[0] + "-" + stageIdAndAttempt[1]; + } + function setupStageEventAction() { $(".vis-item.vis-range.stage.job-timeline-object").each(function() { - var getSelectorForStageEntry = function(baseElem) { - var stageIdText = $($(baseElem).find(".job-timeline-content")[0]).text(); - var stageIdAndAttempt = stageIdText.match("\\(Stage (\\d+\\.\\d+)\\)$")[1].split("."); - return "#stage-" + stageIdAndAttempt[0] + "-" + stageIdAndAttempt[1]; - }; - $(this).click(function() { - var stagePagePath = $(getSelectorForStageEntry(this)).find("a.name-link").attr("href") - window.location.href = stagePagePath + var stageIdAndAttempt = getStageIdAndAttemptForStageEntry(this); + var stagePagePath = uiRoot + appBasePath + + "/stages/stage/?id=" + stageIdAndAttempt[0] + "&attempt=" + stageIdAndAttempt[1]; + window.location.href = stagePagePath; }); $(this).hover( 
function() { - $(getSelectorForStageEntry(this)).addClass("corresponding-item-hover"); + $(getSelectorForStageEntry(getStageIdAndAttemptForStageEntry(this))) + .addClass("corresponding-item-hover"); $($(this).find("div.job-timeline-content")[0]).tooltip("show"); }, function() { - $(getSelectorForStageEntry(this)).removeClass("corresponding-item-hover"); + $(getSelectorForStageEntry(getStageIdAndAttemptForStageEntry(this))) + .removeClass("corresponding-item-hover"); $($(this).find("div.job-timeline-content")[0]).tooltip("hide"); } ); diff --git a/core/src/main/resources/org/apache/spark/ui/static/utils.js b/core/src/main/resources/org/apache/spark/ui/static/utils.js index 4571fc1aec4dd..f4914f000e705 100644 --- a/core/src/main/resources/org/apache/spark/ui/static/utils.js +++ b/core/src/main/resources/org/apache/spark/ui/static/utils.js @@ -39,7 +39,7 @@ function formatDuration(milliseconds) { function formatBytes(bytes, type) { if (type !== 'display') return bytes; - if (bytes == 0) return '0.0 B'; + if (bytes <= 0) return '0.0 B'; var k = 1024; var dm = 1; var sizes = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']; @@ -74,7 +74,7 @@ function getTimeZone() { return Intl.DateTimeFormat().resolvedOptions().timeZone; } catch(ex) { // Get time zone from a string representing the date, - // eg. "Thu Nov 16 2017 01:13:32 GMT+0800 (CST)" -> "CST" + // e.g. "Thu Nov 16 2017 01:13:32 GMT+0800 (CST)" -> "CST" return new Date().toString().match(/\((.*)\)/)[1]; } } @@ -105,7 +105,7 @@ function getStandAloneAppId(cb) { } // Looks like Web UI is running in standalone mode // Let's get application-id using REST End Point - $.getJSON(location.origin + "/api/v1/applications", function(response, status, jqXHR) { + $.getJSON(uiRoot + "/api/v1/applications", function(response, status, jqXHR) { if (response && response.length > 0) { var appId = response[0].id; cb(appId); @@ -152,7 +152,7 @@ function createTemplateURI(appId, templateName) { var baseURI = words.slice(0, ind).join('/') + '/static/' + templateName + '-template.html'; return baseURI; } - return location.origin + "/static/" + templateName + "-template.html"; + return uiRoot + "/static/" + templateName + "-template.html"; } function setDataTableDefaults() { @@ -193,5 +193,5 @@ function createRESTEndPointForExecutorsPage(appId) { return newBaseURI + "/api/v1/applications/" + appId + "/" + attemptId + "/allexecutors"; } } - return location.origin + "/api/v1/applications/" + appId + "/allexecutors"; + return uiRoot + "/api/v1/applications/" + appId + "/allexecutors"; } diff --git a/core/src/main/resources/org/apache/spark/ui/static/webui.css b/core/src/main/resources/org/apache/spark/ui/static/webui.css index d4394ebcfd258..262cee7b58aff 100755 --- a/core/src/main/resources/org/apache/spark/ui/static/webui.css +++ b/core/src/main/resources/org/apache/spark/ui/static/webui.css @@ -321,10 +321,6 @@ a.expandbutton { width: 100%; } -.container-fluid-div { - width: 200px; -} - .select-all-div-checkbox-div { width: 90px; } diff --git a/core/src/main/scala/org/apache/spark/BarrierTaskContext.scala b/core/src/main/scala/org/apache/spark/BarrierTaskContext.scala index 4d765481eb836..09fa91655fba5 100644 --- a/core/src/main/scala/org/apache/spark/BarrierTaskContext.scala +++ b/core/src/main/scala/org/apache/spark/BarrierTaskContext.scala @@ -21,7 +21,6 @@ import java.util.{Properties, Timer, TimerTask} import scala.collection.JavaConverters._ import scala.concurrent.duration._ -import scala.language.postfixOps import scala.util.{Failure, Success 
=> ScalaSuccess, Try} import org.apache.spark.annotation.{Experimental, Since} diff --git a/core/src/main/scala/org/apache/spark/ContextAwareIterator.scala b/core/src/main/scala/org/apache/spark/ContextAwareIterator.scala new file mode 100644 index 0000000000000..c4d0dd8aceab0 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/ContextAwareIterator.scala @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark + +import org.apache.spark.annotation.DeveloperApi + +/** + * :: DeveloperApi :: + * A TaskContext aware iterator. + * + * As the Python evaluation consumes the parent iterator in a separate thread, + * it could consume more data from the parent even after the task ends and the parent is closed. + * If an off-heap access exists in the parent iterator, it could cause segmentation fault + * which crashes the executor. + * Thus, we should use [[ContextAwareIterator]] to stop consuming after the task ends. + */ +@DeveloperApi +class ContextAwareIterator[+T](val context: TaskContext, val delegate: Iterator[T]) + extends Iterator[T] { + + override def hasNext: Boolean = + !context.isCompleted() && !context.isInterrupted() && delegate.hasNext + + override def next(): T = delegate.next() +} diff --git a/core/src/main/scala/org/apache/spark/Dependency.scala b/core/src/main/scala/org/apache/spark/Dependency.scala index ba8e4d69ba755..d21b9d9833e9e 100644 --- a/core/src/main/scala/org/apache/spark/Dependency.scala +++ b/core/src/main/scala/org/apache/spark/Dependency.scala @@ -23,6 +23,7 @@ import org.apache.spark.annotation.DeveloperApi import org.apache.spark.rdd.RDD import org.apache.spark.serializer.Serializer import org.apache.spark.shuffle.{ShuffleHandle, ShuffleWriteProcessor} +import org.apache.spark.storage.BlockManagerId /** * :: DeveloperApi :: @@ -95,6 +96,20 @@ class ShuffleDependency[K: ClassTag, V: ClassTag, C: ClassTag]( val shuffleHandle: ShuffleHandle = _rdd.context.env.shuffleManager.registerShuffle( shuffleId, this) + /** + * Stores the location of the list of chosen external shuffle services for handling the + * shuffle merge requests from mappers in this shuffle map stage. 
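A minimal usage sketch for the ContextAwareIterator added in ContextAwareIterator.scala above. This is an illustration only, not part of the patch: the guarded helper and the rows iterator are made-up names; only the wrapper itself and TaskContext.get() come from Spark.

    import org.apache.spark.{ContextAwareIterator, TaskContext}

    // Hypothetical helper: wrap an off-heap-backed row iterator before handing it to a consumer
    // thread (for example the Python runner) that can outlive the task that produced it.
    def guarded[T](rows: Iterator[T]): Iterator[T] = {
      val context = TaskContext.get()  // context of the task currently running this code
      // hasNext becomes false as soon as the task completes or is interrupted, so the consumer
      // stops pulling rows instead of touching memory the finished task may have released.
      new ContextAwareIterator(context, rows)
    }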
+ */ + private[spark] var mergerLocs: Seq[BlockManagerId] = Nil + + def setMergerLocs(mergerLocs: Seq[BlockManagerId]): Unit = { + if (mergerLocs != null) { + this.mergerLocs = mergerLocs + } + } + + def getMergerLocs: Seq[BlockManagerId] = mergerLocs + _rdd.sparkContext.cleaner.foreach(_.registerShuffleForCleanup(this)) _rdd.sparkContext.shuffleDriverComponents.registerShuffle(shuffleId) } diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala index ce47f3fd32203..cdba1c44034c0 100644 --- a/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala +++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationClient.scala @@ -91,11 +91,13 @@ private[spark] trait ExecutorAllocationClient { * @param executorsAndDecomInfo identifiers of executors & decom info. * @param adjustTargetNumExecutors whether the target number of executors will be adjusted down * after these executors have been decommissioned. + * @param triggeredByExecutor whether the decommission is triggered at executor. * @return the ids of the executors acknowledged by the cluster manager to be removed. */ def decommissionExecutors( - executorsAndDecomInfo: Array[(String, ExecutorDecommissionInfo)], - adjustTargetNumExecutors: Boolean): Seq[String] = { + executorsAndDecomInfo: Array[(String, ExecutorDecommissionInfo)], + adjustTargetNumExecutors: Boolean, + triggeredByExecutor: Boolean): Seq[String] = { killExecutors(executorsAndDecomInfo.map(_._1), adjustTargetNumExecutors, countFailures = false) @@ -109,14 +111,21 @@ private[spark] trait ExecutorAllocationClient { * @param executorId identifiers of executor to decommission * @param decommissionInfo information about the decommission (reason, host loss) * @param adjustTargetNumExecutors if we should adjust the target number of executors. + * @param triggeredByExecutor whether the decommission is triggered at executor. + * (TODO: add a new type like `ExecutorDecommissionInfo` for the + * case where executor is decommissioned at executor first, so we + * don't need this extra parameter.) * @return whether the request is acknowledged by the cluster manager. 
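A hedged caller sketch for the widened decommission API above. It assumes some ExecutorAllocationClient implementation named client is in scope and uses an illustrative executor id; the reason string mirrors the one dynamic allocation passes later in this diff.

    // Ask the cluster manager to decommission one executor without lowering the target count.
    val acknowledged: Boolean = client.decommissionExecutor(
      "7",                                            // executor id (illustrative)
      ExecutorDecommissionInfo("spark scale down"),   // decommission reason
      adjustTargetNumExecutors = false,
      triggeredByExecutor = false)                    // driver-initiated, not reported by the executor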
*/ - final def decommissionExecutor(executorId: String, + final def decommissionExecutor( + executorId: String, decommissionInfo: ExecutorDecommissionInfo, - adjustTargetNumExecutors: Boolean): Boolean = { + adjustTargetNumExecutors: Boolean, + triggeredByExecutor: Boolean = false): Boolean = { val decommissionedExecutors = decommissionExecutors( Array((executorId, decommissionInfo)), - adjustTargetNumExecutors = adjustTargetNumExecutors) + adjustTargetNumExecutors = adjustTargetNumExecutors, + triggeredByExecutor = triggeredByExecutor) decommissionedExecutors.nonEmpty && decommissionedExecutors(0).equals(executorId) } diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala index b6e14e8210c86..a83762ff01ccb 100644 --- a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala +++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala @@ -28,7 +28,7 @@ import com.codahale.metrics.{Gauge, MetricRegistry} import org.apache.spark.internal.{config, Logging} import org.apache.spark.internal.config._ import org.apache.spark.internal.config.DECOMMISSION_ENABLED -import org.apache.spark.internal.config.Tests.TEST_SCHEDULE_INTERVAL +import org.apache.spark.internal.config.Tests.TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED import org.apache.spark.metrics.source.Source import org.apache.spark.resource.ResourceProfile.UNKNOWN_RESOURCE_PROFILE_ID import org.apache.spark.resource.ResourceProfileManager @@ -150,11 +150,7 @@ private[spark] class ExecutorAllocationManager( private var addTime: Long = NOT_SET // Polling loop interval (ms) - private val intervalMillis: Long = if (Utils.isTesting) { - conf.get(TEST_SCHEDULE_INTERVAL) - } else { - 100 - } + private val intervalMillis: Long = 100 // Listener for Spark events that impact the allocation policy val listener = new ExecutorAllocationListener @@ -247,9 +243,12 @@ private[spark] class ExecutorAllocationManager( } } } - executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS) - // copy the maps inside synchonize to ensure not being modified + if (!testing || conf.get(TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED)) { + executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS) + } + + // copy the maps inside synchronize to ensure not being modified val (numExecutorsTarget, numLocalityAware) = synchronized { val numTarget = numExecutorsTargetPerResourceProfileId.toMap val numLocality = numLocalityAwareTasksPerResourceProfileId.toMap @@ -313,8 +312,8 @@ private[spark] class ExecutorAllocationManager( if (unschedulableTaskSets > 0) { // Request additional executors to account for task sets having tasks that are unschedulable - // due to blacklisting when the active executor count has already reached the max needed - // which we would normally get. + // due to executors excluded for failures when the active executor count has already reached + // the max needed which we would normally get. val maxNeededForUnschedulables = math.ceil(unschedulableTaskSets * executorAllocationRatio / tasksPerExecutor).toInt math.max(maxNeededWithSpeculationLocalityOffset, @@ -380,7 +379,7 @@ private[spark] class ExecutorAllocationManager( // We lower the target number of executors but don't actively kill any yet. Killing is // controlled separately by an idle timeout. 
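The extra-capacity estimate for unschedulable task sets shown above is a simple ceiling computation; the worked example below uses assumed values only, to make the arithmetic concrete.

    val unschedulableTaskSets = 2        // task sets with no schedulable task left (assumed)
    val executorAllocationRatio = 1.0    // spark.dynamicAllocation.executorAllocationRatio (assumed)
    val tasksPerExecutor = 4             // task slots per executor (assumed)
    val maxNeededForUnschedulables =
      math.ceil(unschedulableTaskSets * executorAllocationRatio / tasksPerExecutor).toInt
    // == 1: one executor beyond the normal target, so the excluded tasks still have somewhere to run.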
It's still helpful to reduce - // the target number in case an executor just happens to get lost (eg., bad hardware, + // the target number in case an executor just happens to get lost (e.g., bad hardware, // or the cluster manager preempts it) -- in that case, there is no point in trying // to immediately get a new executor, since we wouldn't even use it yet. decrementExecutorsFromTarget(maxNeeded, rpId, updatesNeeded) @@ -581,7 +580,10 @@ private[spark] class ExecutorAllocationManager( if (decommissionEnabled) { val executorIdsWithoutHostLoss = executorIdsToBeRemoved.toSeq.map( id => (id, ExecutorDecommissionInfo("spark scale down"))).toArray - client.decommissionExecutors(executorIdsWithoutHostLoss, adjustTargetNumExecutors = false) + client.decommissionExecutors( + executorIdsWithoutHostLoss, + adjustTargetNumExecutors = false, + triggeredByExecutor = false) } else { client.killExecutors(executorIdsToBeRemoved.toSeq, adjustTargetNumExecutors = false, countFailures = false, force = false) @@ -660,10 +662,10 @@ private[spark] class ExecutorAllocationManager( private val resourceProfileIdToStageAttempt = new mutable.HashMap[Int, mutable.Set[StageAttempt]] - // Keep track of unschedulable task sets due to blacklisting. This is a Set of StageAttempt's - // because we'll only take the last unschedulable task in a taskset although there can be more. - // This is done in order to avoid costly loops in the scheduling. - // Check TaskSetManager#getCompletelyBlacklistedTaskIfAny for more details. + // Keep track of unschedulable task sets because of executor/node exclusions from too many task + // failures. This is a Set of StageAttempt's because we'll only take the last unschedulable task + // in a taskset although there can be more. This is done in order to avoid costly loops in the + // scheduling. Check TaskSetManager#getCompletelyExcludedTaskIfAny for more details. private val unschedulableTaskSets = new mutable.HashSet[StageAttempt] // stageAttempt to tuple (the number of task with locality preferences, a map where each pair @@ -796,7 +798,11 @@ private[spark] class ExecutorAllocationManager( } if (taskEnd.taskInfo.speculative) { stageAttemptToSpeculativeTaskIndices.get(stageAttempt).foreach {_.remove{taskIndex}} - stageAttemptToNumSpeculativeTasks(stageAttempt) -= 1 + // If the previous task attempt succeeded first and it was the last task in a stage, + // the stage may have been removed before handling this speculative TaskEnd event.
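Since the scale-down path above now decommissions executors rather than killing them when decommissioning is enabled, a minimal configuration sketch may help; the key names follow the config entries referenced in this patch (`DECOMMISSION_ENABLED` and the dynamic allocation settings), and the values are illustrative:

```scala
import org.apache.spark.SparkConf

val conf = new SparkConf()
  .set("spark.dynamicAllocation.enabled", "true")
  // Lets dynamic allocation release executors safely without an external shuffle service.
  .set("spark.dynamicAllocation.shuffleTracking.enabled", "true")
  // With this on, ExecutorAllocationManager calls decommissionExecutors(...)
  // on scale-down instead of killExecutors(...).
  .set("spark.decommission.enabled", "true")
```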
+ if (stageAttemptToNumSpeculativeTasks.contains(stageAttempt)) { + stageAttemptToNumSpeculativeTasks(stageAttempt) -= 1 + } } taskEnd.reason match { diff --git a/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala b/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala index c99698f99d904..13ff075660cd7 100644 --- a/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala +++ b/core/src/main/scala/org/apache/spark/HeartbeatReceiver.scala @@ -67,7 +67,7 @@ private[spark] case class HeartbeatResponse(reregisterBlockManager: Boolean) private[spark] class HeartbeatReceiver(sc: SparkContext, clock: Clock) extends SparkListener with ThreadSafeRpcEndpoint with Logging { - def this(sc: SparkContext) { + def this(sc: SparkContext) = { this(sc, new SystemClock) } @@ -80,7 +80,9 @@ private[spark] class HeartbeatReceiver(sc: SparkContext, clock: Clock) // executor ID -> timestamp of when the last heartbeat from this executor was received private val executorLastSeen = new HashMap[String, Long] - private val executorTimeoutMs = sc.conf.get(config.STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT) + private val executorTimeoutMs = sc.conf.get( + config.STORAGE_BLOCKMANAGER_HEARTBEAT_TIMEOUT + ).getOrElse(Utils.timeStringAsMs(s"${sc.conf.get(Network.NETWORK_TIMEOUT)}s")) private val checkTimeoutIntervalMs = sc.conf.get(Network.NETWORK_TIMEOUT_INTERVAL) diff --git a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala index 64102ccc05882..cdec1982b4487 100644 --- a/core/src/main/scala/org/apache/spark/MapOutputTracker.scala +++ b/core/src/main/scala/org/apache/spark/MapOutputTracker.scala @@ -35,7 +35,7 @@ import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.io.CompressionCodec import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEndpointRef, RpcEnv} -import org.apache.spark.scheduler.{ExecutorCacheTaskLocation, MapStatus} +import org.apache.spark.scheduler.MapStatus import org.apache.spark.shuffle.MetadataFetchFailedException import org.apache.spark.storage.{BlockId, BlockManagerId, ShuffleBlockId} import org.apache.spark.util._ @@ -125,14 +125,19 @@ private class ShuffleStatus(numPartitions: Int) extends Logging { * Update the map output location (e.g. during migration). 
*/ def updateMapOutput(mapId: Long, bmAddress: BlockManagerId): Unit = withWriteLock { - val mapStatusOpt = mapStatuses.find(_.mapId == mapId) - mapStatusOpt match { - case Some(mapStatus) => - logInfo(s"Updating map output for ${mapId} to ${bmAddress}") - mapStatus.updateLocation(bmAddress) - invalidateSerializedMapOutputStatusCache() - case None => - logError(s"Asked to update map output ${mapId} for untracked map status.") + try { + val mapStatusOpt = mapStatuses.find(_.mapId == mapId) + mapStatusOpt match { + case Some(mapStatus) => + logInfo(s"Updating map output for ${mapId} to ${bmAddress}") + mapStatus.updateLocation(bmAddress) + invalidateSerializedMapOutputStatusCache() + case None => + logWarning(s"Asked to update map output ${mapId} for untracked map status.") + } + } catch { + case e: java.lang.NullPointerException => + logWarning(s"Unable to update map output for ${mapId}, status removed in-flight") } } diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala index dbd89d646ae54..5f37a1abb1909 100644 --- a/core/src/main/scala/org/apache/spark/SparkConf.scala +++ b/core/src/main/scala/org/apache/spark/SparkConf.scala @@ -568,7 +568,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria // If spark.executor.heartbeatInterval bigger than spark.network.timeout, // it will almost always cause ExecutorLostFailure. See SPARK-22754. require(executorTimeoutThresholdMs > executorHeartbeatIntervalMs, "The value of " + - s"${networkTimeout}=${executorTimeoutThresholdMs}ms must be no less than the value of " + + s"${networkTimeout}=${executorTimeoutThresholdMs}ms must be greater than the value of " + s"${EXECUTOR_HEARTBEAT_INTERVAL.key}=${executorHeartbeatIntervalMs}ms.") } @@ -603,7 +603,7 @@ private[spark] object SparkConf extends Logging { "are no longer accepted. To specify the equivalent now, one may use '64k'."), DeprecatedConfig("spark.rpc", "2.0", "Not used anymore."), DeprecatedConfig("spark.scheduler.executorTaskBlacklistTime", "2.1.0", - "Please use the new blacklisting options, spark.blacklist.*"), + "Please use the new excludedOnFailure options, spark.excludeOnFailure.*"), DeprecatedConfig("spark.yarn.am.port", "2.0.0", "Not used anymore"), DeprecatedConfig("spark.executor.port", "2.0.0", "Not used anymore"), DeprecatedConfig("spark.shuffle.service.index.cache.entries", "2.3.0", @@ -612,7 +612,31 @@ private[spark] object SparkConf extends Logging { DeprecatedConfig("spark.yarn.credentials.file.retention.days", "2.4.0", "Not used anymore."), DeprecatedConfig("spark.yarn.services", "3.0.0", "Feature no longer available."), DeprecatedConfig("spark.executor.plugins", "3.0.0", - "Feature replaced with new plugin API. See Monitoring documentation.") + "Feature replaced with new plugin API. 
See Monitoring documentation."), + DeprecatedConfig("spark.blacklist.enabled", "3.1.0", + "Please use spark.excludeOnFailure.enabled"), + DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerExecutor", "3.1.0", + "Please use spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor"), + DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerNode", "3.1.0", + "Please use spark.excludeOnFailure.task.maxTaskAttemptsPerNode"), + DeprecatedConfig("spark.blacklist.application.maxFailedTasksPerExecutor", "3.1.0", + "Please use spark.excludeOnFailure.application.maxFailedTasksPerExecutor"), + DeprecatedConfig("spark.blacklist.stage.maxFailedTasksPerExecutor", "3.1.0", + "Please use spark.excludeOnFailure.stage.maxFailedTasksPerExecutor"), + DeprecatedConfig("spark.blacklist.application.maxFailedExecutorsPerNode", "3.1.0", + "Please use spark.excludeOnFailure.application.maxFailedExecutorsPerNode"), + DeprecatedConfig("spark.blacklist.stage.maxFailedExecutorsPerNode", "3.1.0", + "Please use spark.excludeOnFailure.stage.maxFailedExecutorsPerNode"), + DeprecatedConfig("spark.blacklist.timeout", "3.1.0", + "Please use spark.excludeOnFailure.timeout"), + DeprecatedConfig("spark.blacklist.application.fetchFailure.enabled", "3.1.0", + "Please use spark.excludeOnFailure.application.fetchFailure.enabled"), + DeprecatedConfig("spark.scheduler.blacklist.unschedulableTaskSetTimeout", "3.1.0", + "Please use spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout"), + DeprecatedConfig("spark.blacklist.killBlacklistedExecutors", "3.1.0", + "Please use spark.excludeOnFailure.killExcludedExecutors"), + DeprecatedConfig("spark.yarn.blacklist.executor.launch.blacklisting.enabled", "3.1.0", + "Please use spark.yarn.executor.launch.excludeOnFailure.enabled") ) Map(configs.map { cfg => (cfg.key -> cfg) } : _*) diff --git a/core/src/main/scala/org/apache/spark/SparkContext.scala b/core/src/main/scala/org/apache/spark/SparkContext.scala index 85a24acb97c07..f6e8a5694dbdf 100644 --- a/core/src/main/scala/org/apache/spark/SparkContext.scala +++ b/core/src/main/scala/org/apache/spark/SparkContext.scala @@ -22,6 +22,7 @@ import java.net.URI import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID} import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference} +import javax.ws.rs.core.UriBuilder import scala.collection.JavaConverters._ import scala.collection.Map @@ -39,10 +40,10 @@ import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, Sequence import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob} import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat} -import org.apache.spark.annotation.DeveloperApi +import org.apache.spark.annotation.{DeveloperApi, Experimental} import org.apache.spark.broadcast.Broadcast import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil} -import org.apache.spark.executor.{ExecutorMetrics, ExecutorMetricsSource} +import org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource} import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ @@ -221,6 +222,7 @@ class SparkContext(config: SparkConf) extends Logging { private var _listenerBusStarted: Boolean = false private var _jars: Seq[String] = _ private var _files: Seq[String] = _ + private var 
_archives: Seq[String] = _ private var _shutdownHookRef: AnyRef = _ private var _statusStore: AppStatusStore = _ private var _heartbeater: Heartbeater = _ @@ -246,6 +248,7 @@ class SparkContext(config: SparkConf) extends Logging { def jars: Seq[String] = _jars def files: Seq[String] = _files + def archives: Seq[String] = _archives def master: String = _conf.get("spark.master") def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE) def appName: String = _conf.get("spark.app.name") @@ -278,6 +281,7 @@ class SparkContext(config: SparkConf) extends Logging { // Used to store a URL for each static file/jar together with the file's local timestamp private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala + private[spark] val addedArchives = new ConcurrentHashMap[String, Long]().asScala private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala // Keeps track of all persisted RDDs @@ -422,6 +426,7 @@ class SparkContext(config: SparkConf) extends Logging { _jars = Utils.getUserJars(_conf) _files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty)) .toSeq.flatten + _archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten _eventLogDir = if (isEventLogEnabled) { @@ -506,6 +511,13 @@ class SparkContext(config: SparkConf) extends Logging { } } + if (archives != null) { + archives.foreach(file => addFile(file, false, true, isArchive = true)) + if (addedArchives.nonEmpty) { + _conf.set("spark.app.initial.archive.urls", addedArchives.keys.toSeq.mkString(",")) + } + } + _executorMemory = _conf.getOption(EXECUTOR_MEMORY.key) .orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY"))) .orElse(Option(System.getenv("SPARK_MEM")) @@ -570,10 +582,13 @@ class SparkContext(config: SparkConf) extends Logging { _applicationAttemptId = _taskScheduler.applicationAttemptId() _conf.set("spark.app.id", _applicationId) if (_conf.get(UI_REVERSE_PROXY)) { - System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId) + val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL.key, "").stripSuffix("/") + + "/proxy/" + _applicationId + System.setProperty("spark.ui.proxyBase", proxyUrl) } _ui.foreach(_.setAppId(_applicationId)) _env.blockManager.initialize(_applicationId) + FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf) // The metrics system for Driver need to be set spark.app.id to app ID. // So it should start after we get app ID from the task scheduler and set spark.app.id. @@ -623,6 +638,9 @@ class SparkContext(config: SparkConf) extends Logging { // Post init _taskScheduler.postStartHook() + if (isLocal) { + _env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly) + } _env.metricsSystem.registerSource(_dagScheduler.metricsSource) _env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager)) _env.metricsSystem.registerSource(new JVMCPUSource()) @@ -1515,6 +1533,36 @@ class SparkContext(config: SparkConf) extends Logging { */ def listFiles(): Seq[String] = addedFiles.keySet.toSeq + /** + * :: Experimental :: + * Add an archive to be downloaded and unpacked with this Spark job on every node. + * + * If an archive is added during execution, it will not be available until the next TaskSet + * starts. + * + * @param path can be either a local file, a file in HDFS (or other Hadoop-supported + * filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, + * use `SparkFiles.get(paths-to-files)` to find its download/unpacked location. 
+ * The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar. + * + * @note A path can be added only once. Subsequent additions of the same path are ignored. + * + * @since 3.1.0 + */ + @Experimental + def addArchive(path: String): Unit = { + addFile(path, false, false, isArchive = true) + } + + /** + * :: Experimental :: + * Returns a list of archive paths that are added to resources. + * + * @since 3.1.0 + */ + @Experimental + def listArchives(): Seq[String] = addedArchives.keySet.toSeq + /** * Add a file to be downloaded with this Spark job on every node. * @@ -1532,20 +1580,26 @@ class SparkContext(config: SparkConf) extends Logging { addFile(path, recursive, false) } - private def addFile(path: String, recursive: Boolean, addedOnSubmit: Boolean): Unit = { - val uri = new Path(path).toUri + private def addFile( + path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false + ): Unit = { + val uri = if (!isArchive) { + new Path(path).toUri + } else { + Utils.resolveURI(path) + } val schemeCorrectedURI = uri.getScheme match { case null => new File(path).getCanonicalFile.toURI case "local" => - logWarning("File with 'local' scheme is not supported to add to file server, since " + - "it is already available on every node.") + logWarning(s"File with 'local' scheme $path is not supported to add to file server, " + + s"since it is already available on every node.") return case _ => uri } val hadoopPath = new Path(schemeCorrectedURI) val scheme = schemeCorrectedURI.getScheme - if (!Array("http", "https", "ftp").contains(scheme)) { + if (!Array("http", "https", "ftp").contains(scheme) && !isArchive) { val fs = hadoopPath.getFileSystem(hadoopConfiguration) val isDir = fs.getFileStatus(hadoopPath).isDirectory if (!isLocal && scheme == "file" && isDir) { @@ -1563,20 +1617,40 @@ class SparkContext(config: SparkConf) extends Logging { val key = if (!isLocal && scheme == "file") { env.rpcEnv.fileServer.addFile(new File(uri.getPath)) + } else if (uri.getScheme == null) { + schemeCorrectedURI.toString + } else if (isArchive) { + uri.toString } else { - if (uri.getScheme == null) { - schemeCorrectedURI.toString - } else { - path - } + path } + val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis - if (addedFiles.putIfAbsent(key, timestamp).isEmpty) { + if (!isArchive && addedFiles.putIfAbsent(key, timestamp).isEmpty) { logInfo(s"Added file $path at $key with timestamp $timestamp") // Fetch the file locally so that closures which are run on the driver can still use the // SparkFiles API to access files. Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf, - env.securityManager, hadoopConfiguration, timestamp, useCache = false) + hadoopConfiguration, timestamp, useCache = false) + postEnvironmentUpdate() + } else if ( + isArchive && + addedArchives.putIfAbsent( + UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString, + timestamp).isEmpty) { + logInfo(s"Added archive $path at $key with timestamp $timestamp") + // If the scheme is file, use URI to simply copy instead of downloading. 
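Stepping back from the internals for a moment, the new archive support can be exercised roughly as follows; this is a hedged sketch in which the archive path and the `#data` alias are hypothetical, and the archive must already exist locally:

```scala
import org.apache.spark.{SparkConf, SparkContext, SparkFiles}

val sc = new SparkContext(
  new SparkConf().setMaster("local[2]").setAppName("archive-demo"))

// Hypothetical archive; the `#data` fragment names the unpacked directory.
sc.addArchive("/tmp/resources.zip#data")
sc.listArchives().foreach(println)

// The unpacked directory is resolved through SparkFiles, on the driver and on executors.
val unpackedDir = SparkFiles.get("data")
```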
+ val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key) + val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build() + val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, + hadoopConfiguration, timestamp, useCache = false, shouldUntar = false) + val dest = new File( + SparkFiles.getRootDirectory(), + if (uri.getFragment != null) uri.getFragment else source.getName) + logInfo( + s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") + Utils.deleteRecursively(dest) + Utils.unpack(source, dest) postEnvironmentUpdate() } else { logWarning(s"The path $path has been added already. Overwriting of added paths " + @@ -1855,7 +1929,7 @@ class SparkContext(config: SparkConf) extends Logging { } private def addJar(path: String, addedOnSubmit: Boolean): Unit = { - def addLocalJarFile(file: File): String = { + def addLocalJarFile(file: File): Seq[String] = { try { if (!file.exists()) { throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found") @@ -1864,15 +1938,15 @@ class SparkContext(config: SparkConf) extends Logging { throw new IllegalArgumentException( s"Directory ${file.getAbsoluteFile} is not allowed for addJar") } - env.rpcEnv.fileServer.addJar(file) + Seq(env.rpcEnv.fileServer.addJar(file)) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) - null + Nil } } - def checkRemoteJarFile(path: String): String = { + def checkRemoteJarFile(path: String): Seq[String] = { val hadoopPath = new Path(path) val scheme = hadoopPath.toUri.getScheme if (!Array("http", "https", "ftp").contains(scheme)) { @@ -1881,32 +1955,33 @@ class SparkContext(config: SparkConf) extends Logging { if (!fs.exists(hadoopPath)) { throw new FileNotFoundException(s"Jar ${path} not found") } - if (fs.isDirectory(hadoopPath)) { + if (fs.getFileStatus(hadoopPath).isDirectory) { throw new IllegalArgumentException( s"Directory ${path} is not allowed for addJar") } - path + Seq(path) } catch { case NonFatal(e) => logError(s"Failed to add $path to Spark environment", e) - null + Nil } } else { - path + Seq(path) } } if (path == null || path.isEmpty) { logWarning("null or empty path specified as parameter to addJar") } else { - val key = if (path.contains("\\")) { + val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) { // For local paths with backslashes on Windows, URI throws an exception - addLocalJarFile(new File(path)) + (addLocalJarFile(new File(path)), "local") } else { val uri = new Path(path).toUri // SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies Utils.validateURL(uri) - uri.getScheme match { + val uriScheme = uri.getScheme + val jarPaths = uriScheme match { // A JAR file which exists only on the driver node case null => // SPARK-22585 path without schema is not url encoded @@ -1914,18 +1989,28 @@ class SparkContext(config: SparkConf) extends Logging { // A JAR file which exists only on the driver node case "file" => addLocalJarFile(new File(uri.getPath)) // A JAR file which exists locally on every worker node - case "local" => "file:" + uri.getPath + case "local" => Seq("file:" + uri.getPath) + case "ivy" => + // Since `new Path(path).toUri` will lose query information, + // so here we use `URI.create(path)` + DependencyUtils.resolveMavenDependencies(URI.create(path)) + .flatMap(jar => addLocalJarFile(new File(jar))) case _ => checkRemoteJarFile(path) } + (jarPaths, uriScheme) } - if (key != null) { + if (keys.nonEmpty) { val 
timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis - if (addedJars.putIfAbsent(key, timestamp).isEmpty) { - logInfo(s"Added JAR $path at $key with timestamp $timestamp") + val (added, existed) = keys.partition(addedJars.putIfAbsent(_, timestamp).isEmpty) + if (added.nonEmpty) { + val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" + logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp") postEnvironmentUpdate() - } else { - logWarning(s"The jar $path has been added already. Overwriting of added jars " + - "is not supported in the current version.") + } + if (existed.nonEmpty) { + val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI" + logInfo(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." + + " Overwriting of added jar is not supported in the current version.") } } } @@ -2489,8 +2574,9 @@ class SparkContext(config: SparkConf) extends Logging { val schedulingMode = getSchedulingMode.toString val addedJarPaths = addedJars.keys.toSeq val addedFilePaths = addedFiles.keys.toSeq + val addedArchivePaths = addedArchives.keys.toSeq val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration, - schedulingMode, addedJarPaths, addedFilePaths) + schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths) val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails) listenerBus.post(environmentUpdate) } diff --git a/core/src/main/scala/org/apache/spark/SparkEnv.scala b/core/src/main/scala/org/apache/spark/SparkEnv.scala index d543359f4dedf..9fc60ac3990fc 100644 --- a/core/src/main/scala/org/apache/spark/SparkEnv.scala +++ b/core/src/main/scala/org/apache/spark/SparkEnv.scala @@ -454,7 +454,8 @@ object SparkEnv extends Logging { hadoopConf: Configuration, schedulingMode: String, addedJars: Seq[String], - addedFiles: Seq[String]): Map[String, Seq[(String, String)]] = { + addedFiles: Seq[String], + addedArchives: Seq[String]): Map[String, Seq[(String, String)]] = { import Properties._ val jvmInformation = Seq( @@ -484,7 +485,7 @@ object SparkEnv extends Logging { .split(File.pathSeparator) .filterNot(_.isEmpty) .map((_, "System Classpath")) - val addedJarsAndFiles = (addedJars ++ addedFiles).map((_, "Added By User")) + val addedJarsAndFiles = (addedJars ++ addedFiles ++ addedArchives).map((_, "Added By User")) val classPaths = (addedJarsAndFiles ++ classPathEntries).sorted // Add Hadoop properties, it will not ignore configs including in Spark. Some spark diff --git a/core/src/main/scala/org/apache/spark/TaskEndReason.scala b/core/src/main/scala/org/apache/spark/TaskEndReason.scala index 6606d317e7b86..5dc70e9834b0b 100644 --- a/core/src/main/scala/org/apache/spark/TaskEndReason.scala +++ b/core/src/main/scala/org/apache/spark/TaskEndReason.scala @@ -98,10 +98,11 @@ case class FetchFailed( /** * Fetch failures lead to a different failure handling path: (1) we don't abort the stage after * 4 task failures, instead we immediately go back to the stage which generated the map output, - * and regenerate the missing data. (2) we don't count fetch failures for blacklisting, since - * presumably its not the fault of the executor where the task ran, but the executor which - * stored the data. This is especially important because we might rack up a bunch of - * fetch-failures in rapid succession, on all nodes of the cluster, due to one bad node. + * and regenerate the missing data. 
(2) we don't count fetch failures from executors excluded + * due to too many task failures, since presumably its not the fault of the executor where + * the task ran, but the executor which stored the data. This is especially important because + * we might rack up a bunch of fetch-failures in rapid succession, on all nodes of the cluster, + * due to one bad node. */ override def countTowardsTaskFailures: Boolean = false } @@ -142,12 +143,12 @@ case class ExceptionFailure( private[spark] def this( e: Throwable, accumUpdates: Seq[AccumulableInfo], - preserveCause: Boolean) { + preserveCause: Boolean) = { this(e.getClass.getName, e.getMessage, e.getStackTrace, Utils.exceptionString(e), if (preserveCause) Some(new ThrowableSerializationWrapper(e)) else None, accumUpdates) } - private[spark] def this(e: Throwable, accumUpdates: Seq[AccumulableInfo]) { + private[spark] def this(e: Throwable, accumUpdates: Seq[AccumulableInfo]) = { this(e, accumUpdates, preserveCause = true) } diff --git a/core/src/main/scala/org/apache/spark/TestUtils.scala b/core/src/main/scala/org/apache/spark/TestUtils.scala index 6947d1c72f12b..9632d6c691085 100644 --- a/core/src/main/scala/org/apache/spark/TestUtils.scala +++ b/core/src/main/scala/org/apache/spark/TestUtils.scala @@ -20,13 +20,14 @@ package org.apache.spark import java.io.{ByteArrayInputStream, File, FileInputStream, FileOutputStream} import java.net.{HttpURLConnection, URI, URL} import java.nio.charset.StandardCharsets -import java.nio.file.{Files => JavaFiles} +import java.nio.file.{Files => JavaFiles, Paths} import java.nio.file.attribute.PosixFilePermission.{OWNER_EXECUTE, OWNER_READ, OWNER_WRITE} import java.security.SecureRandom import java.security.cert.X509Certificate import java.util.{Arrays, EnumSet, Locale, Properties} import java.util.concurrent.{TimeoutException, TimeUnit} import java.util.jar.{JarEntry, JarOutputStream, Manifest} +import java.util.regex.Pattern import javax.net.ssl._ import javax.tools.{JavaFileObject, SimpleJavaFileObject, ToolProvider} @@ -37,6 +38,7 @@ import scala.sys.process.{Process, ProcessLogger} import scala.util.Try import com.google.common.io.{ByteStreams, Files} +import org.apache.commons.lang3.StringUtils import org.apache.log4j.PropertyConfigurator import org.json4s.JsonAST.JValue import org.json4s.jackson.JsonMethods.{compact, render} @@ -255,6 +257,37 @@ private[spark] object TestUtils { attempt.isSuccess && attempt.get == 0 } + def isPythonVersionAtLeast38(): Boolean = { + val attempt = if (Utils.isWindows) { + Try(Process(Seq("cmd.exe", "/C", "python3 --version")) + .run(ProcessLogger(s => s.startsWith("Python 3.8") || s.startsWith("Python 3.9"))) + .exitValue()) + } else { + Try(Process(Seq("sh", "-c", "python3 --version")) + .run(ProcessLogger(s => s.startsWith("Python 3.8") || s.startsWith("Python 3.9"))) + .exitValue()) + } + attempt.isSuccess && attempt.get == 0 + } + + /** + * Get the absolute path from the executable. This implementation was borrowed from + * `spark/dev/sparktestsupport/shellutils.py`. 
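Returning to the blacklist-to-excludeOnFailure migration deprecated earlier in this patch, the replacement keys can be set directly; a short sketch using the new names (values are illustrative):

```scala
import org.apache.spark.SparkConf

val conf = new SparkConf()
  // was spark.blacklist.enabled
  .set("spark.excludeOnFailure.enabled", "true")
  // was spark.blacklist.timeout
  .set("spark.excludeOnFailure.timeout", "1h")
  // was spark.blacklist.task.maxTaskAttemptsPerExecutor
  .set("spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor", "2")
```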
+ */ + def getAbsolutePathFromExecutable(executable: String): Option[String] = { + val command = if (Utils.isWindows) s"$executable.exe" else executable + if (command.split(File.separator, 2).length == 1 && + JavaFiles.isRegularFile(Paths.get(command)) && + JavaFiles.isExecutable(Paths.get(command))) { + Some(Paths.get(command).toAbsolutePath.toString) + } else { + sys.env("PATH").split(Pattern.quote(File.pathSeparator)) + .map(path => Paths.get(s"${StringUtils.strip(path, "\"")}${File.separator}$command")) + .find(p => JavaFiles.isRegularFile(p) && JavaFiles.isExecutable(p)) + .map(_.toString) + } + } + /** * Returns the response code from an HTTP(S) URL. */ diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala index 1bcd203f2e435..6dd36309378cc 100644 --- a/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala +++ b/core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala @@ -941,7 +941,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)]) /** * Return a RDD containing only the elements in the inclusive range `lower` to `upper`. * If the RDD has been partitioned using a `RangePartitioner`, then this operation can be - * performed efficiently by only scanning the partitions that might containt matching elements. + * performed efficiently by only scanning the partitions that might contain matching elements. * Otherwise, a standard `filter` is applied to all partitions. * * @since 3.1.0 @@ -955,7 +955,7 @@ class JavaPairRDD[K, V](val rdd: RDD[(K, V)]) /** * Return a RDD containing only the elements in the inclusive range `lower` to `upper`. * If the RDD has been partitioned using a `RangePartitioner`, then this operation can be - * performed efficiently by only scanning the partitions that might containt matching elements. + * performed efficiently by only scanning the partitions that might contain matching elements. * Otherwise, a standard `filter` is applied to all partitions. * * @since 3.1.0 diff --git a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala index 89b33945dfb08..306af24ada584 100644 --- a/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala +++ b/core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala @@ -78,7 +78,7 @@ trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable { /** * Internal method to this RDD; will read from cache if applicable, or otherwise compute it. - * This should ''not'' be called by users directly, but is available for implementors of custom + * This should ''not'' be called by users directly, but is available for implementers of custom * subclasses of RDD. 
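The `filterByRange` contract clarified in the JavaPairRDD doc fix above can be seen end to end in plain Scala; a small sketch assuming an existing SparkContext `sc`:

```scala
// After sortByKey (which installs a RangePartitioner), filterByRange only scans
// the partitions whose key range can overlap [100, 200].
val pairs  = sc.parallelize(1 to 1000).map(i => (i, s"value-$i"))
val sorted = pairs.sortByKey()
val slice  = sorted.filterByRange(100, 200).collect()
```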
*/ def iterator(split: Partition, taskContext: TaskContext): JIterator[T] = diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala index 86a1ac31c0845..6d4dc3d3dfe92 100644 --- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala +++ b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala @@ -48,14 +48,14 @@ import org.apache.spark.util._ private[spark] class PythonRDD( parent: RDD[_], func: PythonFunction, - preservePartitoning: Boolean, + preservePartitioning: Boolean, isFromBarrier: Boolean = false) extends RDD[Array[Byte]](parent) { override def getPartitions: Array[Partition] = firstParent.partitions override val partitioner: Option[Partitioner] = { - if (preservePartitoning) firstParent.partitioner else None + if (preservePartitioning) firstParent.partitioner else None } val asJavaRDD: JavaRDD[Array[Byte]] = JavaRDD.fromRDD(this) @@ -837,7 +837,7 @@ private[spark] class PythonBroadcast(@transient var path: String) extends Serial * We might be serializing a really large object from python -- we don't want * python to buffer the whole thing in memory, nor can it write to a file, * so we don't know the length in advance. So python writes it in chunks, each chunk - * preceeded by a length, till we get a "length" of -1 which serves as EOF. + * preceded by a length, till we get a "length" of -1 which serves as EOF. * * Tested from python tests. */ diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala index d7a09b599794e..f49cb3c2b8836 100644 --- a/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala +++ b/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala @@ -24,13 +24,8 @@ import java.nio.charset.StandardCharsets.UTF_8 import java.util.concurrent.atomic.AtomicBoolean import scala.collection.JavaConverters._ -import scala.collection.mutable.ArrayBuffer import scala.util.control.NonFatal -import org.json4s.JsonAST._ -import org.json4s.JsonDSL._ -import org.json4s.jackson.JsonMethods.{compact, render} - import org.apache.spark._ import org.apache.spark.internal.Logging import org.apache.spark.internal.config.{BUFFER_SIZE, EXECUTOR_CORES} @@ -85,7 +80,9 @@ private[spark] abstract class BasePythonRunner[IN, OUT]( private val conf = SparkEnv.get.conf protected val bufferSize: Int = conf.get(BUFFER_SIZE) + protected val authSocketTimeout = conf.get(PYTHON_AUTH_SOCKET_TIMEOUT) private val reuseWorker = conf.get(PYTHON_WORKER_REUSE) + protected val simplifiedTraceback: Boolean = false // All the Python functions should have the same exec, version and envvars. protected val envVars: java.util.Map[String, String] = funcs.head.funcs.head.envVars @@ -133,6 +130,9 @@ private[spark] abstract class BasePythonRunner[IN, OUT]( if (reuseWorker) { envVars.put("SPARK_REUSE_WORKER", "1") } + if (simplifiedTraceback) { + envVars.put("SPARK_SIMPLIFIED_TRACEBACK", "1") + } // SPARK-30299 this could be wrong with standalone mode when executor // cores might not be correct because it defaults to all cores on the box. 
val execCores = execCoresProp.map(_.toInt).getOrElse(conf.get(EXECUTOR_CORES)) @@ -140,6 +140,7 @@ private[spark] abstract class BasePythonRunner[IN, OUT]( if (workerMemoryMb.isDefined) { envVars.put("PYSPARK_EXECUTOR_MEMORY_MB", workerMemoryMb.get.toString) } + envVars.put("SPARK_AUTH_SOCKET_TIMEOUT", authSocketTimeout.toString) envVars.put("SPARK_BUFFER_SIZE", bufferSize.toString) val worker: Socket = env.createPythonWorker(pythonExec, envVars.asScala.toMap) // Whether is the worker released into idle pool or closed. When any codes try to release or diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala index 527d0d6d3a48d..717eb4db6dd93 100644 --- a/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala +++ b/core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala @@ -27,7 +27,7 @@ import org.apache.spark.SparkContext import org.apache.spark.api.java.{JavaRDD, JavaSparkContext} private[spark] object PythonUtils { - val PY4J_ZIP_NAME = "py4j-0.10.9-src.zip" + val PY4J_ZIP_NAME = "py4j-0.10.9.1-src.zip" /** Get the PYTHONPATH for PySpark, either from SPARK_HOME, if it is set, or from our JAR */ def sparkPythonPath: String = { @@ -85,4 +85,12 @@ private[spark] object PythonUtils { def getBroadcastThreshold(sc: JavaSparkContext): Long = { sc.conf.get(org.apache.spark.internal.config.BROADCAST_FOR_UDF_COMPRESSION_THRESHOLD) } + + def getPythonAuthSocketTimeout(sc: JavaSparkContext): Long = { + sc.conf.get(org.apache.spark.internal.config.Python.PYTHON_AUTH_SOCKET_TIMEOUT) + } + + def getSparkBufferSize(sc: JavaSparkContext): Int = { + sc.conf.get(org.apache.spark.internal.config.BUFFER_SIZE) + } } diff --git a/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala b/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala index 5a6fa507963f0..dc2587a62ae40 100644 --- a/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala +++ b/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala @@ -17,8 +17,6 @@ package org.apache.spark.api.python -import java.nio.ByteOrder -import java.nio.charset.StandardCharsets import java.util.{ArrayList => JArrayList} import scala.collection.JavaConverters._ diff --git a/core/src/main/scala/org/apache/spark/api/r/RRunner.scala b/core/src/main/scala/org/apache/spark/api/r/RRunner.scala index 20ab6fc2f348d..41c66024272b9 100644 --- a/core/src/main/scala/org/apache/spark/api/r/RRunner.scala +++ b/core/src/main/scala/org/apache/spark/api/r/RRunner.scala @@ -19,7 +19,6 @@ package org.apache.spark.api.r import java.io._ -import org.apache.spark._ import org.apache.spark.broadcast.Broadcast /** diff --git a/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala b/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala index 77fbbc08c2103..1024d9b5060bc 100644 --- a/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala +++ b/core/src/main/scala/org/apache/spark/broadcast/TorrentBroadcast.scala @@ -133,22 +133,30 @@ private[spark] class TorrentBroadcast[T: ClassTag](obj: T, id: Long) if (!blockManager.putSingle(broadcastId, value, MEMORY_AND_DISK, tellMaster = false)) { throw new SparkException(s"Failed to store $broadcastId in BlockManager") } - val blocks = - TorrentBroadcast.blockifyObject(value, blockSize, SparkEnv.get.serializer, compressionCodec) - if (checksumEnabled) { - checksums = new Array[Int](blocks.length) - } - blocks.zipWithIndex.foreach { case (block, i) => + try { + 
val blocks = + TorrentBroadcast.blockifyObject(value, blockSize, SparkEnv.get.serializer, compressionCodec) if (checksumEnabled) { - checksums(i) = calcChecksum(block) + checksums = new Array[Int](blocks.length) } - val pieceId = BroadcastBlockId(id, "piece" + i) - val bytes = new ChunkedByteBuffer(block.duplicate()) - if (!blockManager.putBytes(pieceId, bytes, MEMORY_AND_DISK_SER, tellMaster = true)) { - throw new SparkException(s"Failed to store $pieceId of $broadcastId in local BlockManager") + blocks.zipWithIndex.foreach { case (block, i) => + if (checksumEnabled) { + checksums(i) = calcChecksum(block) + } + val pieceId = BroadcastBlockId(id, "piece" + i) + val bytes = new ChunkedByteBuffer(block.duplicate()) + if (!blockManager.putBytes(pieceId, bytes, MEMORY_AND_DISK_SER, tellMaster = true)) { + throw new SparkException(s"Failed to store $pieceId of $broadcastId " + + s"in local BlockManager") + } } + blocks.length + } catch { + case t: Throwable => + logError(s"Failed to store broadcast $broadcastId, removing all pieces of the broadcast") + blockManager.removeBroadcast(id, tellMaster = true) + throw t } - blocks.length } /** Fetch torrent blocks from the driver and/or other executors. */ diff --git a/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala b/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala index 83f373d526e90..727cdbc4ef2d1 100644 --- a/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala +++ b/core/src/main/scala/org/apache/spark/deploy/DeployMessage.scala @@ -61,13 +61,35 @@ private[deploy] object DeployMessages { } /** + * An internal message that is used by the Master itself, in order to handle the + * `DecommissionWorkersOnHosts` request from `MasterWebUI` asynchronously. + * @param ids A collection of Worker ids, which should be decommissioned. + */ + case class DecommissionWorkers(ids: Seq[String]) extends DeployMessage + + /** + * A message sent from the Master to a Worker to decommission the Worker. + * It's used for the case where the decommission is triggered at the MasterWebUI. + * + * Note that decommissioning a Worker will cause all the executors on that Worker + * to be decommissioned as well. + */ + object DecommissionWorker extends DeployMessage + + /** + * A message sent by the Worker to itself when it receives a signal, + * indicating that the Worker starts to decommission. + */ + object WorkerDecommissionSigReceived extends DeployMessage + + /** + * A message sent from a Worker to the Master to tell the Master that the Worker has started + * decommissioning. It's used for the case where the decommission is triggered at the Worker.
+ * * @param id the worker id - * @param worker the worker endpoint ref + * @param workerRef the worker endpoint ref */ - case class WorkerDecommission( - id: String, - worker: RpcEndpointRef) - extends DeployMessage + case class WorkerDecommissioning(id: String, workerRef: RpcEndpointRef) extends DeployMessage case class ExecutorStateChanged( appId: String, diff --git a/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala b/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala index 17733d99cd5bc..f697892aacc83 100644 --- a/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala +++ b/core/src/main/scala/org/apache/spark/deploy/JsonProtocol.scala @@ -22,7 +22,6 @@ import org.json4s.JsonDSL._ import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, WorkerStateResponse} import org.apache.spark.deploy.master._ -import org.apache.spark.deploy.master.RecoveryState.MasterState import org.apache.spark.deploy.worker.ExecutorRunner import org.apache.spark.resource.{ResourceInformation, ResourceRequirement} @@ -81,7 +80,7 @@ private[deploy] object JsonProtocol { } /** - * Export the [[ApplicationInfo]] to a Json objec. An [[ApplicationInfo]] consists of the + * Export the [[ApplicationInfo]] to a Json object. An [[ApplicationInfo]] consists of the * information of an application. * * @return a Json object containing the following fields: @@ -208,7 +207,8 @@ private[deploy] object JsonProtocol { * master * `completeddrivers` a list of Json objects of [[DriverInfo]] of the completed drivers * of the master - * `status` status of the master, see [[MasterState]] + * `status` status of the master, + * see [[org.apache.spark.deploy.master.RecoveryState.MasterState]] */ def writeMasterState(obj: MasterStateResponse): JObject = { val aliveWorkers = obj.workers.filter(_.isAlive()) diff --git a/core/src/main/scala/org/apache/spark/deploy/PythonRunner.scala b/core/src/main/scala/org/apache/spark/deploy/PythonRunner.scala index 7ad92da4e055a..c3f73ed745da4 100644 --- a/core/src/main/scala/org/apache/spark/deploy/PythonRunner.scala +++ b/core/src/main/scala/org/apache/spark/deploy/PythonRunner.scala @@ -44,7 +44,7 @@ object PythonRunner { .orElse(sparkConf.get(PYSPARK_PYTHON)) .orElse(sys.env.get("PYSPARK_DRIVER_PYTHON")) .orElse(sys.env.get("PYSPARK_PYTHON")) - .getOrElse("python") + .getOrElse("python3") // Format python file paths before adding them to the PYTHONPATH val formattedPythonFile = formatPath(pythonFile) diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala index 1180501e8c738..6f799a542bc1e 100644 --- a/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala +++ b/core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala @@ -462,6 +462,9 @@ private[spark] object SparkHadoopUtil { for ((key, value) <- conf.getAll if key.startsWith("spark.hadoop.")) { hadoopConf.set(key.substring("spark.hadoop.".length), value) } + if (conf.getOption("spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version").isEmpty) { + hadoopConf.set("mapreduce.fileoutputcommitter.algorithm.version", "1") + } } private def appendSparkHiveConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = { diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala index 8363d570d7320..8bf7795b7bfe4 100644 --- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala +++ 
b/core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala @@ -24,13 +24,13 @@ import java.security.PrivilegedExceptionAction import java.text.ParseException import java.util.{ServiceLoader, UUID} import java.util.jar.JarInputStream +import javax.ws.rs.core.UriBuilder import scala.annotation.tailrec import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer import scala.util.{Properties, Try} -import org.apache.commons.io.FilenameUtils import org.apache.commons.lang3.StringUtils import org.apache.hadoop.conf.{Configuration => HadoopConfiguration} import org.apache.hadoop.fs.{FileSystem, Path} @@ -304,28 +304,29 @@ private[spark] class SparkSubmit extends Logging { // Resolve maven dependencies if there are any and add classpath to jars. Add them to py-files // too for packages that include Python code val resolvedMavenCoordinates = DependencyUtils.resolveMavenDependencies( - args.packagesExclusions, args.packages, args.repositories, args.ivyRepoPath, - args.ivySettingsPath) + packagesTransitive = true, args.packagesExclusions, args.packages, + args.repositories, args.ivyRepoPath, args.ivySettingsPath) - if (!StringUtils.isBlank(resolvedMavenCoordinates)) { + if (resolvedMavenCoordinates.nonEmpty) { // In K8s client mode, when in the driver, add resolved jars early as we might need // them at the submit time for artifact downloading. // For example we might use the dependencies for downloading - // files from a Hadoop Compatible fs eg. S3. In this case the user might pass: + // files from a Hadoop Compatible fs e.g. S3. In this case the user might pass: // --packages com.amazonaws:aws-java-sdk:1.7.4:org.apache.hadoop:hadoop-aws:2.7.6 if (isKubernetesClusterModeDriver) { val loader = getSubmitClassLoader(sparkConf) - for (jar <- resolvedMavenCoordinates.split(",")) { + for (jar <- resolvedMavenCoordinates) { addJarToClasspath(jar, loader) } } else if (isKubernetesCluster) { // We need this in K8s cluster mode so that we can upload local deps // via the k8s application, like in cluster mode driver - childClasspath ++= resolvedMavenCoordinates.split(",") + childClasspath ++= resolvedMavenCoordinates } else { - args.jars = mergeFileLists(args.jars, resolvedMavenCoordinates) + args.jars = mergeFileLists(args.jars, mergeFileLists(resolvedMavenCoordinates: _*)) if (args.isPython || isInternal(args.primaryResource)) { - args.pyFiles = mergeFileLists(args.pyFiles, resolvedMavenCoordinates) + args.pyFiles = mergeFileLists(args.pyFiles, + mergeFileLists(resolvedMavenCoordinates: _*)) } } } @@ -373,24 +374,53 @@ private[spark] class SparkSubmit extends Logging { var localPyFiles: String = null if (deployMode == CLIENT) { localPrimaryResource = Option(args.primaryResource).map { - downloadFile(_, targetDir, sparkConf, hadoopConf, secMgr) + downloadFile(_, targetDir, sparkConf, hadoopConf) }.orNull localJars = Option(args.jars).map { - downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr) + downloadFileList(_, targetDir, sparkConf, hadoopConf) }.orNull localPyFiles = Option(args.pyFiles).map { - downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr) + downloadFileList(_, targetDir, sparkConf, hadoopConf) }.orNull if (isKubernetesClusterModeDriver) { // Replace with the downloaded local jar path to avoid propagating hadoop compatible uris. // Executors will get the jars from the Spark file server. 
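Since `--packages` resolution now yields individual jar paths and `SparkContext.addJar` understands an `ivy://` scheme, here is a hedged sketch of the two user-facing ways to pull a Maven artifact; the coordinate and the exact `ivy://` URI form are illustrative, not taken from this patch:

```scala
import org.apache.spark.SparkConf

// Declarative form: resolved by spark-submit before the application starts.
val conf = new SparkConf()
  .set("spark.jars.packages", "org.apache.spark:spark-avro_2.12:3.1.0")

// Programmatic form: resolved on the driver at call time, assuming an existing
// SparkContext `sc` (the URI shape follows the new "ivy" case in addJar above).
// sc.addJar("ivy://org.apache.spark:spark-avro_2.12:3.1.0")
```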
// Explicitly download the related files here - args.jars = renameResourcesToLocalFS(args.jars, localJars) - val localFiles = Option(args.files).map { - downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr) + args.jars = localJars + val filesLocalFiles = Option(args.files).map { + downloadFileList(_, targetDir, sparkConf, hadoopConf) }.orNull - args.files = renameResourcesToLocalFS(args.files, localFiles) + val archiveLocalFiles = Option(args.archives).map { uris => + val resolvedUris = Utils.stringToSeq(uris).map(Utils.resolveURI) + val localArchives = downloadFileList( + resolvedUris.map( + UriBuilder.fromUri(_).fragment(null).build().toString).mkString(","), + targetDir, sparkConf, hadoopConf) + + // SPARK-33748: this mimics the behaviour of Yarn cluster mode. If the driver is running + // in cluster mode, the archives should be available in the driver's current working + // directory too. + Utils.stringToSeq(localArchives).map(Utils.resolveURI).zip(resolvedUris).map { + case (localArchive, resolvedUri) => + val source = new File(localArchive.getPath) + val dest = new File( + ".", + if (resolvedUri.getFragment != null) resolvedUri.getFragment else source.getName) + logInfo( + s"Unpacking an archive $resolvedUri " + + s"from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") + Utils.deleteRecursively(dest) + Utils.unpack(source, dest) + + // Keep the URIs of local files with the given fragments. + UriBuilder.fromUri( + localArchive).fragment(resolvedUri.getFragment).build().toString + }.mkString(",") + }.orNull + args.files = filesLocalFiles + args.archives = archiveLocalFiles + args.pyFiles = localPyFiles } } @@ -417,7 +447,7 @@ private[spark] class SparkSubmit extends Logging { if (file.exists()) { file.toURI.toString } else { - downloadFile(resource, targetDir, sparkConf, hadoopConf, secMgr) + downloadFile(resource, targetDir, sparkConf, hadoopConf) } case _ => uri.toString } @@ -606,6 +636,8 @@ private[spark] class SparkSubmit extends Logging { confKey = CORES_MAX.key), OptionAssigner(args.files, LOCAL | STANDALONE | MESOS | KUBERNETES, ALL_DEPLOY_MODES, confKey = FILES.key), + OptionAssigner(args.archives, LOCAL | STANDALONE | MESOS | KUBERNETES, ALL_DEPLOY_MODES, + confKey = ARCHIVES.key), OptionAssigner(args.jars, LOCAL, CLIENT, confKey = JARS.key), OptionAssigner(args.jars, STANDALONE | MESOS | KUBERNETES, ALL_DEPLOY_MODES, confKey = JARS.key), @@ -795,6 +827,7 @@ private[spark] class SparkSubmit extends Logging { val pathConfigs = Seq( JARS.key, FILES.key, + ARCHIVES.key, "spark.yarn.dist.files", "spark.yarn.dist.archives", "spark.yarn.dist.jars") @@ -823,21 +856,6 @@ private[spark] class SparkSubmit extends Logging { (childArgs.toSeq, childClasspath.toSeq, sparkConf, childMainClass) } - private def renameResourcesToLocalFS(resources: String, localResources: String): String = { - if (resources != null && localResources != null) { - val localResourcesSeq = Utils.stringToSeq(localResources) - Utils.stringToSeq(resources).map { resource => - val filenameRemote = FilenameUtils.getName(new URI(resource).getPath) - localResourcesSeq.find { localUri => - val filenameLocal = FilenameUtils.getName(new URI(localUri).getPath) - filenameRemote == filenameLocal - }.getOrElse(resource) - }.mkString(",") - } else { - resources - } - } - // [SPARK-20328]. HadoopRDD calls into a Hadoop library that fetches delegation tokens with // renewer set to the YARN ResourceManager. Since YARN isn't configured in Mesos or Kubernetes // mode, we must trick it into thinking we're YARN. 
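The same distribution mechanism is available declaratively through the archives option wired up here; a minimal sketch in which the path and the `#environment` alias are hypothetical, with `spark.archives` being the key behind the `ARCHIVES` config entry:

```scala
import org.apache.spark.SparkConf

val conf = new SparkConf()
  // Downloaded and unpacked into each executor's working directory as `environment/`.
  .set("spark.archives", "hdfs:///apps/deps/pyspark-env.tar.gz#environment")
```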
@@ -1160,33 +1178,42 @@ private[spark] object SparkSubmitUtils { val br: IBiblioResolver = new IBiblioResolver br.setM2compatible(true) br.setUsepoms(true) + val defaultInternalRepo : Option[String] = sys.env.get("DEFAULT_ARTIFACT_REPOSITORY") + br.setRoot(defaultInternalRepo.getOrElse("https://repo1.maven.org/maven2/")) br.setName("central") cr.add(br) val sp: IBiblioResolver = new IBiblioResolver sp.setM2compatible(true) sp.setUsepoms(true) - sp.setRoot("https://dl.bintray.com/spark-packages/maven") + sp.setRoot(sys.env.getOrElse( + "DEFAULT_ARTIFACT_REPOSITORY", "https://dl.bintray.com/spark-packages/maven")) sp.setName("spark-packages") cr.add(sp) cr } /** - * Output a comma-delimited list of paths for the downloaded jars to be added to the classpath + * Output a list of paths for the downloaded jars to be added to the classpath * (will append to jars in SparkSubmit). * @param artifacts Sequence of dependencies that were resolved and retrieved - * @param cacheDirectory directory where jars are cached - * @return a comma-delimited list of paths for the dependencies + * @param cacheDirectory Directory where jars are cached + * @return List of paths for the dependencies */ def resolveDependencyPaths( artifacts: Array[AnyRef], - cacheDirectory: File): String = { + cacheDirectory: File): Seq[String] = { artifacts.map { artifactInfo => val artifact = artifactInfo.asInstanceOf[Artifact].getModuleRevisionId + val extraAttrs = artifactInfo.asInstanceOf[Artifact].getExtraAttributes + val classifier = if (extraAttrs.containsKey("classifier")) { + "-" + extraAttrs.get("classifier") + } else { + "" + } cacheDirectory.getAbsolutePath + File.separator + - s"${artifact.getOrganisation}_${artifact.getName}-${artifact.getRevision}.jar" - }.mkString(",") + s"${artifact.getOrganisation}_${artifact.getName}-${artifact.getRevision}$classifier.jar" + } } /** Adds the given maven coordinates to Ivy's module descriptor. 
*/ @@ -1334,17 +1361,19 @@ private[spark] object SparkSubmitUtils { * Resolves any dependencies that were supplied through maven coordinates * @param coordinates Comma-delimited string of maven coordinates * @param ivySettings An IvySettings containing resolvers to use + * @param transitive Whether resolving transitive dependencies, default is true * @param exclusions Exclusions to apply when resolving transitive dependencies - * @return The comma-delimited path to the jars of the given maven artifacts including their + * @return Seq of path to the jars of the given maven artifacts including their * transitive dependencies */ def resolveMavenCoordinates( coordinates: String, ivySettings: IvySettings, + transitive: Boolean, exclusions: Seq[String] = Nil, - isTest: Boolean = false): String = { + isTest: Boolean = false): Seq[String] = { if (coordinates == null || coordinates.trim.isEmpty) { - "" + Nil } else { val sysOut = System.out // Default configuration name for ivy @@ -1370,7 +1399,7 @@ private[spark] object SparkSubmitUtils { val ivy = Ivy.newInstance(ivySettings) // Set resolve options to download transitive dependencies as well val resolveOptions = new ResolveOptions - resolveOptions.setTransitive(true) + resolveOptions.setTransitive(transitive) val retrieveOptions = new RetrieveOptions // Turn downloading and logging off for testing if (isTest) { diff --git a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala index 3090a3b10a97c..9da1a73bba692 100644 --- a/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala +++ b/core/src/main/scala/org/apache/spark/deploy/SparkSubmitArguments.scala @@ -183,6 +183,7 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S name = Option(name).orElse(sparkProperties.get("spark.app.name")).orNull jars = Option(jars).orElse(sparkProperties.get(config.JARS.key)).orNull files = Option(files).orElse(sparkProperties.get(config.FILES.key)).orNull + archives = Option(archives).orElse(sparkProperties.get(config.ARCHIVES.key)).orNull pyFiles = Option(pyFiles).orElse(sparkProperties.get(config.SUBMIT_PYTHON_FILES.key)).orNull ivyRepoPath = sparkProperties.get("spark.jars.ivy").orNull ivySettingsPath = sparkProperties.get("spark.jars.ivySettings") @@ -512,6 +513,8 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S | --files FILES Comma-separated list of files to be placed in the working | directory of each executor. File paths of these files | in executors can be accessed via SparkFiles.get(fileName). + | --archives ARCHIVES Comma-separated list of archives to be extracted into the + | working directory of each executor. | | --conf, -c PROP=VALUE Arbitrary Spark configuration property. | --properties-file FILE Path to a file from which to load extra properties. If not @@ -562,8 +565,6 @@ private[deploy] class SparkSubmitArguments(args: Seq[String], env: Map[String, S | | Spark on YARN only: | --queue QUEUE_NAME The YARN queue to submit to (Default: "default"). - | --archives ARCHIVES Comma separated list of archives to be extracted into the - | working directory of each executor. 
""".stripMargin ) diff --git a/core/src/main/scala/org/apache/spark/deploy/history/ApplicationHistoryProvider.scala b/core/src/main/scala/org/apache/spark/deploy/history/ApplicationHistoryProvider.scala index 472b52957ed7f..f3f7db6bb0aba 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/ApplicationHistoryProvider.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/ApplicationHistoryProvider.scala @@ -150,4 +150,11 @@ private[history] abstract class ApplicationHistoryProvider { */ def onUIDetached(appId: String, attemptId: Option[String], ui: SparkUI): Unit = { } + /** + * Returns true if the given user has permission to view the UI of the given attempt. + * + * @throws NoSuchElementException if the given attempt doesn't exist + */ + def checkUIViewPermissions(appId: String, attemptId: Option[String], user: String): Boolean + } diff --git a/core/src/main/scala/org/apache/spark/deploy/history/BasicEventFilterBuilder.scala b/core/src/main/scala/org/apache/spark/deploy/history/BasicEventFilterBuilder.scala index b18bf2665d6ce..57b05ff245258 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/BasicEventFilterBuilder.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/BasicEventFilterBuilder.scala @@ -19,7 +19,6 @@ package org.apache.spark.deploy.history import scala.collection.mutable -import org.apache.spark.SparkContext import org.apache.spark.deploy.history.EventFilter.FilterStatistics import org.apache.spark.internal.Logging import org.apache.spark.scheduler._ @@ -160,6 +159,8 @@ private[spark] class BasicEventFilter( case e: SparkListenerExecutorRemoved => liveExecutors.contains(e.executorId) case e: SparkListenerExecutorBlacklisted => liveExecutors.contains(e.executorId) case e: SparkListenerExecutorUnblacklisted => liveExecutors.contains(e.executorId) + case e: SparkListenerExecutorExcluded => liveExecutors.contains(e.executorId) + case e: SparkListenerExecutorUnexcluded => liveExecutors.contains(e.executorId) case e: SparkListenerStageExecutorMetrics => liveExecutors.contains(e.execId) case e: SparkListenerBlockManagerAdded => acceptBlockManagerEvent(e.blockManagerId) case e: SparkListenerBlockManagerRemoved => acceptBlockManagerEvent(e.blockManagerId) diff --git a/core/src/main/scala/org/apache/spark/deploy/history/EventLogFileReaders.scala b/core/src/main/scala/org/apache/spark/deploy/history/EventLogFileReaders.scala index 9f63a6441a838..b4771c80a175f 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/EventLogFileReaders.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/EventLogFileReaders.scala @@ -116,7 +116,7 @@ object EventLogFileReader { def apply(fs: FileSystem, status: FileStatus): Option[EventLogFileReader] = { if (isSingleEventLog(status)) { - Some(new SingleFileEventLogFileReader(fs, status.getPath)) + Some(new SingleFileEventLogFileReader(fs, status.getPath, Option(status))) } else if (isRollingEventLogs(status)) { Some(new RollingEventLogFilesFileReader(fs, status.getPath)) } else { @@ -164,10 +164,13 @@ object EventLogFileReader { * FileNotFoundException could occur if the log file is renamed before getting the * status of log file. 
*/ -class SingleFileEventLogFileReader( +private[history] class SingleFileEventLogFileReader( fs: FileSystem, - path: Path) extends EventLogFileReader(fs, path) { - private lazy val status = fileSystem.getFileStatus(rootPath) + path: Path, + maybeStatus: Option[FileStatus]) extends EventLogFileReader(fs, path) { + private lazy val status = maybeStatus.getOrElse(fileSystem.getFileStatus(rootPath)) + + def this(fs: FileSystem, path: Path) = this(fs, path, None) override def lastIndex: Option[Long] = None @@ -203,7 +206,7 @@ class SingleFileEventLogFileReader( * This reader lists the files only once; if caller would like to play with updated list, * it needs to create another reader instance. */ -class RollingEventLogFilesFileReader( +private[history] class RollingEventLogFilesFileReader( fs: FileSystem, path: Path) extends EventLogFileReader(fs, path) { import RollingEventLogFilesWriter._ diff --git a/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala b/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala index fe8be0b3b20d3..d35d8606eb4b4 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala @@ -21,7 +21,7 @@ import java.io.{File, FileNotFoundException, IOException} import java.lang.{Long => JLong} import java.nio.file.Files import java.util.{Date, NoSuchElementException, ServiceLoader} -import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Future, TimeUnit} +import java.util.concurrent.{ConcurrentHashMap, ExecutorService, TimeUnit} import java.util.zip.ZipOutputStream import scala.collection.JavaConverters._ @@ -359,15 +359,7 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) } val conf = this.conf.clone() - val secManager = new SecurityManager(conf) - - secManager.setAcls(historyUiAclsEnable) - // make sure to set admin acls before view acls so they are properly picked up - secManager.setAdminAcls(historyUiAdminAcls ++ stringToSeq(attempt.adminAcls.getOrElse(""))) - secManager.setViewAcls(attempt.info.sparkUser, stringToSeq(attempt.viewAcls.getOrElse(""))) - secManager.setAdminAclsGroups(historyUiAdminAclsGroups ++ - stringToSeq(attempt.adminAclsGroups.getOrElse(""))) - secManager.setViewAclsGroups(stringToSeq(attempt.viewAclsGroups.getOrElse(""))) + val secManager = createSecurityManager(conf, attempt) val kvstore = try { diskManager match { @@ -461,6 +453,17 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) } } + override def checkUIViewPermissions(appId: String, attemptId: Option[String], + user: String): Boolean = { + val app = load(appId) + val attempt = app.attempts.find(_.info.attemptId == attemptId).orNull + if (attempt == null) { + throw new NoSuchElementException() + } + val secManager = createSecurityManager(this.conf.clone(), attempt) + secManager.checkUIViewPermissions(user) + } + /** * Builds the application list based on the current contents of the log directory. * Tries to reuse as much of the data already in memory as possible, by not reading @@ -471,9 +474,21 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) val newLastScanTime = clock.getTimeMillis() logDebug(s"Scanning $logDir with lastScanTime==$lastScanTime") + // Mark entries that are processing as not stale. 
Such entries do not have a chance to be + // updated with the new 'lastProcessed' time and thus any entity that completes processing + // right after this check and before the check for stale entities will be identified as stale + // and will be deleted from the UI until the next 'checkForLogs' run. + val notStale = mutable.HashSet[String]() val updated = Option(fs.listStatus(new Path(logDir))).map(_.toSeq).getOrElse(Nil) .filter { entry => isAccessible(entry.getPath) } - .filter { entry => !isProcessing(entry.getPath) } + .filter { entry => + if (isProcessing(entry.getPath)) { + notStale.add(entry.getPath.toString()) + false + } else { + true + } + } .flatMap { entry => EventLogFileReader(fs, entry) } .filter { reader => try { @@ -538,6 +553,9 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) reader.fileSizeForLastIndex > 0 } catch { case _: FileNotFoundException => false + case NonFatal(e) => + logWarning(s"Error while reading new log ${reader.rootPath}", e) + false } case NonFatal(e) => @@ -570,12 +588,14 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) .last(newLastScanTime - 1) .asScala .toList - stale.filterNot(isProcessing).foreach { log => - log.appId.foreach { appId => - cleanAppData(appId, log.attemptId, log.logPath) - listing.delete(classOf[LogInfo], log.logPath) + stale.filterNot(isProcessing) + .filterNot(info => notStale.contains(info.logPath)) + .foreach { log => + log.appId.foreach { appId => + cleanAppData(appId, log.attemptId, log.logPath) + listing.delete(classOf[LogInfo], log.logPath) + } } - } lastScanTime.set(newLastScanTime) } catch { @@ -716,7 +736,7 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) /** * Replay the given log file, saving the application in the listing db. 
- * Visable for testing + * Visible for testing */ private[history] def doMergeApplicationListing( reader: EventLogFileReader, @@ -1373,6 +1393,19 @@ private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock) endProcessing(rootPath) } } + + private def createSecurityManager(conf: SparkConf, + attempt: AttemptInfoWrapper): SecurityManager = { + val secManager = new SecurityManager(conf) + secManager.setAcls(historyUiAclsEnable) + // make sure to set admin acls before view acls so they are properly picked up + secManager.setAdminAcls(historyUiAdminAcls ++ stringToSeq(attempt.adminAcls.getOrElse(""))) + secManager.setViewAcls(attempt.info.sparkUser, stringToSeq(attempt.viewAcls.getOrElse(""))) + secManager.setAdminAclsGroups(historyUiAdminAclsGroups ++ + stringToSeq(attempt.adminAclsGroups.getOrElse(""))) + secManager.setViewAclsGroups(stringToSeq(attempt.viewAclsGroups.getOrElse(""))) + secManager + } } private[history] object FsHistoryProvider { @@ -1527,14 +1560,9 @@ private[history] class AppListingListener( private class MutableApplicationInfo { var id: String = null var name: String = null - var coresGranted: Option[Int] = None - var maxCores: Option[Int] = None - var coresPerExecutor: Option[Int] = None - var memoryPerExecutorMB: Option[Int] = None def toView(): ApplicationInfoWrapper = { - val apiInfo = ApplicationInfo(id, name, coresGranted, maxCores, coresPerExecutor, - memoryPerExecutorMB, Nil) + val apiInfo = ApplicationInfo(id, name, None, None, None, None, Nil) new ApplicationInfoWrapper(apiInfo, List(attempt.toView())) } diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryAppStatusStore.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryAppStatusStore.scala index 7973652b3e254..ac0f102d81a6a 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryAppStatusStore.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryAppStatusStore.scala @@ -73,7 +73,7 @@ private[spark] class HistoryAppStatusStore( source.totalShuffleWrite, source.isBlacklisted, source.maxMemory, source.addTime, source.removeTime, source.removeReason, newExecutorLogs, source.memoryMetrics, source.blacklistedInStages, source.peakMemoryMetrics, source.attributes, source.resources, - source.resourceProfileId) + source.resourceProfileId, source.isExcluded, source.excludedInStages) } } diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala index ca21a8056d1b5..bb13f34818a62 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/HistoryServer.scala @@ -128,6 +128,11 @@ class HistoryServer( appCache.withSparkUI(appId, attemptId)(fn) } + override def checkUIViewPermissions(appId: String, attemptId: Option[String], + user: String): Boolean = { + provider.checkUIViewPermissions(appId, attemptId, user) + } + initialize() /** diff --git a/core/src/main/scala/org/apache/spark/deploy/history/HybridStore.scala b/core/src/main/scala/org/apache/spark/deploy/history/HybridStore.scala index 58714f16e8417..4eb5c15d4ed18 100644 --- a/core/src/main/scala/org/apache/spark/deploy/history/HybridStore.scala +++ b/core/src/main/scala/org/apache/spark/deploy/history/HybridStore.scala @@ -17,7 +17,6 @@ package org.apache.spark.deploy.history -import java.io.IOException import java.util.Collection import java.util.concurrent.ConcurrentHashMap import 
java.util.concurrent.atomic.AtomicBoolean @@ -53,7 +52,7 @@ private[history] class HybridStore extends KVStore { // A background thread that dumps data from inMemoryStore to levelDB private var backgroundThread: Thread = null - // A hash map that stores all classes that had been writen to inMemoryStore + // A hash map that stores all classes that had been written to inMemoryStore // Visible for testing private[history] val klassMap = new ConcurrentHashMap[Class[_], Boolean] diff --git a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala index 48516cdf83291..9f1b36ad1c8c1 100644 --- a/core/src/main/scala/org/apache/spark/deploy/master/Master.scala +++ b/core/src/main/scala/org/apache/spark/deploy/master/Master.scala @@ -22,9 +22,7 @@ import java.util.{Date, Locale} import java.util.concurrent.{ScheduledFuture, TimeUnit} import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet} -import scala.collection.mutable import scala.util.Random -import scala.util.control.NonFatal import org.apache.spark.{SecurityManager, SparkConf, SparkException} import org.apache.spark.deploy.{ApplicationDescription, DriverDescription, ExecutorState, SparkHadoopUtil} @@ -145,9 +143,15 @@ private[deploy] class Master( logInfo(s"Running Spark version ${org.apache.spark.SPARK_VERSION}") webUi = new MasterWebUI(this, webUiPort) webUi.bind() - masterWebUiUrl = s"${webUi.scheme}$masterPublicAddress:${webUi.boundPort}" + masterWebUiUrl = webUi.webUrl if (reverseProxy) { - masterWebUiUrl = conf.get(UI_REVERSE_PROXY_URL).orElse(Some(masterWebUiUrl)).get + val uiReverseProxyUrl = conf.get(UI_REVERSE_PROXY_URL).map(_.stripSuffix("/")) + if (uiReverseProxyUrl.nonEmpty) { + System.setProperty("spark.ui.proxyBase", uiReverseProxyUrl.get) + // If the master URL has a path component, it must end with a slash. + // Otherwise the browser generates incorrect relative links + masterWebUiUrl = uiReverseProxyUrl.get + "/" + } webUi.addProxy() logInfo(s"Spark Master is acting as a reverse proxy. Master, Workers and " + s"Applications UIs are available at $masterWebUiUrl") @@ -245,8 +249,7 @@ private[deploy] class Master( logError("Leadership has been revoked -- master shutting down.") System.exit(0) - case WorkerDecommission(id, workerRef) => - logInfo("Recording worker %s decommissioning".format(id)) + case WorkerDecommissioning(id, workerRef) => if (state == RecoveryState.STANDBY) { workerRef.send(MasterInStandby) } else { @@ -254,6 +257,19 @@ private[deploy] class Master( idToWorker.get(id).foreach(decommissionWorker) } + case DecommissionWorkers(ids) => + // The caller has already checked the state when handling DecommissionWorkersOnHosts, + // so it should not be the STANDBY + assert(state != RecoveryState.STANDBY) + ids.foreach ( id => + // We use foreach since get gives us an option and we can skip the failures. + idToWorker.get(id).foreach { w => + decommissionWorker(w) + // Also send a message to the worker node to notify. 
+ w.endpoint.send(DecommissionWorker) + } + ) + case RegisterWorker( id, workerHost, workerPort, workerRef, cores, memory, workerWebUiUrl, masterAddress, resources) => @@ -891,10 +907,7 @@ private[deploy] class Master( logInfo(s"Decommissioning the workers with host:ports ${workersToRemoveHostPorts}") // The workers are removed async to avoid blocking the receive loop for the entire batch - workersToRemove.foreach(wi => { - logInfo(s"Sending the worker decommission to ${wi.id} and ${wi.endpoint}") - self.send(WorkerDecommission(wi.id, wi.endpoint)) - }) + self.send(DecommissionWorkers(workersToRemove.map(_.id).toSeq)) // Return the count of workers actually removed workersToRemove.size diff --git a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala index 035f9d379471c..af94bd6d9e0f2 100644 --- a/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala +++ b/core/src/main/scala/org/apache/spark/deploy/master/ui/MasterWebUI.scala @@ -18,7 +18,6 @@ package org.apache.spark.deploy.master.ui import java.net.{InetAddress, NetworkInterface, SocketException} -import java.util.Locale import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse} import org.apache.spark.deploy.DeployMessages.{DecommissionWorkersOnHosts, MasterStateResponse, RequestMasterState} diff --git a/core/src/main/scala/org/apache/spark/deploy/security/HadoopDelegationTokenManager.scala b/core/src/main/scala/org/apache/spark/deploy/security/HadoopDelegationTokenManager.scala index 3168c763df4df..6ce195b6c7a34 100644 --- a/core/src/main/scala/org/apache/spark/deploy/security/HadoopDelegationTokenManager.scala +++ b/core/src/main/scala/org/apache/spark/deploy/security/HadoopDelegationTokenManager.scala @@ -178,7 +178,7 @@ private[spark] class HadoopDelegationTokenManager( private def scheduleRenewal(delay: Long): Unit = { val _delay = math.max(0, delay) - logInfo(s"Scheduling renewal in ${UIUtils.formatDuration(delay)}.") + logInfo(s"Scheduling renewal in ${UIUtils.formatDuration(_delay)}.") val renewalTask = new Runnable() { override def run(): Unit = { @@ -230,6 +230,8 @@ private[spark] class HadoopDelegationTokenManager( val now = System.currentTimeMillis val ratio = sparkConf.get(CREDENTIALS_RENEWAL_INTERVAL_RATIO) val delay = (ratio * (nextRenewal - now)).toLong + logInfo(s"Calculated delay on renewal is $delay, based on next renewal $nextRenewal " + + s"and the ratio $ratio, and current time $now") scheduleRenewal(delay) creds } diff --git a/core/src/main/scala/org/apache/spark/deploy/security/HadoopFSDelegationTokenProvider.scala b/core/src/main/scala/org/apache/spark/deploy/security/HadoopFSDelegationTokenProvider.scala index a46864e2d3c9c..0dc6aa1d7ef30 100644 --- a/core/src/main/scala/org/apache/spark/deploy/security/HadoopFSDelegationTokenProvider.scala +++ b/core/src/main/scala/org/apache/spark/deploy/security/HadoopFSDelegationTokenProvider.scala @@ -63,7 +63,8 @@ private[deploy] class HadoopFSDelegationTokenProvider val identifier = token .decodeIdentifier() .asInstanceOf[AbstractDelegationTokenIdentifier] - identifier.getIssueDate + interval + val tokenKind = token.getKind.toString + getIssueDate(tokenKind, identifier) + interval } if (nextRenewalDates.isEmpty) None else Some(nextRenewalDates.min) } @@ -126,13 +127,33 @@ private[deploy] class HadoopFSDelegationTokenProvider Try { val newExpiration = token.renew(hadoopConf) val identifier = 
token.decodeIdentifier().asInstanceOf[AbstractDelegationTokenIdentifier] - val interval = newExpiration - identifier.getIssueDate - logInfo(s"Renewal interval is $interval for token ${token.getKind.toString}") + val tokenKind = token.getKind.toString + val interval = newExpiration - getIssueDate(tokenKind, identifier) + logInfo(s"Renewal interval is $interval for token $tokenKind") interval }.toOption } if (renewIntervals.isEmpty) None else Some(renewIntervals.min) } + + private def getIssueDate(kind: String, identifier: AbstractDelegationTokenIdentifier): Long = { + val now = System.currentTimeMillis() + val issueDate = identifier.getIssueDate + if (issueDate > now) { + logWarning(s"Token $kind has set up issue date later than current time. (provided: " + + s"$issueDate / current timestamp: $now) Please make sure clocks are in sync between " + + "machines. If the issue is not a clock mismatch, consult token implementor to check " + + "whether issue date is valid.") + issueDate + } else if (issueDate > 0L) { + issueDate + } else { + logWarning(s"Token $kind has not set up issue date properly. (provided: $issueDate) " + + s"Using current timestamp ($now) as issue date instead. Consult token implementor to fix " + + "the behavior.") + now + } + } } private[deploy] object HadoopFSDelegationTokenProvider { diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala b/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala index 4f9c497fc3d76..776d9164cdbbe 100644 --- a/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/DriverRunner.scala @@ -160,7 +160,6 @@ private[deploy] class DriverRunner( driverDesc.jarUrl, driverDir, conf, - securityManager, SparkHadoopUtil.get.newConfiguration(conf), System.currentTimeMillis(), useCache = false) diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala b/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala index 45ffdde58d6c3..61fb92999cfe3 100644 --- a/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala @@ -19,10 +19,8 @@ package org.apache.spark.deploy.worker import java.io.File -import org.apache.commons.lang3.StringUtils - import org.apache.spark.{SecurityManager, SparkConf} -import org.apache.spark.deploy.{DependencyUtils, SparkHadoopUtil} +import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.{config, Logging} import org.apache.spark.rpc.RpcEnv import org.apache.spark.util._ @@ -79,27 +77,21 @@ object DriverWrapper extends Logging { val secMgr = new SecurityManager(sparkConf) val hadoopConf = SparkHadoopUtil.newConfiguration(sparkConf) - val Seq(packagesExclusions, packages, repositories, ivyRepoPath, ivySettingsPath) = - Seq( - "spark.jars.excludes", - "spark.jars.packages", - "spark.jars.repositories", - "spark.jars.ivy", - "spark.jars.ivySettings" - ).map(sys.props.get(_).orNull) + val ivyProperties = DependencyUtils.getIvyProperties() - val resolvedMavenCoordinates = DependencyUtils.resolveMavenDependencies(packagesExclusions, - packages, repositories, ivyRepoPath, Option(ivySettingsPath)) + val resolvedMavenCoordinates = DependencyUtils.resolveMavenDependencies(true, + ivyProperties.packagesExclusions, ivyProperties.packages, ivyProperties.repositories, + ivyProperties.ivyRepoPath, Option(ivyProperties.ivySettingsPath)) val jars = { val jarsProp = 
sys.props.get(config.JARS.key).orNull - if (!StringUtils.isBlank(resolvedMavenCoordinates)) { - DependencyUtils.mergeFileLists(jarsProp, resolvedMavenCoordinates) + if (resolvedMavenCoordinates.nonEmpty) { + DependencyUtils.mergeFileLists(jarsProp, + DependencyUtils.mergeFileLists(resolvedMavenCoordinates: _*)) } else { jarsProp } } - val localJars = DependencyUtils.resolveAndDownloadJars(jars, userJar, sparkConf, hadoopConf, - secMgr) + val localJars = DependencyUtils.resolveAndDownloadJars(jars, userJar, sparkConf, hadoopConf) DependencyUtils.addJarsToClassPath(localJars, loader) } } diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala b/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala index e4fcae13a2f89..2e26ccf671d88 100644 --- a/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/ExecutorRunner.scala @@ -171,7 +171,8 @@ private[deploy] class ExecutorRunner( // Add webUI log urls val baseUrl = if (conf.get(UI_REVERSE_PROXY)) { - s"/proxy/$workerId/logPage/?appId=$appId&executorId=$execId&logType=" + conf.get(UI_REVERSE_PROXY_URL.key, "").stripSuffix("/") + + s"/proxy/$workerId/logPage/?appId=$appId&executorId=$execId&logType=" } else { s"$webUiScheme$publicAddress:$webUiPort/logPage/?appId=$appId&executorId=$execId&logType=" } diff --git a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala index 7649bc37c30b6..a3c73751a2136 100755 --- a/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala +++ b/core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala @@ -66,13 +66,17 @@ private[deploy] class Worker( Utils.checkHost(host) assert (port > 0) - // If worker decommissioning is enabled register a handler on PWR to shutdown. + // If worker decommissioning is enabled register a handler on the configured signal to shutdown. if (conf.get(config.DECOMMISSION_ENABLED)) { - logInfo("Registering SIGPWR handler to trigger decommissioning.") - SignalUtils.register("PWR", "Failed to register SIGPWR handler - " + - "disabling worker decommission feature.")(decommissionSelf) + val signal = conf.get(config.Worker.WORKER_DECOMMISSION_SIGNAL) + logInfo(s"Registering SIG$signal handler to trigger decommissioning.") + SignalUtils.register(signal, s"Failed to register SIG$signal handler - " + + "disabling worker decommission feature.") { + self.send(WorkerDecommissionSigReceived) + true + } } else { - logInfo("Worker decommissioning not enabled, SIGPWR will result in exiting.") + logInfo("Worker decommissioning not enabled.") } // A scheduled executor used to send messages at the specified time. 
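As a usage note on the Worker change above (not part of the patch itself): the trigger signal is now read from configuration instead of being hard-coded to SIGPWR. A hedged sketch of setting it, keeping the default value for illustration; the spark.decommission.enabled key is assumed to be the one backing config.DECOMMISSION_ENABLED:

    // Illustrative only: configure standalone worker decommissioning.
    import org.apache.spark.SparkConf

    val conf = new SparkConf()
      // Assumed key for config.DECOMMISSION_ENABLED; without it the signal handler is not registered.
      .set("spark.decommission.enabled", "true")
      // New in this patch; "PWR" is the default, so on Linux `kill -PWR <worker pid>`
      // makes the worker send WorkerDecommissionSigReceived to itself and decommission.
      .set("spark.worker.decommission.signal", "PWR")
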
@@ -137,7 +141,8 @@ private[deploy] class Worker( private var registered = false private var connected = false private var decommissioned = false - private val workerId = generateWorkerId() + // expose for test + private[spark] val workerId = generateWorkerId() private val sparkHome = if (sys.props.contains(IS_TESTING.key)) { assert(sys.props.contains("spark.test.home"), "spark.test.home is not set!") @@ -272,7 +277,14 @@ private[deploy] class Worker( master = Some(masterRef) connected = true if (reverseProxy) { - logInfo(s"WorkerWebUI is available at $activeMasterWebUiUrl/proxy/$workerId") + logInfo("WorkerWebUI is available at %s/proxy/%s".format( + activeMasterWebUiUrl.stripSuffix("/"), workerId)) + // if reverseProxyUrl is not set, then we continue to generate relative URLs + // starting with "/" throughout the UI and do not use activeMasterWebUiUrl + val proxyUrl = conf.get(UI_REVERSE_PROXY_URL.key, "").stripSuffix("/") + // In the method `UIUtils.makeHref`, the URL segment "/proxy/$worker_id" will be appended + // after `proxyUrl`, so no need to set the worker ID in the `spark.ui.proxyBase` here. + System.setProperty("spark.ui.proxyBase", proxyUrl) } // Cancel any outstanding re-registration attempts because we found a new master cancelLastRegistrationRetry() @@ -668,8 +680,14 @@ private[deploy] class Worker( finishedApps += id maybeCleanupApplication(id) - case WorkerDecommission(_, _) => + case DecommissionWorker => + decommissionSelf() + + case WorkerDecommissionSigReceived => decommissionSelf() + // Tell the Master that we are starting decommissioning + // so it stops trying to launch executor/driver on us + sendToMaster(WorkerDecommissioning(workerId, self)) } override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = { @@ -768,16 +786,15 @@ private[deploy] class Worker( } } - private[deploy] def decommissionSelf(): Boolean = { - if (conf.get(config.DECOMMISSION_ENABLED)) { - logDebug("Decommissioning self") + private[deploy] def decommissionSelf(): Unit = { + if (conf.get(config.DECOMMISSION_ENABLED) && !decommissioned) { decommissioned = true - sendToMaster(WorkerDecommission(workerId, self)) + logInfo(s"Decommission worker $workerId.") + } else if (decommissioned) { + logWarning(s"Worker $workerId already started decommissioning.") } else { - logWarning("Asked to decommission self, but decommissioning not enabled") + logWarning(s"Receive decommission request, but decommission feature is disabled.") } - // Return true since can be called as a signal handler - true } private[worker] def handleDriverStateChanged(driverStateChanged: DriverStateChanged): Unit = { diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala index 48045bafe6e3f..e1d3009598b8c 100644 --- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala +++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala @@ -17,7 +17,6 @@ package org.apache.spark.executor -import java.io.File import java.net.URL import java.nio.ByteBuffer import java.util.Locale @@ -40,7 +39,7 @@ import org.apache.spark.resource.ResourceProfile import org.apache.spark.resource.ResourceProfile._ import org.apache.spark.resource.ResourceUtils._ import org.apache.spark.rpc._ -import org.apache.spark.scheduler.{ExecutorDecommissionInfo, ExecutorLossReason, TaskDescription} +import org.apache.spark.scheduler.{ExecutorLossReason, TaskDescription} 
import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages._ import org.apache.spark.serializer.SerializerInstance import org.apache.spark.util.{ChildFirstURLClassLoader, MutableURLClassLoader, SignalUtils, ThreadUtils, Utils} @@ -79,12 +78,15 @@ private[spark] class CoarseGrainedExecutorBackend( */ private[executor] val taskResources = new mutable.HashMap[Long, Map[String, ResourceInformation]] - @volatile private var decommissioned = false + private var decommissioned = false override def onStart(): Unit = { - logInfo("Registering PWR handler.") - SignalUtils.register("PWR", "Failed to register SIGPWR handler - " + - "disabling decommission feature.")(decommissionSelf) + if (env.conf.get(DECOMMISSION_ENABLED)) { + val signal = env.conf.get(EXECUTOR_DECOMMISSION_SIGNAL) + logInfo(s"Registering SIG$signal handler to trigger decommissioning.") + SignalUtils.register(signal, s"Failed to register SIG$signal handler - disabling" + + s" executor decommission feature.") (self.askSync[Boolean](ExecutorDecommissionSigReceived)) + } logInfo("Connecting to driver: " + driverUrl) try { @@ -166,17 +168,6 @@ private[spark] class CoarseGrainedExecutorBackend( if (executor == null) { exitExecutor(1, "Received LaunchTask command but executor was null") } else { - if (decommissioned) { - val msg = "Asked to launch a task while decommissioned." - logError(msg) - driver match { - case Some(endpoint) => - logInfo("Sending DecommissionExecutor to driver.") - endpoint.send(DecommissionExecutor(executorId, ExecutorDecommissionInfo(msg))) - case _ => - logError("No registered driver to send Decommission to.") - } - } val taskDesc = TaskDescription.decode(data.value) logInfo("Got assigned task " + taskDesc.taskId) taskResources(taskDesc.taskId) = taskDesc.resources @@ -213,11 +204,31 @@ private[spark] class CoarseGrainedExecutorBackend( logInfo(s"Received tokens of ${tokenBytes.length} bytes") SparkHadoopUtil.get.addDelegationTokens(tokenBytes, env.conf) - case DecommissionSelf => - logInfo("Received decommission self") + case DecommissionExecutor => decommissionSelf() } + override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = { + case ExecutorDecommissionSigReceived => + var driverNotified = false + try { + driver.foreach { driverRef => + // Tell driver that we are starting decommissioning so it stops trying to schedule us + driverNotified = driverRef.askSync[Boolean](ExecutorDecommissioning(executorId)) + if (driverNotified) decommissionSelf() + } + } catch { + case e: Exception => + if (driverNotified) { + logError("Fail to decommission self (but driver has been notified).", e) + } else { + logError("Fail to tell driver that we are starting decommissioning", e) + } + decommissioned = false + } + context.reply(decommissioned) + } + override def onDisconnected(remoteAddress: RpcAddress): Unit = { if (stopping.get()) { logInfo(s"Driver from $remoteAddress disconnected during shutdown") @@ -264,17 +275,20 @@ private[spark] class CoarseGrainedExecutorBackend( System.exit(code) } - private def decommissionSelf(): Boolean = { - val msg = "Decommissioning self w/sync" + private def decommissionSelf(): Unit = { + if (!env.conf.get(DECOMMISSION_ENABLED)) { + logWarning(s"Receive decommission request, but decommission feature is disabled.") + return + } else if (decommissioned) { + logWarning(s"Executor $executorId already started decommissioning.") + return + } + val msg = s"Decommission executor $executorId." 
logInfo(msg) try { decommissioned = true - // Tell master we are are decommissioned so it stops trying to schedule us - if (driver.nonEmpty) { - driver.get.askSync[Boolean](DecommissionExecutor( - executorId, ExecutorDecommissionInfo(msg))) - } else { - logError("No driver to message decommissioning.") + if (env.conf.get(STORAGE_DECOMMISSION_ENABLED)) { + env.blockManager.decommissionBlockManager() } if (executor != null) { executor.decommission() @@ -333,12 +347,10 @@ private[spark] class CoarseGrainedExecutorBackend( shutdownThread.start() logInfo("Will exit when finished decommissioning") - // Return true since we are handling a signal - true } catch { case e: Exception => + decommissioned = false logError("Unexpected error while decommissioning self", e) - false } } } diff --git a/core/src/main/scala/org/apache/spark/executor/Executor.scala b/core/src/main/scala/org/apache/spark/executor/Executor.scala index 54b50e6d2fa4a..3865c9c987b1c 100644 --- a/core/src/main/scala/org/apache/spark/executor/Executor.scala +++ b/core/src/main/scala/org/apache/spark/executor/Executor.scala @@ -22,10 +22,11 @@ import java.lang.Thread.UncaughtExceptionHandler import java.lang.management.ManagementFactory import java.net.{URI, URL} import java.nio.ByteBuffer -import java.util.Properties +import java.util.{Locale, Properties} import java.util.concurrent._ import java.util.concurrent.atomic.AtomicBoolean import javax.annotation.concurrent.GuardedBy +import javax.ws.rs.core.UriBuilder import scala.collection.JavaConverters._ import scala.collection.immutable @@ -46,7 +47,7 @@ import org.apache.spark.metrics.source.JVMCPUSource import org.apache.spark.resource.ResourceInformation import org.apache.spark.rpc.RpcTimeout import org.apache.spark.scheduler._ -import org.apache.spark.shuffle.FetchFailedException +import org.apache.spark.shuffle.{FetchFailedException, ShuffleBlockPusher} import org.apache.spark.storage.{StorageLevel, TaskResultBlockId} import org.apache.spark.util._ import org.apache.spark.util.io.ChunkedByteBuffer @@ -78,6 +79,7 @@ private[spark] class Executor( // Each map holds the master's timestamp for the version of that file or JAR we got. private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]() private val currentJars: HashMap[String, Long] = new HashMap[String, Long]() + private val currentArchives: HashMap[String, Long] = new HashMap[String, Long]() private val EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new Array[Byte](0)) @@ -110,7 +112,9 @@ private[spark] class Executor( .build() Executors.newCachedThreadPool(threadFactory).asInstanceOf[ThreadPoolExecutor] } - private val executorSource = new ExecutorSource(threadPool, executorId) + private val schemes = conf.get(EXECUTOR_METRICS_FILESYSTEM_SCHEMES) + .toLowerCase(Locale.ROOT).split(",").map(_.trim).filter(_.nonEmpty) + private val executorSource = new ExecutorSource(threadPool, executorId, schemes) // Pool used for threads that supervise task killing / cancellation private val taskReaperPool = ThreadUtils.newDaemonCachedThreadPool("Task reaper") // For tasks which are in the process of being killed, this map holds the most recently created @@ -135,6 +139,11 @@ private[spark] class Executor( env.metricsSystem.registerSource(new JVMCPUSource()) executorMetricsSource.foreach(_.register(env.metricsSystem)) env.metricsSystem.registerSource(env.blockManager.shuffleMetricsSource) + } else { + // This enable the registration of the executor source in local mode. 
+ // The actual registration happens in SparkContext, + // it cannot be done here as the appId is not available yet + Executor.executorSourceLocalModeOnly = executorSource } // Whether to load classes in user jars before those in Spark jars @@ -143,6 +152,8 @@ private[spark] class Executor( // Whether to monitor killed / interrupted tasks private val taskReaperEnabled = conf.get(TASK_REAPER_ENABLED) + private val killOnFatalErrorDepth = conf.get(EXECUTOR_KILL_ON_FATAL_ERROR_DEPTH) + // Create our ClassLoader // do this after SparkEnv creation so can access the SecurityManager private val urlClassLoader = createClassLoader() @@ -223,16 +234,17 @@ private[spark] class Executor( private val appStartTime = conf.getLong("spark.app.startTime", 0) // To allow users to distribute plugins and their required files - // specified by --jars and --files on application submission, those jars/files should be - // downloaded and added to the class loader via updateDependencies. - // This should be done before plugin initialization below + // specified by --jars, --files and --archives on application submission, those + // jars/files/archives should be downloaded and added to the class loader via + // updateDependencies. This should be done before plugin initialization below // because executors search plugins from the class loader and initialize them. - private val Seq(initialUserJars, initialUserFiles) = Seq("jar", "file").map { key => - conf.getOption(s"spark.app.initial.$key.urls").map { urls => - Map(urls.split(",").map(url => (url, appStartTime)): _*) - }.getOrElse(Map.empty) - } - updateDependencies(initialUserFiles, initialUserJars) + private val Seq(initialUserJars, initialUserFiles, initialUserArchives) = + Seq("jar", "file", "archive").map { key => + conf.getOption(s"spark.app.initial.$key.urls").map { urls => + Map(urls.split(",").map(url => (url, appStartTime)): _*) + }.getOrElse(Map.empty) + } + updateDependencies(initialUserFiles, initialUserJars, initialUserArchives) // Plugins need to load using a class loader that includes the executor's user classpath. // Plugins also needs to be initialized after the heartbeater started @@ -253,7 +265,7 @@ private[spark] class Executor( } def launchTask(context: ExecutorBackend, taskDescription: TaskDescription): Unit = { - val tr = new TaskRunner(context, taskDescription) + val tr = new TaskRunner(context, taskDescription, plugins) runningTasks.put(taskDescription.taskId, tr) threadPool.execute(tr) if (decommissioned) { @@ -313,6 +325,7 @@ private[spark] class Executor( case NonFatal(e) => logWarning("Unable to stop heartbeater", e) } + ShuffleBlockPusher.stop() threadPool.shutdown() // Notify plugins that executor is shutting down so they can terminate cleanly @@ -332,7 +345,8 @@ private[spark] class Executor( class TaskRunner( execBackend: ExecutorBackend, - private val taskDescription: TaskDescription) + private val taskDescription: TaskDescription, + private val plugins: Option[PluginContainer]) extends Runnable { val taskId = taskDescription.taskId @@ -400,7 +414,9 @@ private[spark] class Executor( // Report executor runtime and JVM gc time Option(task).foreach(t => { t.metrics.setExecutorRunTime(TimeUnit.NANOSECONDS.toMillis( - System.nanoTime() - taskStartTimeNs)) + // SPARK-32898: it's possible that a task is killed when taskStartTimeNs has the initial + // value(=0) still. In this case, the executorRunTime should be considered as 0. 
+ if (taskStartTimeNs > 0) System.nanoTime() - taskStartTimeNs else 0)) t.metrics.setJvmGCTime(computeTotalGcTime() - startGCTime) }) @@ -437,7 +453,8 @@ private[spark] class Executor( // requires access to properties contained within (e.g. for access control). Executor.taskDeserializationProps.set(taskDescription.properties) - updateDependencies(taskDescription.addedFiles, taskDescription.addedJars) + updateDependencies( + taskDescription.addedFiles, taskDescription.addedJars, taskDescription.addedArchives) task = ser.deserialize[Task[Any]]( taskDescription.serializedTask, Thread.currentThread.getContextClassLoader) task.localProperties = taskDescription.properties @@ -477,7 +494,8 @@ private[spark] class Executor( taskAttemptId = taskId, attemptNumber = taskDescription.attemptNumber, metricsSystem = env.metricsSystem, - resources = taskDescription.resources) + resources = taskDescription.resources, + plugins = plugins) threwException = false res } { @@ -612,6 +630,7 @@ private[spark] class Executor( executorSource.SUCCEEDED_TASKS.inc(1L) setTaskFinishedAndClearInterruptStatus() + plugins.foreach(_.onTaskSucceeded()) execBackend.statusUpdate(taskId, TaskState.FINISHED, serializedResult) } catch { case t: TaskKilledException => @@ -621,9 +640,9 @@ private[spark] class Executor( // Here and below, put task metric peaks in a WrappedArray to expose them as a Seq // without requiring a copy. val metricPeaks = WrappedArray.make(metricsPoller.getTaskMetricPeaks(taskId)) - val serializedTK = ser.serialize( - TaskKilled(t.reason, accUpdates, accums, metricPeaks.toSeq)) - execBackend.statusUpdate(taskId, TaskState.KILLED, serializedTK) + val reason = TaskKilled(t.reason, accUpdates, accums, metricPeaks.toSeq) + plugins.foreach(_.onTaskFailed(reason)) + execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(reason)) case _: InterruptedException | NonFatal(_) if task != null && task.reasonIfKilled.isDefined => @@ -632,11 +651,11 @@ private[spark] class Executor( val (accums, accUpdates) = collectAccumulatorsAndResetStatusOnFailure(taskStartTimeNs) val metricPeaks = WrappedArray.make(metricsPoller.getTaskMetricPeaks(taskId)) - val serializedTK = ser.serialize( - TaskKilled(killReason, accUpdates, accums, metricPeaks.toSeq)) - execBackend.statusUpdate(taskId, TaskState.KILLED, serializedTK) + val reason = TaskKilled(killReason, accUpdates, accums, metricPeaks.toSeq) + plugins.foreach(_.onTaskFailed(reason)) + execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(reason)) - case t: Throwable if hasFetchFailure && !Utils.isFatalError(t) => + case t: Throwable if hasFetchFailure && !Executor.isFatalError(t, killOnFatalErrorDepth) => val reason = task.context.fetchFailed.get.toTaskFailedReason if (!t.isInstanceOf[FetchFailedException]) { // there was a fetch failure in the task, but some user code wrapped that exception @@ -648,11 +667,13 @@ private[spark] class Executor( s"other exception: $t") } setTaskFinishedAndClearInterruptStatus() + plugins.foreach(_.onTaskFailed(reason)) execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason)) case CausedBy(cDE: CommitDeniedException) => val reason = cDE.toTaskCommitDeniedReason setTaskFinishedAndClearInterruptStatus() + plugins.foreach(_.onTaskFailed(reason)) execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(reason)) case t: Throwable if env.isStopped => @@ -669,34 +690,35 @@ private[spark] class Executor( // SPARK-20904: Do not report failure to driver if if happened during shut down. 
Because // libraries may set up shutdown hooks that race with running tasks during shutdown, // spurious failures may occur and can result in improper accounting in the driver (e.g. - // the task failure would not be ignored if the shutdown happened because of premption, + // the task failure would not be ignored if the shutdown happened because of preemption, // instead of an app issue). if (!ShutdownHookManager.inShutdown()) { val (accums, accUpdates) = collectAccumulatorsAndResetStatusOnFailure(taskStartTimeNs) val metricPeaks = WrappedArray.make(metricsPoller.getTaskMetricPeaks(taskId)) - val serializedTaskEndReason = { + val (taskFailureReason, serializedTaskFailureReason) = { try { val ef = new ExceptionFailure(t, accUpdates).withAccums(accums) .withMetricPeaks(metricPeaks.toSeq) - ser.serialize(ef) + (ef, ser.serialize(ef)) } catch { case _: NotSerializableException => // t is not serializable so just send the stacktrace val ef = new ExceptionFailure(t, accUpdates, false).withAccums(accums) .withMetricPeaks(metricPeaks.toSeq) - ser.serialize(ef) + (ef, ser.serialize(ef)) } } setTaskFinishedAndClearInterruptStatus() - execBackend.statusUpdate(taskId, TaskState.FAILED, serializedTaskEndReason) + plugins.foreach(_.onTaskFailed(taskFailureReason)) + execBackend.statusUpdate(taskId, TaskState.FAILED, serializedTaskFailureReason) } else { logInfo("Not reporting error to driver during JVM shutdown.") } // Don't forcibly exit unless the exception was inherently fatal, to avoid // stopping other tasks unnecessarily. - if (!t.isInstanceOf[SparkOutOfMemoryError] && Utils.isFatalError(t)) { + if (Executor.isFatalError(t, killOnFatalErrorDepth)) { uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), t) } } finally { @@ -727,7 +749,7 @@ private[spark] class Executor( * sending a Thread.interrupt(), and monitoring the task until it finishes. * * Spark's current task cancellation / task killing mechanism is "best effort" because some tasks - * may not be interruptable or may not respond to their "killed" flags being set. If a significant + * may not be interruptible or may not respond to their "killed" flags being set. If a significant * fraction of a cluster's task slots are occupied by tasks that have been marked as killed but * remain running then this can lead to a situation where new jobs and tasks are starved of * resources that are being used by these zombie tasks. @@ -892,32 +914,50 @@ private[spark] class Executor( * Download any missing dependencies if we receive a new set of files and JARs from the * SparkContext. Also adds any new JARs we fetched to the class loader. */ - private def updateDependencies(newFiles: Map[String, Long], newJars: Map[String, Long]): Unit = { + private def updateDependencies( + newFiles: Map[String, Long], + newJars: Map[String, Long], + newArchives: Map[String, Long]): Unit = { lazy val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf) synchronized { // Fetch missing dependencies for ((name, timestamp) <- newFiles if currentFiles.getOrElse(name, -1L) < timestamp) { - logInfo("Fetching " + name + " with timestamp " + timestamp) + logInfo(s"Fetching $name with timestamp $timestamp") // Fetch file with useCache mode, close cache for local mode. 
Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf, - env.securityManager, hadoopConf, timestamp, useCache = !isLocal) + hadoopConf, timestamp, useCache = !isLocal) currentFiles(name) = timestamp } + for ((name, timestamp) <- newArchives if currentArchives.getOrElse(name, -1L) < timestamp) { + logInfo(s"Fetching $name with timestamp $timestamp") + val sourceURI = new URI(name) + val uriToDownload = UriBuilder.fromUri(sourceURI).fragment(null).build() + val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf, + hadoopConf, timestamp, useCache = !isLocal, shouldUntar = false) + val dest = new File( + SparkFiles.getRootDirectory(), + if (sourceURI.getFragment != null) sourceURI.getFragment else source.getName) + logInfo( + s"Unpacking an archive $name from ${source.getAbsolutePath} to ${dest.getAbsolutePath}") + Utils.deleteRecursively(dest) + Utils.unpack(source, dest) + currentArchives(name) = timestamp + } for ((name, timestamp) <- newJars) { val localName = new URI(name).getPath.split("/").last val currentTimeStamp = currentJars.get(name) .orElse(currentJars.get(localName)) .getOrElse(-1L) if (currentTimeStamp < timestamp) { - logInfo("Fetching " + name + " with timestamp " + timestamp) + logInfo(s"Fetching $name with timestamp $timestamp") // Fetch file with useCache mode, close cache for local mode. Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf, - env.securityManager, hadoopConf, timestamp, useCache = !isLocal) + hadoopConf, timestamp, useCache = !isLocal) currentJars(name) = timestamp // Add it to our class loader val url = new File(SparkFiles.getRootDirectory(), localName).toURI.toURL if (!urlClassLoader.getURLs().contains(url)) { - logInfo("Adding " + url + " to class loader") + logInfo(s"Adding $url to class loader") urlClassLoader.addURL(url) } } @@ -979,4 +1019,29 @@ private[spark] object Executor { // task is fully deserialized. When possible, the TaskContext.getLocalProperty call should be // used instead. val taskDeserializationProps: ThreadLocal[Properties] = new ThreadLocal[Properties] + + // Used to store executorSource, for local mode only + var executorSourceLocalModeOnly: ExecutorSource = null + + /** + * Whether a `Throwable` thrown from a task is a fatal error. We will use this to decide whether + * to kill the executor. + * + * @param depthToCheck The max depth of the exception chain we should search for a fatal error. 0 + * means not checking any fatal error (in other words, return false), 1 means + * checking only the exception but not the cause, and so on. This is to avoid + * `StackOverflowError` when hitting a cycle in the exception chain. + */ + def isFatalError(t: Throwable, depthToCheck: Int): Boolean = { + if (depthToCheck <= 0) { + false + } else { + t match { + case _: SparkOutOfMemoryError => false + case e if Utils.isFatalError(e) => true + case e if e.getCause != null => isFatalError(e.getCause, depthToCheck - 1) + case _ => false + } + } + } } diff --git a/core/src/main/scala/org/apache/spark/executor/ExecutorMetrics.scala b/core/src/main/scala/org/apache/spark/executor/ExecutorMetrics.scala index d9aa3ef60fc9e..486e59652218b 100644 --- a/core/src/main/scala/org/apache/spark/executor/ExecutorMetrics.scala +++ b/core/src/main/scala/org/apache/spark/executor/ExecutorMetrics.scala @@ -44,12 +44,12 @@ class ExecutorMetrics private[spark] extends Serializable { /** Returns true if the values for the metrics have been set, false otherwise. 
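To make the depth semantics of the Executor.isFatalError helper introduced just above concrete, a small illustrative check; the exceptions are made up for the example, and since the Executor object is private[spark] this would live in Spark's own test code:

    // depthToCheck = 0 never reports fatal; larger depths walk further down the cause chain.
    import org.apache.spark.executor.Executor

    val wrapped = new RuntimeException(new OutOfMemoryError("boom"))  // fatal error one level down
    assert(!Executor.isFatalError(wrapped, 0))  // 0: check nothing at all
    assert(!Executor.isFatalError(wrapped, 1))  // 1: only the RuntimeException itself, not its cause
    assert(Executor.isFatalError(wrapped, 2))   // 2: reaches the OutOfMemoryError cause
    // SparkOutOfMemoryError is matched first and excluded, so it never counts as fatal here.
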
*/ def isSet(): Boolean = metrics(0) > -1 - private[spark] def this(metrics: Array[Long]) { + private[spark] def this(metrics: Array[Long]) = { this() Array.copy(metrics, 0, this.metrics, 0, Math.min(metrics.size, this.metrics.size)) } - private[spark] def this(metrics: AtomicLongArray) { + private[spark] def this(metrics: AtomicLongArray) = { this() ExecutorMetricType.metricToOffset.foreach { case (_, i) => this.metrics(i) = metrics.get(i) @@ -61,7 +61,7 @@ class ExecutorMetrics private[spark] extends Serializable { * * @param executorMetrics map of executor metric name to value */ - private[spark] def this(executorMetrics: Map[String, Long]) { + private[spark] def this(executorMetrics: Map[String, Long]) = { this() ExecutorMetricType.metricToOffset.foreach { case (name, idx) => metrics(idx) = executorMetrics.getOrElse(name, 0L) diff --git a/core/src/main/scala/org/apache/spark/executor/ExecutorSource.scala b/core/src/main/scala/org/apache/spark/executor/ExecutorSource.scala index 50207aeb3ef6b..d2765d061d662 100644 --- a/core/src/main/scala/org/apache/spark/executor/ExecutorSource.scala +++ b/core/src/main/scala/org/apache/spark/executor/ExecutorSource.scala @@ -27,7 +27,10 @@ import org.apache.hadoop.fs.FileSystem import org.apache.spark.metrics.source.Source private[spark] -class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String) extends Source { +class ExecutorSource( + threadPool: ThreadPoolExecutor, + executorId: String, + fileSystemSchemes: Array[String]) extends Source { private def fileStats(scheme: String) : Option[FileSystem.Statistics] = FileSystem.getAllStatistics.asScala.find(s => s.getScheme.equals(scheme)) @@ -70,7 +73,7 @@ class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String) extends }) // Gauge for file system stats of this executor - for (scheme <- Array("hdfs", "file")) { + for (scheme <- fileSystemSchemes) { registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L) registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L) registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0) diff --git a/core/src/main/scala/org/apache/spark/internal/config/Python.scala b/core/src/main/scala/org/apache/spark/internal/config/Python.scala index 188d884319644..348a33e129d65 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/Python.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/Python.scala @@ -50,4 +50,10 @@ private[spark] object Python { .version("2.4.0") .bytesConf(ByteUnit.MiB) .createOptional + + val PYTHON_AUTH_SOCKET_TIMEOUT = ConfigBuilder("spark.python.authenticate.socketTimeout") + .internal() + .version("3.1.0") + .timeConf(TimeUnit.SECONDS) + .createWithDefaultString("15s") } diff --git a/core/src/main/scala/org/apache/spark/internal/config/Tests.scala b/core/src/main/scala/org/apache/spark/internal/config/Tests.scala index a1ebe5ce0ca32..7b8b204bab640 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/Tests.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/Tests.scala @@ -26,11 +26,11 @@ private[spark] object Tests { .longConf .createWithDefault(Runtime.getRuntime.maxMemory) - val TEST_SCHEDULE_INTERVAL = - ConfigBuilder("spark.testing.dynamicAllocation.scheduleInterval") - .version("2.3.0") - .longConf - .createWithDefault(100) + val TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED = + ConfigBuilder("spark.testing.dynamicAllocation.schedule.enabled") + .version("3.1.0") + .booleanConf + .createWithDefault(true) val IS_TESTING = ConfigBuilder("spark.testing") 
.version("1.0.1") diff --git a/core/src/main/scala/org/apache/spark/internal/config/Worker.scala b/core/src/main/scala/org/apache/spark/internal/config/Worker.scala index a8072712c46ce..fda3a57546b67 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/Worker.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/Worker.scala @@ -82,4 +82,11 @@ private[spark] object Worker { .version("2.0.2") .intConf .createWithDefault(100) + + val WORKER_DECOMMISSION_SIGNAL = + ConfigBuilder("spark.worker.decommission.signal") + .doc("The signal that used to trigger the worker to start decommission.") + .version("3.2.0") + .stringConf + .createWithDefaultString("PWR") } diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala index 9a7039a9cfe93..84c66470288ff 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/package.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala @@ -271,6 +271,13 @@ package object config { .timeConf(TimeUnit.MILLISECONDS) .createWithDefaultString("0") + private[spark] val EXECUTOR_METRICS_FILESYSTEM_SCHEMES = + ConfigBuilder("spark.executor.metrics.fileSystemSchemes") + .doc("The file system schemes to report in executor metrics.") + .version("3.1.0") + .stringConf + .createWithDefaultString("file,hdfs") + private[spark] val EXECUTOR_JAVA_OPTIONS = ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS) .withPrepended(SparkLauncher.EXECUTOR_DEFAULT_JAVA_OPTIONS) @@ -302,8 +309,8 @@ package object config { .createWithDefaultString("1g") private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.executor.memoryOverhead") - .doc("The amount of non-heap memory to be allocated per executor in cluster mode, " + - "in MiB unless otherwise specified.") + .doc("The amount of non-heap memory to be allocated per executor, in MiB unless otherwise" + + " specified.") .version("2.3.0") .bytesConf(ByteUnit.MiB) .createOptional @@ -377,7 +384,7 @@ package object config { "get the replication level of the block to the initial number") .version("2.2.0") .booleanConf - .createWithDefault(false) + .createWithDefault(true) private[spark] val STORAGE_MEMORY_MAP_THRESHOLD = ConfigBuilder("spark.storage.memoryMapThreshold") @@ -423,7 +430,7 @@ package object config { private[spark] val STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED = ConfigBuilder("spark.storage.decommission.shuffleBlocks.enabled") .doc("Whether to transfer shuffle blocks during block manager decommissioning. Requires " + - "a migratable shuffle resolver (like sort based shuffe)") + "a migratable shuffle resolver (like sort based shuffle)") .version("3.1.0") .booleanConf .createWithDefault(false) @@ -464,6 +471,16 @@ package object config { "cache block replication should be positive.") .createWithDefaultString("30s") + private[spark] val STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH = + ConfigBuilder("spark.storage.decommission.fallbackStorage.path") + .doc("The location for fallback storage during block manager decommissioning. " + + "For example, `s3a://spark-storage/`. In case of empty, fallback storage is disabled. 
" + + "The storage should be managed by TTL because Spark will not clean it up.") + .version("3.1.0") + .stringConf + .checkValue(_.endsWith(java.io.File.separator), "Path should end with separator.") + .createOptional + private[spark] val STORAGE_REPLICATION_TOPOLOGY_FILE = ConfigBuilder("spark.storage.replication.topologyFile") .version("2.1.0") @@ -487,7 +504,7 @@ package object config { .version("0.7.0") .withAlternative("spark.storage.blockManagerSlaveTimeoutMs") .timeConf(TimeUnit.MILLISECONDS) - .createWithDefaultString(Network.NETWORK_TIMEOUT.defaultValueString) + .createOptional private[spark] val STORAGE_CLEANUP_FILES_AFTER_EXECUTOR_EXIT = ConfigBuilder("spark.storage.cleanupFilesAfterExecutorExit") @@ -722,74 +739,83 @@ package object config { .booleanConf .createWithDefault(true) - // Blacklist confs - private[spark] val BLACKLIST_ENABLED = - ConfigBuilder("spark.blacklist.enabled") - .version("2.1.0") + private[spark] val EXCLUDE_ON_FAILURE_ENABLED = + ConfigBuilder("spark.excludeOnFailure.enabled") + .version("3.1.0") + .withAlternative("spark.blacklist.enabled") .booleanConf .createOptional private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR = - ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerExecutor") - .version("2.1.0") + ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor") + .version("3.1.0") + .withAlternative("spark.blacklist.task.maxTaskAttemptsPerExecutor") .intConf .createWithDefault(1) private[spark] val MAX_TASK_ATTEMPTS_PER_NODE = - ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerNode") - .version("2.1.0") + ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerNode") + .version("3.1.0") + .withAlternative("spark.blacklist.task.maxTaskAttemptsPerNode") .intConf .createWithDefault(2) private[spark] val MAX_FAILURES_PER_EXEC = - ConfigBuilder("spark.blacklist.application.maxFailedTasksPerExecutor") - .version("2.2.0") + ConfigBuilder("spark.excludeOnFailure.application.maxFailedTasksPerExecutor") + .version("3.1.0") + .withAlternative("spark.blacklist.application.maxFailedTasksPerExecutor") .intConf .createWithDefault(2) private[spark] val MAX_FAILURES_PER_EXEC_STAGE = - ConfigBuilder("spark.blacklist.stage.maxFailedTasksPerExecutor") - .version("2.1.0") + ConfigBuilder("spark.excludeOnFailure.stage.maxFailedTasksPerExecutor") + .version("3.1.0") + .withAlternative("spark.blacklist.stage.maxFailedTasksPerExecutor") .intConf .createWithDefault(2) private[spark] val MAX_FAILED_EXEC_PER_NODE = - ConfigBuilder("spark.blacklist.application.maxFailedExecutorsPerNode") - .version("2.2.0") + ConfigBuilder("spark.excludeOnFailure.application.maxFailedExecutorsPerNode") + .version("3.1.0") + .withAlternative("spark.blacklist.application.maxFailedExecutorsPerNode") .intConf .createWithDefault(2) private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE = - ConfigBuilder("spark.blacklist.stage.maxFailedExecutorsPerNode") - .version("2.1.0") + ConfigBuilder("spark.excludeOnFailure.stage.maxFailedExecutorsPerNode") + .version("3.1.0") + .withAlternative("spark.blacklist.stage.maxFailedExecutorsPerNode") .intConf .createWithDefault(2) - private[spark] val BLACKLIST_TIMEOUT_CONF = - ConfigBuilder("spark.blacklist.timeout") - .version("2.1.0") + private[spark] val EXCLUDE_ON_FAILURE_TIMEOUT_CONF = + ConfigBuilder("spark.excludeOnFailure.timeout") + .version("3.1.0") + .withAlternative("spark.blacklist.timeout") .timeConf(TimeUnit.MILLISECONDS) .createOptional - private[spark] val BLACKLIST_KILL_ENABLED = - 
ConfigBuilder("spark.blacklist.killBlacklistedExecutors") - .version("2.2.0") + private[spark] val EXCLUDE_ON_FAILURE_KILL_ENABLED = + ConfigBuilder("spark.excludeOnFailure.killExcludedExecutors") + .version("3.1.0") + .withAlternative("spark.blacklist.killBlacklistedExecutors") .booleanConf .createWithDefault(false) - private[spark] val BLACKLIST_LEGACY_TIMEOUT_CONF = - ConfigBuilder("spark.scheduler.executorTaskBlacklistTime") + private[spark] val EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF = + ConfigBuilder("spark.scheduler.executorTaskExcludeOnFailureTime") .internal() - .version("1.0.0") + .version("3.1.0") + .withAlternative("spark.scheduler.executorTaskBlacklistTime") .timeConf(TimeUnit.MILLISECONDS) .createOptional - private[spark] val BLACKLIST_FETCH_FAILURE_ENABLED = - ConfigBuilder("spark.blacklist.application.fetchFailure.enabled") - .version("2.3.0") + private[spark] val EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED = + ConfigBuilder("spark.excludeOnFailure.application.fetchFailure.enabled") + .version("3.1.0") + .withAlternative("spark.blacklist.application.fetchFailure.enabled") .booleanConf .createWithDefault(false) - // End blacklist confs private[spark] val UNREGISTER_OUTPUT_ON_HOST_ON_FETCH_FAILURE = ConfigBuilder("spark.files.fetchFailure.unRegisterOutputOnHost") @@ -1453,10 +1479,12 @@ package object config { .createWithDefaultString("365d") private[spark] val UNSCHEDULABLE_TASKSET_TIMEOUT = - ConfigBuilder("spark.scheduler.blacklist.unschedulableTaskSetTimeout") + ConfigBuilder("spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout") .doc("The timeout in seconds to wait to acquire a new executor and schedule a task " + - "before aborting a TaskSet which is unschedulable because of being completely blacklisted.") - .version("2.4.1") + "before aborting a TaskSet which is unschedulable because all executors are " + + "excluded due to failures.") + .version("3.1.0") + .withAlternative("spark.scheduler.blacklist.unschedulableTaskSetTimeout") .timeConf(TimeUnit.SECONDS) .checkValue(v => v >= 0, "The value should be a non negative time value.") .createWithDefault(120) @@ -1785,6 +1813,16 @@ package object config { .toSequence .createWithDefault(Nil) + private[spark] val ARCHIVES = ConfigBuilder("spark.archives") + .version("3.1.0") + .doc("Comma-separated list of archives to be extracted into the working directory of each " + + "executor. .jar, .tar.gz, .tgz and .zip are supported. You can specify the directory " + + "name to unpack via adding '#' after the file name to unpack, for example, " + + "'file.zip#directory'. 
This configuration is experimental.") + .stringConf + .toSequence + .createWithDefault(Nil) + private[spark] val SUBMIT_DEPLOY_MODE = ConfigBuilder("spark.submit.deployMode") .version("1.5.0") .stringConf @@ -1889,6 +1927,13 @@ package object config { .timeConf(TimeUnit.SECONDS) .createOptional + private[spark] val EXECUTOR_DECOMMISSION_SIGNAL = + ConfigBuilder("spark.executor.decommission.signal") + .doc("The signal that used to trigger the executor to start decommission.") + .version("3.2.0") + .stringConf + .createWithDefaultString("PWR") + private[spark] val STAGING_DIR = ConfigBuilder("spark.yarn.stagingDir") .doc("Staging directory used while submitting applications.") .version("2.0.0") @@ -1927,4 +1972,91 @@ package object config { .version("3.0.1") .booleanConf .createWithDefault(false) + + private[spark] val EXECUTOR_KILL_ON_FATAL_ERROR_DEPTH = + ConfigBuilder("spark.executor.killOnFatalError.depth") + .doc("The max depth of the exception chain in a failed task Spark will search for a fatal " + + "error to check whether it should kill an executor. 0 means not checking any fatal " + + "error, 1 means checking only the exception but not the cause, and so on.") + .internal() + .version("3.1.0") + .intConf + .checkValue(_ >= 0, "needs to be a non-negative value") + .createWithDefault(5) + + private[spark] val PUSH_BASED_SHUFFLE_ENABLED = + ConfigBuilder("spark.shuffle.push.enabled") + .doc("Set to 'true' to enable push-based shuffle on the client side and this works in " + + "conjunction with the server side flag spark.shuffle.server.mergedShuffleFileManagerImpl " + + "which needs to be set with the appropriate " + + "org.apache.spark.network.shuffle.MergedShuffleFileManager implementation for push-based " + + "shuffle to be enabled") + .version("3.1.0") + .booleanConf + .createWithDefault(false) + + private[spark] val SHUFFLE_MERGER_MAX_RETAINED_LOCATIONS = + ConfigBuilder("spark.shuffle.push.maxRetainedMergerLocations") + .doc("Maximum number of shuffle push merger locations cached for push based shuffle. " + + "Currently, shuffle push merger locations are nothing but external shuffle services " + + "which are responsible for handling pushed blocks and merging them and serving " + + "merged blocks for later shuffle fetch.") + .version("3.1.0") + .intConf + .createWithDefault(500) + + private[spark] val SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO = + ConfigBuilder("spark.shuffle.push.mergersMinThresholdRatio") + .doc("The minimum number of shuffle merger locations required to enable push based " + + "shuffle for a stage. This is specified as a ratio of the number of partitions in " + + "the child stage. For example, a reduce stage which has 100 partitions and uses the " + + "default value 0.05 requires at least 5 unique merger locations to enable push based " + + "shuffle. Merger locations are currently defined as external shuffle services.") + .version("3.1.0") + .doubleConf + .createWithDefault(0.05) + + private[spark] val SHUFFLE_MERGER_LOCATIONS_MIN_STATIC_THRESHOLD = + ConfigBuilder("spark.shuffle.push.mergersMinStaticThreshold") + .doc(s"The static threshold for number of shuffle push merger locations should be " + + "available in order to enable push based shuffle for a stage. Note this config " + + s"works in conjunction with ${SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO.key}. 
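The two merger thresholds (the ratio above and the static floor whose description continues just below) combine as max(static threshold, ratio x shuffle partitions). A small worked sketch of that arithmetic; the helper is illustrative and is not Spark's internal method:

object PushShuffleMergerThresholdSketch {
  // max(mergersMinStaticThreshold, mergersMinThresholdRatio * partitions), per the config docs.
  def requiredMergers(partitions: Int, staticThreshold: Double = 5, ratio: Double = 0.05): Int =
    math.max(staticThreshold, ratio * partitions).toInt

  def main(args: Array[String]): Unit = {
    println(requiredMergers(100))   // 5  -> the static floor wins for small stages
    println(requiredMergers(1000))  // 50 -> matches the worked example in the doc string
  }
}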
" + + "Maximum of spark.shuffle.push.mergersMinStaticThreshold and " + + s"${SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO.key} ratio number of mergers needed to " + + "enable push based shuffle for a stage. For eg: with 1000 partitions for the child " + + "stage with spark.shuffle.push.mergersMinStaticThreshold as 5 and " + + s"${SHUFFLE_MERGER_LOCATIONS_MIN_THRESHOLD_RATIO.key} set to 0.05, we would need " + + "at least 50 mergers to enable push based shuffle for that stage.") + .version("3.1.0") + .doubleConf + .createWithDefault(5) + + private[spark] val SHUFFLE_NUM_PUSH_THREADS = + ConfigBuilder("spark.shuffle.push.numPushThreads") + .doc("Specify the number of threads in the block pusher pool. These threads assist " + + "in creating connections and pushing blocks to remote shuffle services. By default, the " + + "threadpool size is equal to the number of spark executor cores.") + .version("3.2.0") + .intConf + .createOptional + + private[spark] val SHUFFLE_MAX_BLOCK_SIZE_TO_PUSH = + ConfigBuilder("spark.shuffle.push.maxBlockSizeToPush") + .doc("The max size of an individual block to push to the remote shuffle services. Blocks " + + "larger than this threshold are not pushed to be merged remotely. These shuffle blocks " + + "will be fetched by the executors in the original manner.") + .version("3.2.0") + .bytesConf(ByteUnit.BYTE) + .createWithDefaultString("1m") + + private[spark] val SHUFFLE_MAX_BLOCK_BATCH_SIZE_FOR_PUSH = + ConfigBuilder("spark.shuffle.push.maxBlockBatchSize") + .doc("The max size of a batch of shuffle blocks to be grouped into a single push request.") + .version("3.2.0") + .bytesConf(ByteUnit.BYTE) + // Default is 3m because it is greater than 2m which is the default value for + // TransportConf#memoryMapBytes. If this defaults to 2m as well it is very likely that each + // batch of block will be loaded in memory with memory mapping, which has higher overhead + // with small MB sized chunk of data. + .createWithDefaultString("3m") } diff --git a/core/src/main/scala/org/apache/spark/internal/io/FileCommitProtocol.scala b/core/src/main/scala/org/apache/spark/internal/io/FileCommitProtocol.scala index 0746e43babf9a..d9d7b06cdb8ce 100644 --- a/core/src/main/scala/org/apache/spark/internal/io/FileCommitProtocol.scala +++ b/core/src/main/scala/org/apache/spark/internal/io/FileCommitProtocol.scala @@ -169,4 +169,8 @@ object FileCommitProtocol extends Logging { ctor.newInstance(jobId, outputPath) } } + + def getStagingDir(path: String, jobId: String): Path = { + new Path(path, ".spark-staging-" + jobId) + } } diff --git a/core/src/main/scala/org/apache/spark/internal/io/HadoopMapReduceCommitProtocol.scala b/core/src/main/scala/org/apache/spark/internal/io/HadoopMapReduceCommitProtocol.scala index 11ce608f52ee2..30f9a650a69c9 100644 --- a/core/src/main/scala/org/apache/spark/internal/io/HadoopMapReduceCommitProtocol.scala +++ b/core/src/main/scala/org/apache/spark/internal/io/HadoopMapReduceCommitProtocol.scala @@ -41,13 +41,28 @@ import org.apache.spark.mapred.SparkHadoopMapRedUtil * @param jobId the job's or stage's id * @param path the job's output path, or null if committer acts as a noop * @param dynamicPartitionOverwrite If true, Spark will overwrite partition directories at runtime - * dynamically, i.e., we first write files under a staging - * directory with partition path, e.g. - * /path/to/staging/a=1/b=1/xxx.parquet. When committing the job, - * we first clean up the corresponding partition directories at - * destination path, e.g. 
/path/to/destination/a=1/b=1, and move - * files from staging directory to the corresponding partition - * directories under destination path. + * dynamically. Suppose final path is /path/to/outputPath, output + * path of [[FileOutputCommitter]] is an intermediate path, e.g. + * /path/to/outputPath/.spark-staging-{jobId}, which is a staging + * directory. Task attempts firstly write files under the + * intermediate path, e.g. + * /path/to/outputPath/.spark-staging-{jobId}/_temporary/ + * {appAttemptId}/_temporary/{taskAttemptId}/a=1/b=1/xxx.parquet. + * + * 1. When [[FileOutputCommitter]] algorithm version set to 1, + * we firstly move task attempt output files to + * /path/to/outputPath/.spark-staging-{jobId}/_temporary/ + * {appAttemptId}/{taskId}/a=1/b=1, + * then move them to + * /path/to/outputPath/.spark-staging-{jobId}/a=1/b=1. + * 2. When [[FileOutputCommitter]] algorithm version set to 2, + * committing tasks directly move task attempt output files to + * /path/to/outputPath/.spark-staging-{jobId}/a=1/b=1. + * + * At the end of committing job, we move output files from + * intermediate path to final path, e.g., move files from + * /path/to/outputPath/.spark-staging-{jobId}/a=1/b=1 + * to /path/to/outputPath/a=1/b=1 */ class HadoopMapReduceCommitProtocol( jobId: String, @@ -89,7 +104,7 @@ class HadoopMapReduceCommitProtocol( * The staging directory of this write job. Spark uses it to deal with files with absolute output * path, or writing data into partitioned directory with dynamicPartitionOverwrite=true. */ - private def stagingDir = new Path(path, ".spark-staging-" + jobId) + protected def stagingDir = getStagingDir(path, jobId) protected def setupCommitter(context: TaskAttemptContext): OutputCommitter = { val format = context.getOutputFormatClass.getConstructor().newInstance() @@ -106,13 +121,13 @@ class HadoopMapReduceCommitProtocol( val filename = getFilename(taskContext, ext) val stagingDir: Path = committer match { - case _ if dynamicPartitionOverwrite => - assert(dir.isDefined, - "The dataset to be written must be partitioned when dynamicPartitionOverwrite is true.") - partitionPaths += dir.get - this.stagingDir // For FileOutputCommitter it has its own staging path called "work path". case f: FileOutputCommitter => + if (dynamicPartitionOverwrite) { + assert(dir.isDefined, + "The dataset to be written must be partitioned when dynamicPartitionOverwrite is true.") + partitionPaths += dir.get + } new Path(Option(f.getWorkPath).map(_.toString).getOrElse(path)) case _ => new Path(path) } diff --git a/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriter.scala b/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriter.scala index 6d174b5e0f81b..37b470802067a 100644 --- a/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriter.scala +++ b/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriter.scala @@ -18,7 +18,7 @@ package org.apache.spark.internal.io import java.text.NumberFormat -import java.util.{Date, Locale} +import java.util.{Date, Locale, UUID} import scala.reflect.ClassTag @@ -70,6 +70,11 @@ object SparkHadoopWriter extends Logging { // Assert the output format/key/value class is set in JobConf. config.assertConf(jobContext, rdd.conf) + // propagate the description UUID into the jobs, so that committers + // get an ID guaranteed to be unique. 
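The new FileCommitProtocol.getStagingDir helper and the protected stagingDir shown above put the dynamic-partition-overwrite staging area directly under the job's output path. A minimal sketch of the resulting path, assuming Hadoop's Path is on the classpath and using a made-up jobId:

import org.apache.hadoop.fs.Path

object StagingDirSketch {
  // Same construction as the getStagingDir helper added to FileCommitProtocol.
  def getStagingDir(path: String, jobId: String): Path =
    new Path(path, ".spark-staging-" + jobId)

  def main(args: Array[String]): Unit = {
    // Prints /path/to/outputPath/.spark-staging-20240101123000-0001 (illustrative jobId).
    println(getStagingDir("/path/to/outputPath", "20240101123000-0001"))
  }
}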
+ jobContext.getConfiguration.set("spark.sql.sources.writeJobUUID", + UUID.randomUUID.toString) + val committer = config.createCommitter(commitJobId) committer.setupJob(jobContext) diff --git a/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriterUtils.scala b/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriterUtils.scala index de828a6d6156e..657842c620f30 100644 --- a/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriterUtils.scala +++ b/core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriterUtils.scala @@ -20,7 +20,7 @@ package org.apache.spark.internal.io import java.text.SimpleDateFormat import java.util.{Date, Locale} -import scala.util.DynamicVariable +import scala.util.{DynamicVariable, Random} import org.apache.hadoop.fs.Path import org.apache.hadoop.mapred.{JobConf, JobID} @@ -37,14 +37,35 @@ private[spark] object SparkHadoopWriterUtils { private val RECORDS_BETWEEN_BYTES_WRITTEN_METRIC_UPDATES = 256 + private val RAND = new Random() + /** + * Create a job ID. + * + * @param time (current) time + * @param id job number + * @return a job ID + */ def createJobID(time: Date, id: Int): JobID = { + if (id < 0) { + throw new IllegalArgumentException("Job number is negative") + } val jobtrackerID = createJobTrackerID(time) new JobID(jobtrackerID, id) } + /** + * Generate an ID for a job tracker. + * @param time (current) time + * @return a string for a job ID + */ def createJobTrackerID(time: Date): String = { - new SimpleDateFormat("yyyyMMddHHmmss", Locale.US).format(time) + val base = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US).format(time) + var l1 = RAND.nextLong() + if (l1 < 0) { + l1 = -l1 + } + base + l1 } def createPathFromString(path: String, conf: JobConf): Path = { diff --git a/core/src/main/scala/org/apache/spark/internal/plugin/PluginContainer.scala b/core/src/main/scala/org/apache/spark/internal/plugin/PluginContainer.scala index 4eda4767094ad..f78ec250f7173 100644 --- a/core/src/main/scala/org/apache/spark/internal/plugin/PluginContainer.scala +++ b/core/src/main/scala/org/apache/spark/internal/plugin/PluginContainer.scala @@ -20,7 +20,7 @@ package org.apache.spark.internal.plugin import scala.collection.JavaConverters._ import scala.util.{Either, Left, Right} -import org.apache.spark.{SparkContext, SparkEnv} +import org.apache.spark.{SparkContext, SparkEnv, TaskFailedReason} import org.apache.spark.api.plugin._ import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ @@ -31,6 +31,9 @@ sealed abstract class PluginContainer { def shutdown(): Unit def registerMetrics(appId: String): Unit + def onTaskStart(): Unit + def onTaskSucceeded(): Unit + def onTaskFailed(failureReason: TaskFailedReason): Unit } @@ -85,6 +88,17 @@ private class DriverPluginContainer( } } + override def onTaskStart(): Unit = { + throw new IllegalStateException("Should not be called for the driver container.") + } + + override def onTaskSucceeded(): Unit = { + throw new IllegalStateException("Should not be called for the driver container.") + } + + override def onTaskFailed(failureReason: TaskFailedReason): Unit = { + throw new IllegalStateException("Should not be called for the driver container.") + } } private class ExecutorPluginContainer( @@ -134,6 +148,39 @@ private class ExecutorPluginContainer( } } } + + override def onTaskStart(): Unit = { + executorPlugins.foreach { case (name, plugin) => + try { + plugin.onTaskStart() + } catch { + case t: Throwable => + logInfo(s"Exception while calling onTaskStart on plugin 
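The updated createJobTrackerID above appends a non-negative random suffix to the timestamp, and createJobID now rejects negative job numbers. A hedged, standalone re-sketch showing why two jobs started within the same second no longer share a tracker ID:

import java.text.SimpleDateFormat
import java.util.{Date, Locale}
import scala.util.Random

object JobTrackerIdSketch {
  private val rand = new Random()

  // Same scheme as the updated SparkHadoopWriterUtils: timestamp plus a non-negative random suffix.
  def createJobTrackerID(time: Date): String = {
    val base = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US).format(time)
    var l1 = rand.nextLong()
    if (l1 < 0) l1 = -l1
    base + l1
  }

  def main(args: Array[String]): Unit = {
    val now = new Date()
    // Virtually always prints two different IDs even though the timestamp part is identical.
    println(createJobTrackerID(now))
    println(createJobTrackerID(now))
  }
}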
$name.", t) + } + } + } + + override def onTaskSucceeded(): Unit = { + executorPlugins.foreach { case (name, plugin) => + try { + plugin.onTaskSucceeded() + } catch { + case t: Throwable => + logInfo(s"Exception while calling onTaskSucceeded on plugin $name.", t) + } + } + } + + override def onTaskFailed(failureReason: TaskFailedReason): Unit = { + executorPlugins.foreach { case (name, plugin) => + try { + plugin.onTaskFailed(failureReason) + } catch { + case t: Throwable => + logInfo(s"Exception while calling onTaskFailed on plugin $name.", t) + } + } + } } object PluginContainer { diff --git a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala index 5205a2d568ac3..fa663a32d4929 100644 --- a/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala +++ b/core/src/main/scala/org/apache/spark/io/CompressionCodec.scala @@ -107,7 +107,6 @@ private[spark] object CompressionCodec { } val FALLBACK_COMPRESSION_CODEC = "snappy" - val DEFAULT_COMPRESSION_CODEC = "lz4" val ALL_COMPRESSION_CODECS = shortCompressionCodecNames.values.toSeq } diff --git a/core/src/main/scala/org/apache/spark/metrics/MetricsConfig.scala b/core/src/main/scala/org/apache/spark/metrics/MetricsConfig.scala index d98d5e3b81aa0..bddd18adc683e 100644 --- a/core/src/main/scala/org/apache/spark/metrics/MetricsConfig.scala +++ b/core/src/main/scala/org/apache/spark/metrics/MetricsConfig.scala @@ -102,7 +102,7 @@ private[spark] class MetricsConfig(conf: SparkConf) extends Logging { * * @param prop the flat list of properties to "unflatten" based on prefixes * @param regex the regex that the prefix has to comply with - * @return an unflatted map, mapping prefix with sub-properties under that prefix + * @return an unflattened map, mapping prefix with sub-properties under that prefix */ def subProperties(prop: Properties, regex: Regex): mutable.HashMap[String, Properties] = { val subProperties = new mutable.HashMap[String, Properties] diff --git a/core/src/main/scala/org/apache/spark/metrics/sink/PrometheusServlet.scala b/core/src/main/scala/org/apache/spark/metrics/sink/PrometheusServlet.scala index 59b863b89f75a..e9c2974622300 100644 --- a/core/src/main/scala/org/apache/spark/metrics/sink/PrometheusServlet.scala +++ b/core/src/main/scala/org/apache/spark/metrics/sink/PrometheusServlet.scala @@ -56,7 +56,7 @@ private[spark] class PrometheusServlet( def getMetricsSnapshot(request: HttpServletRequest): String = { import scala.collection.JavaConverters._ - val guagesLabel = """{type="gauges"}""" + val gaugesLabel = """{type="gauges"}""" val countersLabel = """{type="counters"}""" val metersLabel = countersLabel val histogramslabels = """{type="histograms"}""" @@ -65,8 +65,8 @@ private[spark] class PrometheusServlet( val sb = new StringBuilder() registry.getGauges.asScala.foreach { case (k, v) => if (!v.getValue.isInstanceOf[String]) { - sb.append(s"${normalizeKey(k)}Number$guagesLabel ${v.getValue}\n") - sb.append(s"${normalizeKey(k)}Value$guagesLabel ${v.getValue}\n") + sb.append(s"${normalizeKey(k)}Number$gaugesLabel ${v.getValue}\n") + sb.append(s"${normalizeKey(k)}Value$gaugesLabel ${v.getValue}\n") } } registry.getCounters.asScala.foreach { case (k, v) => diff --git a/core/src/main/scala/org/apache/spark/network/BlockDataManager.scala b/core/src/main/scala/org/apache/spark/network/BlockDataManager.scala index 62fbc166167d3..cafb39ea82ad9 100644 --- a/core/src/main/scala/org/apache/spark/network/BlockDataManager.scala +++ 
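ExecutorPluginContainer above forwards the new onTaskStart, onTaskSucceeded and onTaskFailed callbacks to every registered plugin and only logs plugin exceptions. A hedged sketch of a plugin using those hooks; it assumes the ExecutorPlugin interface exposes them as overridable methods and that the plugin is registered through spark.plugins:

import java.util.concurrent.atomic.AtomicInteger

import org.apache.spark.TaskFailedReason
import org.apache.spark.api.plugin.{DriverPlugin, ExecutorPlugin, SparkPlugin}

class RunningTaskGaugePlugin extends SparkPlugin {
  override def driverPlugin(): DriverPlugin = null // no driver-side component needed here

  override def executorPlugin(): ExecutorPlugin = new ExecutorPlugin {
    private val running = new AtomicInteger(0)
    // Keep a rough gauge of tasks currently running on this executor.
    override def onTaskStart(): Unit = running.incrementAndGet()
    override def onTaskSucceeded(): Unit = running.decrementAndGet()
    override def onTaskFailed(failureReason: TaskFailedReason): Unit = running.decrementAndGet()
  }
}
// Enabled with spark.plugins set to the plugin's fully-qualified class name in a real job.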
b/core/src/main/scala/org/apache/spark/network/BlockDataManager.scala @@ -22,7 +22,7 @@ import scala.reflect.ClassTag import org.apache.spark.TaskContext import org.apache.spark.network.buffer.ManagedBuffer import org.apache.spark.network.client.StreamCallbackWithID -import org.apache.spark.storage.{BlockId, ShuffleBlockId, StorageLevel} +import org.apache.spark.storage.{BlockId, StorageLevel} private[spark] trait BlockDataManager { diff --git a/core/src/main/scala/org/apache/spark/network/BlockTransferService.scala b/core/src/main/scala/org/apache/spark/network/BlockTransferService.scala index 98129b62b53df..635efc3e22628 100644 --- a/core/src/main/scala/org/apache/spark/network/BlockTransferService.scala +++ b/core/src/main/scala/org/apache/spark/network/BlockTransferService.scala @@ -23,7 +23,6 @@ import scala.concurrent.{Future, Promise} import scala.concurrent.duration.Duration import scala.reflect.ClassTag -import org.apache.spark.internal.Logging import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer, NioManagedBuffer} import org.apache.spark.network.shuffle.{BlockFetchingListener, BlockStoreClient, DownloadFileManager} import org.apache.spark.storage.{BlockId, EncryptedManagedBuffer, StorageLevel} @@ -110,6 +109,7 @@ abstract class BlockTransferService extends BlockStoreClient { * This method is similar to [[uploadBlock]], except this one blocks the thread * until the upload finishes. */ + @throws[java.io.IOException] def uploadBlockSync( hostname: String, port: Int, diff --git a/core/src/main/scala/org/apache/spark/network/netty/NettyBlockTransferService.scala b/core/src/main/scala/org/apache/spark/network/netty/NettyBlockTransferService.scala index 806fbf52795bc..828849812bbd1 100644 --- a/core/src/main/scala/org/apache/spark/network/netty/NettyBlockTransferService.scala +++ b/core/src/main/scala/org/apache/spark/network/netty/NettyBlockTransferService.scala @@ -19,9 +19,7 @@ package org.apache.spark.network.netty import java.io.IOException import java.nio.ByteBuffer -import java.util import java.util.{HashMap => JHashMap, Map => JMap} -import java.util.concurrent.CompletableFuture import scala.collection.JavaConverters._ import scala.concurrent.{Future, Promise} @@ -35,11 +33,11 @@ import org.apache.spark.ExecutorDeadException import org.apache.spark.internal.config import org.apache.spark.network._ import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer} -import org.apache.spark.network.client.{RpcResponseCallback, TransportClient, TransportClientBootstrap, TransportClientFactory} +import org.apache.spark.network.client.{RpcResponseCallback, TransportClientBootstrap} import org.apache.spark.network.crypto.{AuthClientBootstrap, AuthServerBootstrap} import org.apache.spark.network.server._ import org.apache.spark.network.shuffle.{BlockFetchingListener, DownloadFileManager, OneForOneBlockFetcher, RetryingBlockFetcher} -import org.apache.spark.network.shuffle.protocol.{BlockTransferMessage, GetLocalDirsForExecutors, LocalDirsForExecutors, UploadBlock, UploadBlockStream} +import org.apache.spark.network.shuffle.protocol.{UploadBlock, UploadBlockStream} import org.apache.spark.network.util.JavaUtils import org.apache.spark.rpc.RpcEndpointRef import org.apache.spark.serializer.JavaSerializer diff --git a/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala index 943abae17a911..39f69567981ea 100644 --- 
a/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala +++ b/core/src/main/scala/org/apache/spark/rdd/DoubleRDDFunctions.scala @@ -173,7 +173,7 @@ class DoubleRDDFunctions(self: RDD[Double]) extends Logging with Serializable { if (buckets.length < 2) { throw new IllegalArgumentException("buckets array must have at least two elements") } - // The histogramPartition function computes the partail histogram for a given + // The histogramPartition function computes the partial histogram for a given // partition. The provided bucketFunction determines which bucket in the array // to increment or returns None if there is no bucket. This is done so we can // specialize for uniformly distributed buckets and save the O(log n) binary diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala index d5f21112c0c9e..5fc0b4f736d55 100644 --- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala @@ -232,6 +232,10 @@ class HadoopRDD[K, V]( logWarning(s"${jobConf.get(FileInputFormat.INPUT_DIR)} doesn't exist and no" + s" partitions returned from this path.", e) Array.empty[Partition] + case e: IOException if e.getMessage.startsWith("Not a file:") => + val path = e.getMessage.split(":").map(_.trim).apply(2) + throw new IOException(s"Path: ${path} is a directory, which is not supported by the " + + s"record reader when `mapreduce.input.fileinputformat.input.dir.recursive` is false.") } } diff --git a/core/src/main/scala/org/apache/spark/rdd/InputFileBlockHolder.scala b/core/src/main/scala/org/apache/spark/rdd/InputFileBlockHolder.scala index 1beb085db27d9..8230144025feb 100644 --- a/core/src/main/scala/org/apache/spark/rdd/InputFileBlockHolder.scala +++ b/core/src/main/scala/org/apache/spark/rdd/InputFileBlockHolder.scala @@ -34,7 +34,7 @@ private[spark] object InputFileBlockHolder { * @param length size of the block, in bytes, or -1 if not available. 
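The HadoopRDD change above rewraps Hadoop's "Not a file" error into a message that points at mapreduce.input.fileinputformat.input.dir.recursive. A hedged sketch of the usual workaround when the input path legitimately contains subdirectories; the input path below is hypothetical:

import org.apache.spark.{SparkConf, SparkContext}

object RecursiveInputSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("recursive-input").setMaster("local[1]"))
    // Without this, a nested directory under the input path now fails fast with the clearer
    // IOException added to HadoopRDD above.
    sc.hadoopConfiguration.setBoolean("mapreduce.input.fileinputformat.input.dir.recursive", true)
    println(sc.textFile("/tmp/input-with-subdirs").count()) // hypothetical input path
    sc.stop()
  }
}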
*/ private class FileBlock(val filePath: UTF8String, val startOffset: Long, val length: Long) { - def this() { + def this() = { this(UTF8String.fromString(""), -1, -1) } } diff --git a/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala b/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala index 503aa0dffc9f3..113ed2db7f546 100644 --- a/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/LocalCheckpointRDD.scala @@ -40,7 +40,7 @@ private[spark] class LocalCheckpointRDD[T: ClassTag]( numPartitions: Int) extends CheckpointRDD[T](sc) { - def this(rdd: RDD[T]) { + def this(rdd: RDD[T]) = { this(rdd.context, rdd.id, rdd.partitions.length) } diff --git a/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala index 5b1c024257529..3cefcb16d6eb1 100644 --- a/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala +++ b/core/src/main/scala/org/apache/spark/rdd/OrderedRDDFunctions.scala @@ -88,10 +88,10 @@ class OrderedRDDFunctions[K : Ordering : ClassTag, val rddToFilter: RDD[P] = self.partitioner match { case Some(rp: RangePartitioner[K, V]) => - val partitionIndicies = (rp.getPartition(lower), rp.getPartition(upper)) match { + val partitionIndices = (rp.getPartition(lower), rp.getPartition(upper)) match { case (l, u) => Math.min(l, u) to Math.max(l, u) } - PartitionPruningRDD.create(self, partitionIndicies.contains) + PartitionPruningRDD.create(self, partitionIndices.contains) case _ => self } diff --git a/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala index 324cba5b4de42..f0239cdd9136d 100644 --- a/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/ParallelCollectionRDD.scala @@ -19,7 +19,6 @@ package org.apache.spark.rdd import java.io._ -import scala.Serializable import scala.collection.Map import scala.collection.immutable.NumericRange import scala.collection.mutable.ArrayBuffer diff --git a/core/src/main/scala/org/apache/spark/rdd/RDD.scala b/core/src/main/scala/org/apache/spark/rdd/RDD.scala index 6095042de7f0c..65b39c4b65603 100644 --- a/core/src/main/scala/org/apache/spark/rdd/RDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/RDD.scala @@ -327,7 +327,7 @@ abstract class RDD[T: ClassTag]( /** * Internal method to this RDD; will read from cache if applicable, or otherwise compute it. - * This should ''not'' be called by users directly, but is available for implementors of custom + * This should ''not'' be called by users directly, but is available for implementers of custom * subclasses of RDD. 
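The `def this() {` to `def this() = {` edits above move auxiliary constructors off procedure syntax, which newer Scala compilers deprecate. A trivial sketch of the accepted form:

class Box(val value: Int) {
  // Old procedure syntax, now deprecated: def this() { this(0) }
  def this() = { this(0) }
}

object BoxSketch {
  def main(args: Array[String]): Unit = println(new Box().value) // 0
}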
*/ final def iterator(split: Partition, context: TaskContext): Iterator[T] = { @@ -1919,9 +1919,8 @@ abstract class RDD[T: ClassTag]( val persistence = if (storageLevel != StorageLevel.NONE) storageLevel.description else "" val storageInfo = rdd.context.getRDDStorageInfo(_.id == rdd.id).map(info => - " CachedPartitions: %d; MemorySize: %s; ExternalBlockStoreSize: %s; DiskSize: %s".format( - info.numCachedPartitions, bytesToString(info.memSize), - bytesToString(info.externalBlockStoreSize), bytesToString(info.diskSize))) + " CachedPartitions: %d; MemorySize: %s; DiskSize: %s".format( + info.numCachedPartitions, bytesToString(info.memSize), bytesToString(info.diskSize))) s"$rdd [$persistence]" +: storageInfo } diff --git a/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala b/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala index 576a83f6ab4d9..5093a12777ad3 100644 --- a/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala +++ b/core/src/main/scala/org/apache/spark/rdd/ReliableCheckpointRDD.scala @@ -20,7 +20,6 @@ package org.apache.spark.rdd import java.io.{FileNotFoundException, IOException} import java.util.concurrent.TimeUnit -import scala.collection.mutable import scala.reflect.ClassTag import scala.util.control.NonFatal diff --git a/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala b/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala index 02def89dd8c2b..2f6ff0acdf024 100644 --- a/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala +++ b/core/src/main/scala/org/apache/spark/rdd/SequenceFileRDDFunctions.scala @@ -32,16 +32,13 @@ import org.apache.spark.internal.Logging * @note This can't be part of PairRDDFunctions because we need more implicit parameters to * convert our keys and values to Writable. */ -class SequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable : ClassTag]( +class SequenceFileRDDFunctions[K: IsWritable: ClassTag, V: IsWritable: ClassTag]( self: RDD[(K, V)], _keyWritableClass: Class[_ <: Writable], _valueWritableClass: Class[_ <: Writable]) extends Logging with Serializable { - // TODO the context bound (<%) above should be replaced with simple type bound and implicit - // conversion but is a breaking change. This should be fixed in Spark 3.x. - /** * Output the RDD as a Hadoop SequenceFile using the Writable types we infer from the RDD's key * and value types. If the key or value are Writable, then we use their classes directly; @@ -52,7 +49,7 @@ class SequenceFileRDDFunctions[K <% Writable: ClassTag, V <% Writable : ClassTag def saveAsSequenceFile( path: String, codec: Option[Class[_ <: CompressionCodec]] = None): Unit = self.withScope { - def anyToWritable[U <% Writable](u: U): Writable = u + def anyToWritable[U: IsWritable](u: U): Writable = u // TODO We cannot force the return type of `anyToWritable` be same as keyWritableClass and // valueWritableClass at the compile time. To implement that, we need to add type parameters to diff --git a/core/src/main/scala/org/apache/spark/rdd/package.scala b/core/src/main/scala/org/apache/spark/rdd/package.scala index 55fc6e4d2b4df..43ca6d7643b17 100644 --- a/core/src/main/scala/org/apache/spark/rdd/package.scala +++ b/core/src/main/scala/org/apache/spark/rdd/package.scala @@ -17,7 +17,11 @@ package org.apache.spark +import org.apache.hadoop.io.Writable + /** * Provides several RDD implementations. See [[org.apache.spark.rdd.RDD]]. 
*/ -package object rdd +package object rdd { + type IsWritable[A] = A => Writable +} diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceAllocator.scala b/core/src/main/scala/org/apache/spark/resource/ResourceAllocator.scala index 482d9e94c6dd9..22d10a975ad0f 100644 --- a/core/src/main/scala/org/apache/spark/resource/ResourceAllocator.scala +++ b/core/src/main/scala/org/apache/spark/resource/ResourceAllocator.scala @@ -20,7 +20,6 @@ package org.apache.spark.resource import scala.collection.mutable import org.apache.spark.SparkException -import org.apache.spark.util.collection.OpenHashMap /** * Trait used to help executor/worker allocate resources. @@ -40,7 +39,7 @@ trait ResourceAllocator { * can be a multiple, such that each address can be allocated up to [[slotsPerAddress]] * times. * - * TODO Use [[OpenHashMap]] instead to gain better performance. + * TODO Use [[org.apache.spark.util.collection.OpenHashMap]] instead to gain better performance. */ private lazy val addressAvailabilityMap = { mutable.HashMap(resourceAddresses.map(_ -> slotsPerAddress): _*) diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala b/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala index 8a37670c31b9a..ac7e8e89fa4bf 100644 --- a/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala +++ b/core/src/main/scala/org/apache/spark/resource/ResourceProfile.scala @@ -29,6 +29,7 @@ import org.apache.spark.annotation.{Evolving, Since} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.internal.config.Python.PYSPARK_EXECUTOR_MEMORY +import org.apache.spark.util.Utils /** * Resource profile to associate with an RDD. A ResourceProfile allows the user to @@ -256,6 +257,8 @@ object ResourceProfile extends Logging { val UNKNOWN_RESOURCE_PROFILE_ID = -1 val DEFAULT_RESOURCE_PROFILE_ID = 0 + private[spark] val MEMORY_OVERHEAD_MIN_MIB = 384L + private lazy val nextProfileId = new AtomicInteger(0) private val DEFAULT_PROFILE_LOCK = new Object() @@ -263,6 +266,7 @@ object ResourceProfile extends Logging { // var so that it can be reset for testing purposes. 
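The IsWritable alias added to the rdd package object above replaces the deprecated view bound `K <% Writable`: a context bound on a function-typed alias requests the same implicit A => Writable conversion. A hedged, self-contained sketch of the mechanism; the IntWritable evidence is illustrative only, not what Spark's own implicits provide:

import org.apache.hadoop.io.{IntWritable, Writable}

object IsWritableSketch {
  // Same shape as the alias added to org.apache.spark.rdd's package object.
  type IsWritable[A] = A => Writable

  // `A: IsWritable` desugars to an implicit parameter of type A => Writable,
  // which is exactly the evidence the old view bound `A <% Writable` demanded.
  def asWritable[A: IsWritable](a: A): Writable = implicitly[IsWritable[A]].apply(a)

  // Illustrative evidence for Int, for this sketch only.
  implicit val intIsWritable: IsWritable[Int] = (i: Int) => new IntWritable(i)

  def main(args: Array[String]): Unit = println(asWritable(42)) // prints 42 (an IntWritable)
}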
@GuardedBy("DEFAULT_PROFILE_LOCK") private var defaultProfile: Option[ResourceProfile] = None + private var defaultProfileExecutorResources: Option[DefaultProfileExecutorResources] = None private[spark] def getNextProfileId: Int = nextProfileId.getAndIncrement() @@ -284,6 +288,14 @@ object ResourceProfile extends Logging { } } + private[spark] def getDefaultProfileExecutorResources( + conf: SparkConf): DefaultProfileExecutorResources = { + defaultProfileExecutorResources.getOrElse { + getOrCreateDefaultProfile(conf) + defaultProfileExecutorResources.get + } + } + private def getDefaultTaskResources(conf: SparkConf): Map[String, TaskResourceRequest] = { val cpusPerTask = conf.get(CPUS_PER_TASK) val treqs = new TaskResourceRequests().cpus(cpusPerTask) @@ -293,20 +305,26 @@ object ResourceProfile extends Logging { private def getDefaultExecutorResources(conf: SparkConf): Map[String, ExecutorResourceRequest] = { val ereqs = new ExecutorResourceRequests() - ereqs.cores(conf.get(EXECUTOR_CORES)) - ereqs.memory(conf.get(EXECUTOR_MEMORY).toString) - conf.get(EXECUTOR_MEMORY_OVERHEAD).map(mem => ereqs.memoryOverhead(mem.toString)) - conf.get(PYSPARK_EXECUTOR_MEMORY).map(mem => ereqs.pysparkMemory(mem.toString)) - if (conf.get(MEMORY_OFFHEAP_ENABLED)) { - // Explicitly add suffix b as default unit of offHeapMemory is Mib - ereqs.offHeapMemory(conf.get(MEMORY_OFFHEAP_SIZE).toString + "b") - } + val cores = conf.get(EXECUTOR_CORES) + ereqs.cores(cores) + val memory = conf.get(EXECUTOR_MEMORY) + ereqs.memory(memory.toString) + val overheadMem = conf.get(EXECUTOR_MEMORY_OVERHEAD) + overheadMem.map(mem => ereqs.memoryOverhead(mem.toString)) + val pysparkMem = conf.get(PYSPARK_EXECUTOR_MEMORY) + pysparkMem.map(mem => ereqs.pysparkMemory(mem.toString)) + val offheapMem = Utils.executorOffHeapMemorySizeAsMb(conf) + ereqs.offHeapMemory(offheapMem.toString) val execReq = ResourceUtils.parseAllResourceRequests(conf, SPARK_EXECUTOR_PREFIX) execReq.foreach { req => - val name = req.id.resourceName - ereqs.resource(name, req.amount, req.discoveryScript.orElse(""), + ereqs.resource(req.id.resourceName, req.amount, req.discoveryScript.orElse(""), req.vendor.orElse("")) } + val customResourceNames = execReq.map(_.id.resourceName).toSet + val customResources = ereqs.requests.filter(v => customResourceNames.contains(v._1)) + defaultProfileExecutorResources = + Some(DefaultProfileExecutorResources(cores, memory, offheapMem, pysparkMem, + overheadMem, customResources)) ereqs.requests } @@ -320,6 +338,7 @@ object ResourceProfile extends Logging { private[spark] def clearDefaultProfile(): Unit = { DEFAULT_PROFILE_LOCK.synchronized { defaultProfile = None + defaultProfileExecutorResources = None } } @@ -342,6 +361,100 @@ object ResourceProfile extends Logging { rp.getTaskCpus.getOrElse(conf.get(CPUS_PER_TASK)) } + /** + * Get offHeap memory size from [[ExecutorResourceRequest]] + * return 0 if MEMORY_OFFHEAP_ENABLED is false. 
+ */ + private[spark] def executorOffHeapMemorySizeAsMb(sparkConf: SparkConf, + execRequest: ExecutorResourceRequest): Long = { + Utils.checkOffHeapEnabled(sparkConf, execRequest.amount) + } + + private[spark] case class ExecutorResourcesOrDefaults( + cores: Int, + executorMemoryMiB: Long, + memoryOffHeapMiB: Long, + pysparkMemoryMiB: Long, + memoryOverheadMiB: Long, + totalMemMiB: Long, + customResources: Map[String, ExecutorResourceRequest]) + + private[spark] case class DefaultProfileExecutorResources( + cores: Int, + executorMemoryMiB: Long, + memoryOffHeapMiB: Long, + pysparkMemoryMiB: Option[Long], + memoryOverheadMiB: Option[Long], + customResources: Map[String, ExecutorResourceRequest]) + + private[spark] def calculateOverHeadMemory( + overHeadMemFromConf: Option[Long], + executorMemoryMiB: Long, + overheadFactor: Double): Long = { + overHeadMemFromConf.getOrElse(math.max((overheadFactor * executorMemoryMiB).toInt, + ResourceProfile.MEMORY_OVERHEAD_MIN_MIB)) + } + + /** + * Gets the full list of resources to allow a cluster manager to request the appropriate + * container. If the resource profile is not the default one we either get the resources + * specified in the profile or fall back to the default profile resource size for everything + * except for custom resources. + */ + private[spark] def getResourcesForClusterManager( + rpId: Int, + execResources: Map[String, ExecutorResourceRequest], + overheadFactor: Double, + conf: SparkConf, + isPythonApp: Boolean, + resourceMappings: Map[String, String]): ExecutorResourcesOrDefaults = { + val defaultResources = getDefaultProfileExecutorResources(conf) + // set all the default values, which may change for custom ResourceProfiles + var cores = defaultResources.cores + var executorMemoryMiB = defaultResources.executorMemoryMiB + var memoryOffHeapMiB = defaultResources.memoryOffHeapMiB + var pysparkMemoryMiB = defaultResources.pysparkMemoryMiB.getOrElse(0L) + var memoryOverheadMiB = calculateOverHeadMemory(defaultResources.memoryOverheadMiB, + executorMemoryMiB, overheadFactor) + + val finalCustomResources = if (rpId != DEFAULT_RESOURCE_PROFILE_ID) { + val customResources = new mutable.HashMap[String, ExecutorResourceRequest] + execResources.foreach { case (r, execReq) => + r match { + case ResourceProfile.MEMORY => + executorMemoryMiB = execReq.amount + case ResourceProfile.OVERHEAD_MEM => + memoryOverheadMiB = execReq.amount + case ResourceProfile.PYSPARK_MEM => + pysparkMemoryMiB = execReq.amount + case ResourceProfile.OFFHEAP_MEM => + memoryOffHeapMiB = executorOffHeapMemorySizeAsMb(conf, execReq) + case ResourceProfile.CORES => + cores = execReq.amount.toInt + case rName => + val nameToUse = resourceMappings.get(rName).getOrElse(rName) + customResources(nameToUse) = execReq + } + } + customResources.toMap + } else { + defaultResources.customResources.map { case (rName, execReq) => + val nameToUse = resourceMappings.get(rName).getOrElse(rName) + (nameToUse, execReq) + } + } + // only add in pyspark memory if actually a python application + val pysparkMemToUseMiB = if (isPythonApp) { + pysparkMemoryMiB + } else { + 0L + } + val totalMemMiB = + (executorMemoryMiB + memoryOverheadMiB + memoryOffHeapMiB + pysparkMemToUseMiB) + ExecutorResourcesOrDefaults(cores, executorMemoryMiB, memoryOffHeapMiB, + pysparkMemToUseMiB, memoryOverheadMiB, totalMemMiB, finalCustomResources) + } + private[spark] val PYSPARK_MEMORY_LOCAL_PROPERTY = "resource.pyspark.memory" private[spark] val EXECUTOR_CORES_LOCAL_PROPERTY = "resource.executor.cores" } diff 
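calculateOverHeadMemory above falls back to max(overheadFactor x executor memory, 384 MiB) when no explicit overhead is configured. A small worked sketch of that arithmetic; the 0.10 factor is an assumption standing in for a cluster manager's default overhead factor:

object OverheadMemorySketch {
  val MEMORY_OVERHEAD_MIN_MIB = 384L

  // Mirrors the fallback logic in ResourceProfile.calculateOverHeadMemory.
  def overheadMiB(fromConf: Option[Long], executorMemoryMiB: Long, overheadFactor: Double): Long =
    fromConf.getOrElse(math.max((overheadFactor * executorMemoryMiB).toInt, MEMORY_OVERHEAD_MIN_MIB))

  def main(args: Array[String]): Unit = {
    println(overheadMiB(None, 4096L, 0.10))        // 409 MiB: 10% of 4 GiB beats the 384 MiB floor
    println(overheadMiB(None, 1024L, 0.10))        // 384 MiB: the floor wins for small executors
    println(overheadMiB(Some(1024L), 4096L, 0.10)) // 1024 MiB: an explicit setting always wins
  }
}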
--git a/core/src/main/scala/org/apache/spark/resource/ResourceProfileManager.scala b/core/src/main/scala/org/apache/spark/resource/ResourceProfileManager.scala index f365548c75359..d538f0bcc423e 100644 --- a/core/src/main/scala/org/apache/spark/resource/ResourceProfileManager.scala +++ b/core/src/main/scala/org/apache/spark/resource/ResourceProfileManager.scala @@ -52,18 +52,25 @@ private[spark] class ResourceProfileManager(sparkConf: SparkConf, private val dynamicEnabled = Utils.isDynamicAllocationEnabled(sparkConf) private val master = sparkConf.getOption("spark.master") - private val isNotYarn = master.isDefined && !master.get.equals("yarn") - private val errorForTesting = !isTesting || sparkConf.get(RESOURCE_PROFILE_MANAGER_TESTING) + private val isYarn = master.isDefined && master.get.equals("yarn") + private val isK8s = master.isDefined && master.get.startsWith("k8s://") + private val notRunningUnitTests = !isTesting + private val testExceptionThrown = sparkConf.get(RESOURCE_PROFILE_MANAGER_TESTING) // If we use anything except the default profile, its only supported on YARN right now. // Throw an exception if not supported. private[spark] def isSupported(rp: ResourceProfile): Boolean = { val isNotDefaultProfile = rp.id != ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID - val notYarnAndNotDefaultProfile = isNotDefaultProfile && isNotYarn - val YarnNotDynAllocAndNotDefaultProfile = isNotDefaultProfile && !isNotYarn && !dynamicEnabled - if (errorForTesting && (notYarnAndNotDefaultProfile || YarnNotDynAllocAndNotDefaultProfile)) { - throw new SparkException("ResourceProfiles are only supported on YARN with dynamic " + - "allocation enabled.") + val notYarnOrK8sAndNotDefaultProfile = isNotDefaultProfile && !(isYarn || isK8s) + val YarnOrK8sNotDynAllocAndNotDefaultProfile = + isNotDefaultProfile && (isYarn || isK8s) && !dynamicEnabled + // We want the exception to be thrown only when we are specifically testing for the + // exception or in a real application. Otherwise in all other testing scenarios we want + // to skip throwing the exception so that we can test in other modes to make testing easier. 
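Condensing the ResourceProfileManager.isSupported logic in this hunk: a non-default ResourceProfile now needs YARN or Kubernetes plus dynamic allocation, otherwise a SparkException is thrown. A hedged sketch of that rule with the test-only escape hatches omitted:

import org.apache.spark.SparkException

object ResourceProfileSupportSketch {
  // Throws for unsupported combinations; returns normally otherwise.
  def checkSupported(isDefaultProfile: Boolean, master: Option[String], dynamicEnabled: Boolean): Unit = {
    val isYarn = master.contains("yarn")
    val isK8s = master.exists(_.startsWith("k8s://"))
    if (!isDefaultProfile && !((isYarn || isK8s) && dynamicEnabled)) {
      throw new SparkException(
        "ResourceProfiles are only supported on YARN and Kubernetes with dynamic allocation enabled.")
    }
  }

  def main(args: Array[String]): Unit = {
    checkSupported(isDefaultProfile = false, master = Some("k8s://https://host:6443"), dynamicEnabled = true) // ok
    checkSupported(isDefaultProfile = true, master = Some("local[*]"), dynamicEnabled = false)                // ok
  }
}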
+ if ((notRunningUnitTests || testExceptionThrown) && + (notYarnOrK8sAndNotDefaultProfile || YarnOrK8sNotDynAllocAndNotDefaultProfile)) { + throw new SparkException("ResourceProfiles are only supported on YARN and Kubernetes " + + "with dynamic allocation enabled.") } true } diff --git a/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala b/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala index 5a9435653920f..837b2d80aace6 100644 --- a/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala +++ b/core/src/main/scala/org/apache/spark/resource/ResourceUtils.scala @@ -29,8 +29,8 @@ import org.apache.spark.{SparkConf, SparkException} import org.apache.spark.annotation.DeveloperApi import org.apache.spark.api.resource.ResourceDiscoveryPlugin import org.apache.spark.internal.Logging -import org.apache.spark.internal.config.{CPUS_PER_TASK, EXECUTOR_CORES, RESOURCES_DISCOVERY_PLUGIN, SPARK_TASK_PREFIX} -import org.apache.spark.internal.config.Tests.{RESOURCES_WARNING_TESTING, SKIP_VALIDATE_CORES_TESTING} +import org.apache.spark.internal.config.{EXECUTOR_CORES, RESOURCES_DISCOVERY_PLUGIN, SPARK_TASK_PREFIX} +import org.apache.spark.internal.config.Tests.{RESOURCES_WARNING_TESTING} import org.apache.spark.util.Utils /** diff --git a/core/src/main/scala/org/apache/spark/resource/TaskResourceRequest.scala b/core/src/main/scala/org/apache/spark/resource/TaskResourceRequest.scala index d3f979fa8672f..12ef34241f9cb 100644 --- a/core/src/main/scala/org/apache/spark/resource/TaskResourceRequest.scala +++ b/core/src/main/scala/org/apache/spark/resource/TaskResourceRequest.scala @@ -20,7 +20,7 @@ package org.apache.spark.resource import org.apache.spark.annotation.{Evolving, Since} /** - * A task resource request. This is used in conjuntion with the ResourceProfile to + * A task resource request. This is used in conjunction with the ResourceProfile to * programmatically specify the resources needed for an RDD that will be applied at the * stage level. * diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala b/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala index 4a9f551646fc7..14198743c4801 100644 --- a/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala +++ b/core/src/main/scala/org/apache/spark/rpc/netty/Dispatcher.scala @@ -24,7 +24,7 @@ import scala.collection.JavaConverters._ import scala.concurrent.Promise import scala.util.control.NonFatal -import org.apache.spark.SparkException +import org.apache.spark.{SparkEnv, SparkException} import org.apache.spark.internal.Logging import org.apache.spark.network.client.RpcResponseCallback import org.apache.spark.rpc._ @@ -147,13 +147,15 @@ private[netty] class Dispatcher(nettyEnv: NettyRpcEnv, numUsableCores: Int) exte /** Posts a one-way message. */ def postOneWayMessage(message: RequestMessage): Unit = { postMessage(message.receiver.name, OneWayMessage(message.senderAddress, message.content), - (e) => e match { + { // SPARK-31922: in local cluster mode, there's always a RpcEnvStoppedException when // stop is called due to some asynchronous message handling. We catch the exception // and log it at debug level to avoid verbose error message when user stop a local // cluster in spark shell. case re: RpcEnvStoppedException => logDebug(s"Message $message dropped. ${re.getMessage}") - case _ => throw e + case e if SparkEnv.get.isStopped => + logWarning(s"Message $message dropped due to sparkEnv is stopped. 
${e.getMessage}") + case e => throw e }) } diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/Inbox.scala b/core/src/main/scala/org/apache/spark/rpc/netty/Inbox.scala index 2ed03f7430c32..472401b23fe8e 100644 --- a/core/src/main/scala/org/apache/spark/rpc/netty/Inbox.scala +++ b/core/src/main/scala/org/apache/spark/rpc/netty/Inbox.scala @@ -200,6 +200,16 @@ private[netty] class Inbox(val endpointName: String, val endpoint: RpcEndpoint) * Calls action closure, and calls the endpoint's onError function in the case of exceptions. */ private def safelyCall(endpoint: RpcEndpoint)(action: => Unit): Unit = { + def dealWithFatalError(fatal: Throwable): Unit = { + inbox.synchronized { + assert(numActiveThreads > 0, "The number of active threads should be positive.") + // Should reduce the number of active threads before throw the error. + numActiveThreads -= 1 + } + logError(s"An error happened while processing message in the inbox for $endpointName", fatal) + throw fatal + } + try action catch { case NonFatal(e) => try endpoint.onError(e) catch { @@ -209,8 +219,18 @@ private[netty] class Inbox(val endpointName: String, val endpoint: RpcEndpoint) } else { logError("Ignoring error", ee) } + case fatal: Throwable => + dealWithFatalError(fatal) } + case fatal: Throwable => + dealWithFatalError(fatal) } } + // exposed only for testing + def getNumActiveThreads: Int = { + inbox.synchronized { + inbox.numActiveThreads + } + } } diff --git a/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala b/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala index fcb9fe422c0d4..5864e9e2ceac0 100644 --- a/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala +++ b/core/src/main/scala/org/apache/spark/rpc/netty/NettyRpcEnv.scala @@ -254,14 +254,14 @@ private[netty] class NettyRpcEnv( val timeoutCancelable = timeoutScheduler.schedule(new Runnable { override def run(): Unit = { - val remoteReceAddr = if (remoteAddr == null) { + val remoteRecAddr = if (remoteAddr == null) { Try { message.receiver.client.getChannel.remoteAddress() }.toOption.orNull } else { remoteAddr } - onFailure(new TimeoutException(s"Cannot receive any reply from ${remoteReceAddr} " + + onFailure(new TimeoutException(s"Cannot receive any reply from ${remoteRecAddr} " + s"in ${timeout.duration}")) } }, timeout.duration.toNanos, TimeUnit.NANOSECONDS) diff --git a/core/src/main/scala/org/apache/spark/scheduler/BarrierJobAllocationFailed.scala b/core/src/main/scala/org/apache/spark/scheduler/BarrierJobAllocationFailed.scala index 043c6b90384b4..8f0764ed1a61e 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/BarrierJobAllocationFailed.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/BarrierJobAllocationFailed.scala @@ -45,10 +45,10 @@ private[spark] object BarrierJobAllocationFailed { val ERROR_MESSAGE_RUN_BARRIER_WITH_UNSUPPORTED_RDD_CHAIN_PATTERN = "[SPARK-24820][SPARK-24821]: Barrier execution mode does not allow the following pattern of " + "RDD chain within a barrier stage:\n1. Ancestor RDDs that have different number of " + - "partitions from the resulting RDD (eg. union()/coalesce()/first()/take()/" + + "partitions from the resulting RDD (e.g. union()/coalesce()/first()/take()/" + "PartitionPruningRDD). A workaround for first()/take() can be barrierRdd.collect().head " + "(scala) or barrierRdd.collect()[0] (python).\n" + - "2. An RDD that depends on multiple barrier RDDs (eg. barrierRdd1.zip(barrierRdd2))." + "2. An RDD that depends on multiple barrier RDDs (e.g. 
barrierRdd1.zip(barrierRdd2))." // Error message when running a barrier stage with dynamic resource allocation enabled. val ERROR_MESSAGE_RUN_BARRIER_WITH_DYN_ALLOCATION = diff --git a/core/src/main/scala/org/apache/spark/scheduler/BlacklistTracker.scala b/core/src/main/scala/org/apache/spark/scheduler/BlacklistTracker.scala deleted file mode 100644 index 9e524c52267be..0000000000000 --- a/core/src/main/scala/org/apache/spark/scheduler/BlacklistTracker.scala +++ /dev/null @@ -1,477 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.scheduler - -import java.util.concurrent.atomic.AtomicReference - -import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet} - -import org.apache.spark.{ExecutorAllocationClient, SparkConf, SparkContext} -import org.apache.spark.internal.Logging -import org.apache.spark.internal.config -import org.apache.spark.util.{Clock, SystemClock, Utils} - -/** - * BlacklistTracker is designed to track problematic executors and nodes. It supports blacklisting - * executors and nodes across an entire application (with a periodic expiry). TaskSetManagers add - * additional blacklisting of executors and nodes for individual tasks and stages which works in - * concert with the blacklisting here. - * - * The tracker needs to deal with a variety of workloads, eg.: - * - * * bad user code -- this may lead to many task failures, but that should not count against - * individual executors - * * many small stages -- this may prevent a bad executor for having many failures within one - * stage, but still many failures over the entire application - * * "flaky" executors -- they don't fail every task, but are still faulty enough to merit - * blacklisting - * - * See the design doc on SPARK-8425 for a more in-depth discussion. - * - * THREADING: As with most helpers of TaskSchedulerImpl, this is not thread-safe. Though it is - * called by multiple threads, callers must already have a lock on the TaskSchedulerImpl. The - * one exception is [[nodeBlacklist()]], which can be called without holding a lock. 
- */ -private[scheduler] class BlacklistTracker ( - private val listenerBus: LiveListenerBus, - conf: SparkConf, - allocationClient: Option[ExecutorAllocationClient], - clock: Clock = new SystemClock()) extends Logging { - - def this(sc: SparkContext, allocationClient: Option[ExecutorAllocationClient]) = { - this(sc.listenerBus, sc.conf, allocationClient) - } - - BlacklistTracker.validateBlacklistConfs(conf) - private val MAX_FAILURES_PER_EXEC = conf.get(config.MAX_FAILURES_PER_EXEC) - private val MAX_FAILED_EXEC_PER_NODE = conf.get(config.MAX_FAILED_EXEC_PER_NODE) - val BLACKLIST_TIMEOUT_MILLIS = BlacklistTracker.getBlacklistTimeout(conf) - private val BLACKLIST_FETCH_FAILURE_ENABLED = conf.get(config.BLACKLIST_FETCH_FAILURE_ENABLED) - - /** - * A map from executorId to information on task failures. Tracks the time of each task failure, - * so that we can avoid blacklisting executors due to failures that are very far apart. We do not - * actively remove from this as soon as tasks hit their timeouts, to avoid the time it would take - * to do so. But it will not grow too large, because as soon as an executor gets too many - * failures, we blacklist the executor and remove its entry here. - */ - private val executorIdToFailureList = new HashMap[String, ExecutorFailureList]() - val executorIdToBlacklistStatus = new HashMap[String, BlacklistedExecutor]() - val nodeIdToBlacklistExpiryTime = new HashMap[String, Long]() - /** - * An immutable copy of the set of nodes that are currently blacklisted. Kept in an - * AtomicReference to make [[nodeBlacklist()]] thread-safe. - */ - private val _nodeBlacklist = new AtomicReference[Set[String]](Set()) - /** - * Time when the next blacklist will expire. Used as a - * shortcut to avoid iterating over all entries in the blacklist when none will have expired. - */ - var nextExpiryTime: Long = Long.MaxValue - /** - * Mapping from nodes to all of the executors that have been blacklisted on that node. We do *not* - * remove from this when executors are removed from spark, so we can track when we get multiple - * successive blacklisted executors on one node. Nonetheless, it will not grow too large because - * there cannot be many blacklisted executors on one node, before we stop requesting more - * executors on that node, and we clean up the list of blacklisted executors once an executor has - * been blacklisted for BLACKLIST_TIMEOUT_MILLIS. - */ - val nodeToBlacklistedExecs = new HashMap[String, HashSet[String]]() - - /** - * Un-blacklists executors and nodes that have been blacklisted for at least - * BLACKLIST_TIMEOUT_MILLIS - */ - def applyBlacklistTimeout(): Unit = { - val now = clock.getTimeMillis() - // quickly check if we've got anything to expire from blacklist -- if not, avoid doing any work - if (now > nextExpiryTime) { - // Apply the timeout to blacklisted nodes and executors - val execsToUnblacklist = executorIdToBlacklistStatus.filter(_._2.expiryTime < now).keys - if (execsToUnblacklist.nonEmpty) { - // Un-blacklist any executors that have been blacklisted longer than the blacklist timeout. 
- logInfo(s"Removing executors $execsToUnblacklist from blacklist because the blacklist " + - s"for those executors has timed out") - execsToUnblacklist.foreach { exec => - val status = executorIdToBlacklistStatus.remove(exec).get - val failedExecsOnNode = nodeToBlacklistedExecs(status.node) - listenerBus.post(SparkListenerExecutorUnblacklisted(now, exec)) - failedExecsOnNode.remove(exec) - if (failedExecsOnNode.isEmpty) { - nodeToBlacklistedExecs.remove(status.node) - } - } - } - val nodesToUnblacklist = nodeIdToBlacklistExpiryTime.filter(_._2 < now).keys - if (nodesToUnblacklist.nonEmpty) { - // Un-blacklist any nodes that have been blacklisted longer than the blacklist timeout. - logInfo(s"Removing nodes $nodesToUnblacklist from blacklist because the blacklist " + - s"has timed out") - nodesToUnblacklist.foreach { node => - nodeIdToBlacklistExpiryTime.remove(node) - listenerBus.post(SparkListenerNodeUnblacklisted(now, node)) - } - _nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet) - } - updateNextExpiryTime() - } - } - - private def updateNextExpiryTime(): Unit = { - val execMinExpiry = if (executorIdToBlacklistStatus.nonEmpty) { - executorIdToBlacklistStatus.map{_._2.expiryTime}.min - } else { - Long.MaxValue - } - val nodeMinExpiry = if (nodeIdToBlacklistExpiryTime.nonEmpty) { - nodeIdToBlacklistExpiryTime.values.min - } else { - Long.MaxValue - } - nextExpiryTime = math.min(execMinExpiry, nodeMinExpiry) - } - - private def killExecutor(exec: String, msg: String): Unit = { - allocationClient match { - case Some(a) => - logInfo(msg) - a.killExecutors(Seq(exec), adjustTargetNumExecutors = false, countFailures = false, - force = true) - case None => - logInfo(s"Not attempting to kill blacklisted executor id $exec " + - s"since allocation client is not defined.") - } - } - - private def killBlacklistedExecutor(exec: String): Unit = { - if (conf.get(config.BLACKLIST_KILL_ENABLED)) { - killExecutor(exec, - s"Killing blacklisted executor id $exec since ${config.BLACKLIST_KILL_ENABLED.key} is set.") - } - } - - private[scheduler] def killBlacklistedIdleExecutor(exec: String): Unit = { - killExecutor(exec, - s"Killing blacklisted idle executor id $exec because of task unschedulability and trying " + - "to acquire a new executor.") - } - - private def killExecutorsOnBlacklistedNode(node: String): Unit = { - if (conf.get(config.BLACKLIST_KILL_ENABLED)) { - allocationClient match { - case Some(a) => - logInfo(s"Killing all executors on blacklisted host $node " + - s"since ${config.BLACKLIST_KILL_ENABLED.key} is set.") - if (a.killExecutorsOnHost(node) == false) { - logError(s"Killing executors on node $node failed.") - } - case None => - logWarning(s"Not attempting to kill executors on blacklisted host $node " + - s"since allocation client is not defined.") - } - } - } - - def updateBlacklistForFetchFailure(host: String, exec: String): Unit = { - if (BLACKLIST_FETCH_FAILURE_ENABLED) { - // If we blacklist on fetch failures, we are implicitly saying that we believe the failure is - // non-transient, and can't be recovered from (even if this is the first fetch failure, - // stage is retried after just one failure, so we don't always get a chance to collect - // multiple fetch failures). - // If the external shuffle-service is on, then every other executor on this node would - // be suffering from the same issue, so we should blacklist (and potentially kill) all - // of them immediately. 
- - val now = clock.getTimeMillis() - val expiryTimeForNewBlacklists = now + BLACKLIST_TIMEOUT_MILLIS - - if (conf.get(config.SHUFFLE_SERVICE_ENABLED)) { - if (!nodeIdToBlacklistExpiryTime.contains(host)) { - logInfo(s"blacklisting node $host due to fetch failure of external shuffle service") - - nodeIdToBlacklistExpiryTime.put(host, expiryTimeForNewBlacklists) - listenerBus.post(SparkListenerNodeBlacklisted(now, host, 1)) - _nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet) - killExecutorsOnBlacklistedNode(host) - updateNextExpiryTime() - } - } else if (!executorIdToBlacklistStatus.contains(exec)) { - logInfo(s"Blacklisting executor $exec due to fetch failure") - - executorIdToBlacklistStatus.put(exec, BlacklistedExecutor(host, expiryTimeForNewBlacklists)) - // We hardcoded number of failure tasks to 1 for fetch failure, because there's no - // reattempt for such failure. - listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, 1)) - updateNextExpiryTime() - killBlacklistedExecutor(exec) - - val blacklistedExecsOnNode = nodeToBlacklistedExecs.getOrElseUpdate(host, HashSet[String]()) - blacklistedExecsOnNode += exec - } - } - } - - def updateBlacklistForSuccessfulTaskSet( - stageId: Int, - stageAttemptId: Int, - failuresByExec: HashMap[String, ExecutorFailuresInTaskSet]): Unit = { - // if any tasks failed, we count them towards the overall failure count for the executor at - // this point. - val now = clock.getTimeMillis() - failuresByExec.foreach { case (exec, failuresInTaskSet) => - val appFailuresOnExecutor = - executorIdToFailureList.getOrElseUpdate(exec, new ExecutorFailureList) - appFailuresOnExecutor.addFailures(stageId, stageAttemptId, failuresInTaskSet) - appFailuresOnExecutor.dropFailuresWithTimeoutBefore(now) - val newTotal = appFailuresOnExecutor.numUniqueTaskFailures - - val expiryTimeForNewBlacklists = now + BLACKLIST_TIMEOUT_MILLIS - // If this pushes the total number of failures over the threshold, blacklist the executor. - // If its already blacklisted, we avoid "re-blacklisting" (which can happen if there were - // other tasks already running in another taskset when it got blacklisted), because it makes - // some of the logic around expiry times a little more confusing. But it also wouldn't be a - // problem to re-blacklist, with a later expiry time. - if (newTotal >= MAX_FAILURES_PER_EXEC && !executorIdToBlacklistStatus.contains(exec)) { - logInfo(s"Blacklisting executor id: $exec because it has $newTotal" + - s" task failures in successful task sets") - val node = failuresInTaskSet.node - executorIdToBlacklistStatus.put(exec, BlacklistedExecutor(node, expiryTimeForNewBlacklists)) - listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, newTotal)) - executorIdToFailureList.remove(exec) - updateNextExpiryTime() - killBlacklistedExecutor(exec) - - // In addition to blacklisting the executor, we also update the data for failures on the - // node, and potentially put the entire node into a blacklist as well. - val blacklistedExecsOnNode = nodeToBlacklistedExecs.getOrElseUpdate(node, HashSet[String]()) - blacklistedExecsOnNode += exec - // If the node is already in the blacklist, we avoid adding it again with a later expiry - // time. 
- if (blacklistedExecsOnNode.size >= MAX_FAILED_EXEC_PER_NODE && - !nodeIdToBlacklistExpiryTime.contains(node)) { - logInfo(s"Blacklisting node $node because it has ${blacklistedExecsOnNode.size} " + - s"executors blacklisted: ${blacklistedExecsOnNode}") - nodeIdToBlacklistExpiryTime.put(node, expiryTimeForNewBlacklists) - listenerBus.post(SparkListenerNodeBlacklisted(now, node, blacklistedExecsOnNode.size)) - _nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet) - killExecutorsOnBlacklistedNode(node) - } - } - } - } - - def isExecutorBlacklisted(executorId: String): Boolean = { - executorIdToBlacklistStatus.contains(executorId) - } - - /** - * Get the full set of nodes that are blacklisted. Unlike other methods in this class, this *IS* - * thread-safe -- no lock required on a taskScheduler. - */ - def nodeBlacklist(): Set[String] = { - _nodeBlacklist.get() - } - - def isNodeBlacklisted(node: String): Boolean = { - nodeIdToBlacklistExpiryTime.contains(node) - } - - def handleRemovedExecutor(executorId: String): Unit = { - // We intentionally do not clean up executors that are already blacklisted in - // nodeToBlacklistedExecs, so that if another executor on the same node gets blacklisted, we can - // blacklist the entire node. We also can't clean up executorIdToBlacklistStatus, so we can - // eventually remove the executor after the timeout. Despite not clearing those structures - // here, we don't expect they will grow too big since you won't get too many executors on one - // node, and the timeout will clear it up periodically in any case. - executorIdToFailureList -= executorId - } - - - /** - * Tracks all failures for one executor (that have not passed the timeout). - * - * In general we actually expect this to be extremely small, since it won't contain more than the - * maximum number of task failures before an executor is failed (default 2). - */ - private[scheduler] final class ExecutorFailureList extends Logging { - - private case class TaskId(stage: Int, stageAttempt: Int, taskIndex: Int) - - /** - * All failures on this executor in successful task sets. - */ - private var failuresAndExpiryTimes = ArrayBuffer[(TaskId, Long)]() - /** - * As an optimization, we track the min expiry time over all entries in failuresAndExpiryTimes - * so its quick to tell if there are any failures with expiry before the current time. - */ - private var minExpiryTime = Long.MaxValue - - def addFailures( - stage: Int, - stageAttempt: Int, - failuresInTaskSet: ExecutorFailuresInTaskSet): Unit = { - failuresInTaskSet.taskToFailureCountAndFailureTime.foreach { - case (taskIdx, (_, failureTime)) => - val expiryTime = failureTime + BLACKLIST_TIMEOUT_MILLIS - failuresAndExpiryTimes += ((TaskId(stage, stageAttempt, taskIdx), expiryTime)) - if (expiryTime < minExpiryTime) { - minExpiryTime = expiryTime - } - } - } - - /** - * The number of unique tasks that failed on this executor. Only counts failures within the - * timeout, and in successful tasksets. - */ - def numUniqueTaskFailures: Int = failuresAndExpiryTimes.size - - def isEmpty: Boolean = failuresAndExpiryTimes.isEmpty - - /** - * Apply the timeout to individual tasks. This is to prevent one-off failures that are very - * spread out in time (and likely have nothing to do with problems on the executor) from - * triggering blacklisting. However, note that we do *not* remove executors and nodes from - * the blacklist as we expire individual task failures -- each have their own timeout. 
Eg., - * suppose: - * * timeout = 10, maxFailuresPerExec = 2 - * * Task 1 fails on exec 1 at time 0 - * * Task 2 fails on exec 1 at time 5 - * --> exec 1 is blacklisted from time 5 - 15. - * This is to simplify the implementation, as well as keep the behavior easier to understand - * for the end user. - */ - def dropFailuresWithTimeoutBefore(dropBefore: Long): Unit = { - if (minExpiryTime < dropBefore) { - var newMinExpiry = Long.MaxValue - val newFailures = new ArrayBuffer[(TaskId, Long)] - failuresAndExpiryTimes.foreach { case (task, expiryTime) => - if (expiryTime >= dropBefore) { - newFailures += ((task, expiryTime)) - if (expiryTime < newMinExpiry) { - newMinExpiry = expiryTime - } - } - } - failuresAndExpiryTimes = newFailures - minExpiryTime = newMinExpiry - } - } - - override def toString(): String = { - s"failures = $failuresAndExpiryTimes" - } - } - -} - -private[spark] object BlacklistTracker extends Logging { - - private val DEFAULT_TIMEOUT = "1h" - - /** - * Returns true if the blacklist is enabled, based on checking the configuration in the following - * order: - * 1. Is it specifically enabled or disabled? - * 2. Is it enabled via the legacy timeout conf? - * 3. Default is off - */ - def isBlacklistEnabled(conf: SparkConf): Boolean = { - conf.get(config.BLACKLIST_ENABLED) match { - case Some(enabled) => - enabled - case None => - // if they've got a non-zero setting for the legacy conf, always enable the blacklist, - // otherwise, use the default. - val legacyKey = config.BLACKLIST_LEGACY_TIMEOUT_CONF.key - conf.get(config.BLACKLIST_LEGACY_TIMEOUT_CONF).exists { legacyTimeout => - if (legacyTimeout == 0) { - logWarning(s"Turning off blacklisting due to legacy configuration: $legacyKey == 0") - false - } else { - logWarning(s"Turning on blacklisting due to legacy configuration: $legacyKey > 0") - true - } - } - } - } - - def getBlacklistTimeout(conf: SparkConf): Long = { - conf.get(config.BLACKLIST_TIMEOUT_CONF).getOrElse { - conf.get(config.BLACKLIST_LEGACY_TIMEOUT_CONF).getOrElse { - Utils.timeStringAsMs(DEFAULT_TIMEOUT) - } - } - } - - /** - * Verify that blacklist configurations are consistent; if not, throw an exception. Should only - * be called if blacklisting is enabled. - * - * The configuration for the blacklist is expected to adhere to a few invariants. Default - * values follow these rules of course, but users may unwittingly change one configuration - * without making the corresponding adjustment elsewhere. This ensures we fail-fast when - * there are such misconfigurations. - */ - def validateBlacklistConfs(conf: SparkConf): Unit = { - - def mustBePos(k: String, v: String): Unit = { - throw new IllegalArgumentException(s"$k was $v, but must be > 0.") - } - - Seq( - config.MAX_TASK_ATTEMPTS_PER_EXECUTOR, - config.MAX_TASK_ATTEMPTS_PER_NODE, - config.MAX_FAILURES_PER_EXEC_STAGE, - config.MAX_FAILED_EXEC_PER_NODE_STAGE, - config.MAX_FAILURES_PER_EXEC, - config.MAX_FAILED_EXEC_PER_NODE - ).foreach { config => - val v = conf.get(config) - if (v <= 0) { - mustBePos(config.key, v.toString) - } - } - - val timeout = getBlacklistTimeout(conf) - if (timeout <= 0) { - // first, figure out where the timeout came from, to include the right conf in the message. 
- conf.get(config.BLACKLIST_TIMEOUT_CONF) match { - case Some(t) => - mustBePos(config.BLACKLIST_TIMEOUT_CONF.key, timeout.toString) - case None => - mustBePos(config.BLACKLIST_LEGACY_TIMEOUT_CONF.key, timeout.toString) - } - } - - val maxTaskFailures = conf.get(config.TASK_MAX_FAILURES) - val maxNodeAttempts = conf.get(config.MAX_TASK_ATTEMPTS_PER_NODE) - - if (maxNodeAttempts >= maxTaskFailures) { - throw new IllegalArgumentException(s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key} " + - s"( = ${maxNodeAttempts}) was >= ${config.TASK_MAX_FAILURES.key} " + - s"( = ${maxTaskFailures} ). Though blacklisting is enabled, with this configuration, " + - s"Spark will not be robust to one bad node. Decrease " + - s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key}, increase ${config.TASK_MAX_FAILURES.key}, " + - s"or disable blacklisting with ${config.BLACKLIST_ENABLED.key}") - } - } -} - -private final case class BlacklistedExecutor(node: String, expiryTime: Long) diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala index 080e0e7f1552f..02f5bb8cccd52 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala @@ -249,6 +249,8 @@ private[spark] class DAGScheduler( private[spark] val eventProcessLoop = new DAGSchedulerEventProcessLoop(this) taskScheduler.setDAGScheduler(this) + private val pushBasedShuffleEnabled = Utils.isPushBasedShuffleEnabled(sc.getConf) + /** * Called by the TaskSetManager to report task's starting. */ @@ -333,8 +335,8 @@ private[spark] class DAGScheduler( } /** - * Called by the TaskSetManager when a taskset becomes unschedulable due to blacklisting and - * dynamic allocation is enabled. + * Called by the TaskSetManager when a taskset becomes unschedulable due to executors being + * excluded because of too many task failures and dynamic allocation is enabled. */ def unschedulableTaskSetAdded( stageId: Int, @@ -407,9 +409,9 @@ private[spark] class DAGScheduler( /** * Check to make sure we don't launch a barrier stage with unsupported RDD chain pattern. The * following patterns are not supported: - * 1. Ancestor RDDs that have different number of partitions from the resulting RDD (eg. + * 1. Ancestor RDDs that have different number of partitions from the resulting RDD (e.g. * union()/coalesce()/first()/take()/PartitionPruningRDD); - * 2. An RDD that depends on multiple barrier RDDs (eg. barrierRdd1.zip(barrierRdd2)). + * 2. An RDD that depends on multiple barrier RDDs (e.g. barrierRdd1.zip(barrierRdd2)). */ private def checkBarrierStageWithRDDChainPattern(rdd: RDD[_], numTasksInStage: Int): Unit = { if (rdd.isBarrier() && @@ -457,7 +459,7 @@ private[spark] class DAGScheduler( /** * We don't support run a barrier stage with dynamic resource allocation enabled, it shall lead - * to some confusing behaviors (eg. with dynamic resource allocation enabled, it may happen that + * to some confusing behaviors (e.g. with dynamic resource allocation enabled, it may happen that * we acquire some executors (but not enough to launch all the tasks in a barrier stage) and * later release them due to executor idle time expire, and then acquire again). 
* @@ -1252,6 +1254,33 @@ private[spark] class DAGScheduler( execCores.map(cores => properties.setProperty(EXECUTOR_CORES_LOCAL_PROPERTY, cores)) } + /** + * If push based shuffle is enabled, set the shuffle services to be used for the given + * shuffle map stage for block push/merge. + * + * Even with dynamic resource allocation kicking in and significantly reducing the number + * of available active executors, we would still be able to get sufficient shuffle service + * locations for block push/merge by getting the historical locations of past executors. + */ + private def prepareShuffleServicesForShuffleMapStage(stage: ShuffleMapStage): Unit = { + // TODO(SPARK-32920) Handle stage reuse/retry cases separately as without finalize + // TODO changes we cannot disable shuffle merge for the retry/reuse cases + val mergerLocs = sc.schedulerBackend.getShufflePushMergerLocations( + stage.shuffleDep.partitioner.numPartitions, stage.resourceProfileId) + + if (mergerLocs.nonEmpty) { + stage.shuffleDep.setMergerLocs(mergerLocs) + logInfo(s"Push-based shuffle enabled for $stage (${stage.name}) with" + + s" ${stage.shuffleDep.getMergerLocs.size} merger locations") + + logDebug("List of shuffle push merger locations " + + s"${stage.shuffleDep.getMergerLocs.map(_.host).mkString(", ")}") + } else { + logInfo("No available merger locations." + + s" Push-based shuffle disabled for $stage (${stage.name})") + } + } + /** Called when stage's parents are available and we can now do its task. */ private def submitMissingTasks(stage: Stage, jobId: Int): Unit = { logDebug("submitMissingTasks(" + stage + ")") @@ -1281,6 +1310,12 @@ private[spark] class DAGScheduler( stage match { case s: ShuffleMapStage => outputCommitCoordinator.stageStart(stage = s.id, maxPartitionId = s.numPartitions - 1) + // Only generate merger location for a given shuffle dependency once. This way, even if + // this stage gets retried, it would still be merging blocks using the same set of + // shuffle services. + if (pushBasedShuffleEnabled) { + prepareShuffleServicesForShuffleMapStage(s) + } case s: ResultStage => outputCommitCoordinator.stageStart( stage = s.id, maxPartitionId = s.rdd.partitions.length - 1) @@ -1520,7 +1555,7 @@ private[spark] class DAGScheduler( event.reason) if (!stageIdToStage.contains(task.stageId)) { - // The stage may have already finished when we get this event -- eg. maybe it was a + // The stage may have already finished when we get this event -- e.g. maybe it was a // speculative task. It is important that we send the TaskEnd event in any case, so listeners // are properly notified and can chose to handle it. 
For instance, some listeners are // doing their own accounting and if they don't get the task end event they think @@ -2027,6 +2062,11 @@ private[spark] class DAGScheduler( if (!executorFailureEpoch.contains(execId) || executorFailureEpoch(execId) < currentEpoch) { executorFailureEpoch(execId) = currentEpoch logInfo(s"Executor lost: $execId (epoch $currentEpoch)") + if (pushBasedShuffleEnabled) { + // Remove fetchFailed host in the shuffle push merger list for push based shuffle + hostToUnregisterOutputs.foreach( + host => blockManagerMaster.removeShufflePushMergerLocation(host)) + } blockManagerMaster.removeExecutor(execId) clearCacheLocs() } diff --git a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala index b2e9a0b2a04e8..d4e22d739098f 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/EventLoggingListener.scala @@ -18,7 +18,9 @@ package org.apache.spark.scheduler import java.net.URI +import java.util.Properties +import scala.collection.JavaConverters._ import scala.collection.mutable import org.apache.hadoop.conf.Configuration @@ -103,7 +105,7 @@ private[spark] class EventLoggingListener( // Events that do not trigger a flush override def onStageSubmitted(event: SparkListenerStageSubmitted): Unit = { - logEvent(event) + logEvent(event.copy(properties = redactProperties(event.properties))) if (shouldLogStageExecutorMetrics) { // record the peak metrics for the new stage liveStageExecutorMetrics.put((event.stageInfo.stageId, event.stageInfo.attemptNumber()), @@ -156,7 +158,9 @@ private[spark] class EventLoggingListener( logEvent(event, flushLogger = true) } - override def onJobStart(event: SparkListenerJobStart): Unit = logEvent(event, flushLogger = true) + override def onJobStart(event: SparkListenerJobStart): Unit = { + logEvent(event.copy(properties = redactProperties(event.properties)), flushLogger = true) + } override def onJobEnd(event: SparkListenerJobEnd): Unit = logEvent(event, flushLogger = true) @@ -191,27 +195,53 @@ private[spark] class EventLoggingListener( logEvent(event, flushLogger = true) } + override def onExecutorExcluded(event: SparkListenerExecutorExcluded): Unit = { + logEvent(event, flushLogger = true) + } + override def onExecutorBlacklistedForStage( event: SparkListenerExecutorBlacklistedForStage): Unit = { logEvent(event, flushLogger = true) } + override def onExecutorExcludedForStage( + event: SparkListenerExecutorExcludedForStage): Unit = { + logEvent(event, flushLogger = true) + } + override def onNodeBlacklistedForStage(event: SparkListenerNodeBlacklistedForStage): Unit = { logEvent(event, flushLogger = true) } + override def onNodeExcludedForStage(event: SparkListenerNodeExcludedForStage): Unit = { + logEvent(event, flushLogger = true) + } + override def onExecutorUnblacklisted(event: SparkListenerExecutorUnblacklisted): Unit = { logEvent(event, flushLogger = true) } + override def onExecutorUnexcluded(event: SparkListenerExecutorUnexcluded): Unit = { + logEvent(event, flushLogger = true) + } + + override def onNodeBlacklisted(event: SparkListenerNodeBlacklisted): Unit = { logEvent(event, flushLogger = true) } + override def onNodeExcluded(event: SparkListenerNodeExcluded): Unit = { + logEvent(event, flushLogger = true) + } + override def onNodeUnblacklisted(event: SparkListenerNodeUnblacklisted): Unit = { logEvent(event, flushLogger = true) } + override def 
onNodeUnexcluded(event: SparkListenerNodeUnexcluded): Unit = { + logEvent(event, flushLogger = true) + } + override def onBlockUpdated(event: SparkListenerBlockUpdated): Unit = { if (shouldLogBlockUpdates) { logEvent(event, flushLogger = true) @@ -250,6 +280,22 @@ private[spark] class EventLoggingListener( logWriter.stop() } + private def redactProperties(properties: Properties): Properties = { + if (properties == null) { + return properties + } + val redactedProperties = new Properties + // properties may contain some custom local properties such as stage/job description + // only properties in sparkConf need to be redacted. + val (globalProperties, localProperties) = properties.asScala.toSeq.partition { + case (key, _) => sparkConf.contains(key) + } + (Utils.redact(sparkConf, globalProperties) ++ localProperties).foreach { + case (key, value) => redactedProperties.setProperty(key, value) + } + redactedProperties + } + private[spark] def redactEvent( event: SparkListenerEnvironmentUpdate): SparkListenerEnvironmentUpdate = { // environmentDetails maps a string descriptor to a set of properties diff --git a/core/src/main/scala/org/apache/spark/scheduler/ExecutorFailuresInTaskSet.scala b/core/src/main/scala/org/apache/spark/scheduler/ExecutorFailuresInTaskSet.scala index 70553d8be28b5..f27c1560f8272 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/ExecutorFailuresInTaskSet.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/ExecutorFailuresInTaskSet.scala @@ -19,7 +19,7 @@ package org.apache.spark.scheduler import scala.collection.mutable.HashMap /** - * Small helper for tracking failed tasks for blacklisting purposes. Info on all failures on one + * Small helper for tracking failed tasks for exclusion purposes. Info on all failures on one * executor, within one task set. */ private[scheduler] class ExecutorFailuresInTaskSet(val node: String) { diff --git a/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala b/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala index f2eb4a7047b56..2644d0af2ac50 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/ExecutorLossReason.scala @@ -70,6 +70,7 @@ case class ExecutorProcessLost( * * This is used by the task scheduler to remove state associated with the executor, but * not yet fail any tasks that were running in the executor before the executor is "fully" lost. + * If you update this code make sure to re-run the K8s integration tests. * * @param workerHost it is defined when the worker is decommissioned too */ diff --git a/core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala b/core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala new file mode 100644 index 0000000000000..c6b8dca3597ba --- /dev/null +++ b/core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala @@ -0,0 +1,491 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.scheduler + +import java.util.concurrent.atomic.AtomicReference + +import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet} + +import org.apache.spark.{ExecutorAllocationClient, SparkConf, SparkContext} +import org.apache.spark.internal.Logging +import org.apache.spark.internal.config +import org.apache.spark.util.{Clock, SystemClock, Utils} + +/** + * HealthTracker is designed to track problematic executors and nodes. It supports excluding + * executors and nodes across an entire application (with a periodic expiry). TaskSetManagers add + * additional logic for exclusion of executors and nodes for individual tasks and stages which + * works in concert with the logic here. + * + * The tracker needs to deal with a variety of workloads, e.g.: + * + * * bad user code -- this may lead to many task failures, but that should not count against + * individual executors + * * many small stages -- this may prevent a bad executor for having many failures within one + * stage, but still many failures over the entire application + * * "flaky" executors -- they don't fail every task, but are still faulty enough to merit + * excluding + * + * See the design doc on SPARK-8425 for a more in-depth discussion. Note SPARK-32037 renamed + * the feature. + * + * THREADING: As with most helpers of TaskSchedulerImpl, this is not thread-safe. Though it is + * called by multiple threads, callers must already have a lock on the TaskSchedulerImpl. The + * one exception is [[excludedNodeList()]], which can be called without holding a lock. + */ +private[scheduler] class HealthTracker ( + private val listenerBus: LiveListenerBus, + conf: SparkConf, + allocationClient: Option[ExecutorAllocationClient], + clock: Clock = new SystemClock()) extends Logging { + + def this(sc: SparkContext, allocationClient: Option[ExecutorAllocationClient]) = { + this(sc.listenerBus, sc.conf, allocationClient) + } + + HealthTracker.validateExcludeOnFailureConfs(conf) + private val MAX_FAILURES_PER_EXEC = conf.get(config.MAX_FAILURES_PER_EXEC) + private val MAX_FAILED_EXEC_PER_NODE = conf.get(config.MAX_FAILED_EXEC_PER_NODE) + val EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS = HealthTracker.getExludeOnFailureTimeout(conf) + private val EXCLUDE_FETCH_FAILURE_ENABLED = + conf.get(config.EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED) + + /** + * A map from executorId to information on task failures. Tracks the time of each task failure, + * so that we can avoid excluding executors due to failures that are very far apart. We do not + * actively remove from this as soon as tasks hit their timeouts, to avoid the time it would take + * to do so. But it will not grow too large, because as soon as an executor gets too many + * failures, we exclude the executor and remove its entry here. + */ + private val executorIdToFailureList = new HashMap[String, ExecutorFailureList]() + val executorIdToExcludedStatus = new HashMap[String, ExcludedExecutor]() + val nodeIdToExcludedExpiryTime = new HashMap[String, Long]() + /** + * An immutable copy of the set of nodes that are currently excluded. 
Kept in an + * AtomicReference to make [[excludedNodeList()]] thread-safe. + */ + private val _excludedNodeList = new AtomicReference[Set[String]](Set()) + /** + * Time when the next excluded node will expire. Used as a shortcut to + * avoid iterating over all entries in the excludedNodeList when none will have expired. + */ + var nextExpiryTime: Long = Long.MaxValue + /** + * Mapping from nodes to all of the executors that have been excluded on that node. We do *not* + * remove from this when executors are removed from Spark, so we can track when we get multiple + * successive excluded executors on one node. Nonetheless, it will not grow too large because + * there cannot be many excluded executors on one node, before we stop requesting more + * executors on that node, and we clean up the list of excluded executors once an executor has + * been excluded for EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS. + */ + val nodeToExcludedExecs = new HashMap[String, HashSet[String]]() + + /** + * Include executors and nodes that have been excluded for at least + * EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + */ + def applyExcludeOnFailureTimeout(): Unit = { + val now = clock.getTimeMillis() + // quickly check if we've got anything to expire that is excluded -- if not, + // avoid doing any work + if (now > nextExpiryTime) { + // Apply the timeout to excluded nodes and executors + val execsToInclude = executorIdToExcludedStatus.filter(_._2.expiryTime < now).keys + if (execsToInclude.nonEmpty) { + // Include any executors that have been excluded longer than the excludeOnFailure timeout. + logInfo(s"Removing executors $execsToInclude from exclude list because the " + + s"exclusion for those executors has timed out") + execsToInclude.foreach { exec => + val status = executorIdToExcludedStatus.remove(exec).get + val failedExecsOnNode = nodeToExcludedExecs(status.node) + // post both to keep backwards compatibility + listenerBus.post(SparkListenerExecutorUnblacklisted(now, exec)) + listenerBus.post(SparkListenerExecutorUnexcluded(now, exec)) + failedExecsOnNode.remove(exec) + if (failedExecsOnNode.isEmpty) { + nodeToExcludedExecs.remove(status.node) + } + } + } + val nodesToInclude = nodeIdToExcludedExpiryTime.filter(_._2 < now).keys + if (nodesToInclude.nonEmpty) { + // Include any nodes that have been excluded longer than the excludeOnFailure timeout.
+ logInfo(s"Removing nodes $nodesToInclude from exclude list because the " + + s"nodes have reached has timed out") + nodesToInclude.foreach { node => + nodeIdToExcludedExpiryTime.remove(node) + // post both to keep backwards compatibility + listenerBus.post(SparkListenerNodeUnblacklisted(now, node)) + listenerBus.post(SparkListenerNodeUnexcluded(now, node)) + } + _excludedNodeList.set(nodeIdToExcludedExpiryTime.keySet.toSet) + } + updateNextExpiryTime() + } + } + + private def updateNextExpiryTime(): Unit = { + val execMinExpiry = if (executorIdToExcludedStatus.nonEmpty) { + executorIdToExcludedStatus.map{_._2.expiryTime}.min + } else { + Long.MaxValue + } + val nodeMinExpiry = if (nodeIdToExcludedExpiryTime.nonEmpty) { + nodeIdToExcludedExpiryTime.values.min + } else { + Long.MaxValue + } + nextExpiryTime = math.min(execMinExpiry, nodeMinExpiry) + } + + private def killExecutor(exec: String, msg: String): Unit = { + allocationClient match { + case Some(a) => + logInfo(msg) + a.killExecutors(Seq(exec), adjustTargetNumExecutors = false, countFailures = false, + force = true) + case None => + logInfo(s"Not attempting to kill excluded executor id $exec " + + s"since allocation client is not defined.") + } + } + + private def killExcludedExecutor(exec: String): Unit = { + if (conf.get(config.EXCLUDE_ON_FAILURE_KILL_ENABLED)) { + killExecutor(exec, s"Killing excluded executor id $exec since " + + s"${config.EXCLUDE_ON_FAILURE_KILL_ENABLED.key} is set.") + } + } + + private[scheduler] def killExcludedIdleExecutor(exec: String): Unit = { + killExecutor(exec, + s"Killing excluded idle executor id $exec because of task unschedulability and trying " + + "to acquire a new executor.") + } + + private def killExecutorsOnExcludedNode(node: String): Unit = { + if (conf.get(config.EXCLUDE_ON_FAILURE_KILL_ENABLED)) { + allocationClient match { + case Some(a) => + logInfo(s"Killing all executors on excluded host $node " + + s"since ${config.EXCLUDE_ON_FAILURE_KILL_ENABLED.key} is set.") + if (a.killExecutorsOnHost(node) == false) { + logError(s"Killing executors on node $node failed.") + } + case None => + logWarning(s"Not attempting to kill executors on excluded host $node " + + s"since allocation client is not defined.") + } + } + } + + def updateExcludedForFetchFailure(host: String, exec: String): Unit = { + if (EXCLUDE_FETCH_FAILURE_ENABLED) { + // If we exclude on fetch failures, we are implicitly saying that we believe the failure is + // non-transient, and can't be recovered from (even if this is the first fetch failure, + // stage is retried after just one failure, so we don't always get a chance to collect + // multiple fetch failures). + // If the external shuffle-service is on, then every other executor on this node would + // be suffering from the same issue, so we should exclude (and potentially kill) all + // of them immediately. 
+ + val now = clock.getTimeMillis() + val expiryTimeForNewExcludes = now + EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + + if (conf.get(config.SHUFFLE_SERVICE_ENABLED)) { + if (!nodeIdToExcludedExpiryTime.contains(host)) { + logInfo(s"excluding node $host due to fetch failure of external shuffle service") + + nodeIdToExcludedExpiryTime.put(host, expiryTimeForNewExcludes) + // post both to keep backwards compatibility + listenerBus.post(SparkListenerNodeBlacklisted(now, host, 1)) + listenerBus.post(SparkListenerNodeExcluded(now, host, 1)) + _excludedNodeList.set(nodeIdToExcludedExpiryTime.keySet.toSet) + killExecutorsOnExcludedNode(host) + updateNextExpiryTime() + } + } else if (!executorIdToExcludedStatus.contains(exec)) { + logInfo(s"Excluding executor $exec due to fetch failure") + + executorIdToExcludedStatus.put(exec, ExcludedExecutor(host, expiryTimeForNewExcludes)) + // We hardcoded number of failure tasks to 1 for fetch failure, because there's no + // reattempt for such failure. + // post both to keep backwards compatibility + listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, 1)) + listenerBus.post(SparkListenerExecutorExcluded(now, exec, 1)) + updateNextExpiryTime() + killExcludedExecutor(exec) + + val excludedExecsOnNode = nodeToExcludedExecs.getOrElseUpdate(host, HashSet[String]()) + excludedExecsOnNode += exec + } + } + } + + def updateExcludedForSuccessfulTaskSet( + stageId: Int, + stageAttemptId: Int, + failuresByExec: HashMap[String, ExecutorFailuresInTaskSet]): Unit = { + // if any tasks failed, we count them towards the overall failure count for the executor at + // this point. + val now = clock.getTimeMillis() + failuresByExec.foreach { case (exec, failuresInTaskSet) => + val appFailuresOnExecutor = + executorIdToFailureList.getOrElseUpdate(exec, new ExecutorFailureList) + appFailuresOnExecutor.addFailures(stageId, stageAttemptId, failuresInTaskSet) + appFailuresOnExecutor.dropFailuresWithTimeoutBefore(now) + val newTotal = appFailuresOnExecutor.numUniqueTaskFailures + + val expiryTimeForNewExcludes = now + EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + // If this pushes the total number of failures over the threshold, exclude the executor. + // If its already excluded, we avoid "re-excluding" (which can happen if there were + // other tasks already running in another taskset when it got excluded), because it makes + // some of the logic around expiry times a little more confusing. But it also wouldn't be a + // problem to re-exclude, with a later expiry time. + if (newTotal >= MAX_FAILURES_PER_EXEC && !executorIdToExcludedStatus.contains(exec)) { + logInfo(s"Excluding executor id: $exec because it has $newTotal" + + s" task failures in successful task sets") + val node = failuresInTaskSet.node + executorIdToExcludedStatus.put(exec, ExcludedExecutor(node, expiryTimeForNewExcludes)) + // post both to keep backwards compatibility + listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, newTotal)) + listenerBus.post(SparkListenerExecutorExcluded(now, exec, newTotal)) + executorIdToFailureList.remove(exec) + updateNextExpiryTime() + killExcludedExecutor(exec) + + // In addition to excluding the executor, we also update the data for failures on the + // node, and potentially exclude the entire node as well. + val excludedExecsOnNode = nodeToExcludedExecs.getOrElseUpdate(node, HashSet[String]()) + excludedExecsOnNode += exec + // If the node is already excluded, we avoid adding it again with a later expiry + // time. 
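// Illustration (hypothetical executor/node ids, assuming the default MAX_FAILED_EXEC_PER_NODE
// of 2): if exec1 and later exec2 are both excluded on node1 within the timeout window, the
// second exclusion brings excludedExecsOnNode.size to 2, so the check below excludes node1 itself.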
+ if (excludedExecsOnNode.size >= MAX_FAILED_EXEC_PER_NODE && + !nodeIdToExcludedExpiryTime.contains(node)) { + logInfo(s"Excluding node $node because it has ${excludedExecsOnNode.size} " + + s"executors excluded: ${excludedExecsOnNode}") + nodeIdToExcludedExpiryTime.put(node, expiryTimeForNewExcludes) + // post both to keep backwards compatibility + listenerBus.post(SparkListenerNodeBlacklisted(now, node, excludedExecsOnNode.size)) + listenerBus.post(SparkListenerNodeExcluded(now, node, excludedExecsOnNode.size)) + _excludedNodeList.set(nodeIdToExcludedExpiryTime.keySet.toSet) + killExecutorsOnExcludedNode(node) + } + } + } + } + + def isExecutorExcluded(executorId: String): Boolean = { + executorIdToExcludedStatus.contains(executorId) + } + + /** + * Get the full set of nodes that are excluded. Unlike other methods in this class, this *IS* + * thread-safe -- no lock required on a taskScheduler. + */ + def excludedNodeList(): Set[String] = { + _excludedNodeList.get() + } + + def isNodeExcluded(node: String): Boolean = { + nodeIdToExcludedExpiryTime.contains(node) + } + + def handleRemovedExecutor(executorId: String): Unit = { + // We intentionally do not clean up executors that are already excluded in + // nodeToExcludedExecs, so that if another executor on the same node gets excluded, we can + // exclude the entire node. We also can't clean up executorIdToExcludedStatus, so we can + // eventually remove the executor after the timeout. Despite not clearing those structures + // here, we don't expect they will grow too big since you won't get too many executors on one + // node, and the timeout will clear it up periodically in any case. + executorIdToFailureList -= executorId + } + + /** + * Tracks all failures for one executor (that have not passed the timeout). + * + * In general we actually expect this to be extremely small, since it won't contain more than the + * maximum number of task failures before an executor is failed (default 2). + */ + private[scheduler] final class ExecutorFailureList extends Logging { + + private case class TaskId(stage: Int, stageAttempt: Int, taskIndex: Int) + + /** + * All failures on this executor in successful task sets. + */ + private var failuresAndExpiryTimes = ArrayBuffer[(TaskId, Long)]() + /** + * As an optimization, we track the min expiry time over all entries in failuresAndExpiryTimes + * so its quick to tell if there are any failures with expiry before the current time. + */ + private var minExpiryTime = Long.MaxValue + + def addFailures( + stage: Int, + stageAttempt: Int, + failuresInTaskSet: ExecutorFailuresInTaskSet): Unit = { + failuresInTaskSet.taskToFailureCountAndFailureTime.foreach { + case (taskIdx, (_, failureTime)) => + val expiryTime = failureTime + EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + failuresAndExpiryTimes += ((TaskId(stage, stageAttempt, taskIdx), expiryTime)) + if (expiryTime < minExpiryTime) { + minExpiryTime = expiryTime + } + } + } + + /** + * The number of unique tasks that failed on this executor. Only counts failures within the + * timeout, and in successful tasksets. + */ + def numUniqueTaskFailures: Int = failuresAndExpiryTimes.size + + def isEmpty: Boolean = failuresAndExpiryTimes.isEmpty + + /** + * Apply the timeout to individual tasks. This is to prevent one-off failures that are very + * spread out in time (and likely have nothing to do with problems on the executor) from + * triggering exlusion. 
However, note that we do *not* remove executors and nodes from + * being excluded as we expire individual task failures -- each have their own timeout. E.g., + * suppose: + * * timeout = 10, maxFailuresPerExec = 2 + * * Task 1 fails on exec 1 at time 0 + * * Task 2 fails on exec 1 at time 5 + * --> exec 1 is excluded from time 5 - 15. + * This is to simplify the implementation, as well as keep the behavior easier to understand + * for the end user. + */ + def dropFailuresWithTimeoutBefore(dropBefore: Long): Unit = { + if (minExpiryTime < dropBefore) { + var newMinExpiry = Long.MaxValue + val newFailures = new ArrayBuffer[(TaskId, Long)] + failuresAndExpiryTimes.foreach { case (task, expiryTime) => + if (expiryTime >= dropBefore) { + newFailures += ((task, expiryTime)) + if (expiryTime < newMinExpiry) { + newMinExpiry = expiryTime + } + } + } + failuresAndExpiryTimes = newFailures + minExpiryTime = newMinExpiry + } + } + + override def toString(): String = { + s"failures = $failuresAndExpiryTimes" + } + } + +} + +private[spark] object HealthTracker extends Logging { + + private val DEFAULT_TIMEOUT = "1h" + + /** + * Returns true if the excludeOnFailure is enabled, based on checking the configuration + * in the following order: + * 1. Is it specifically enabled or disabled? + * 2. Is it enabled via the legacy timeout conf? + * 3. Default is off + */ + def isExcludeOnFailureEnabled(conf: SparkConf): Boolean = { + conf.get(config.EXCLUDE_ON_FAILURE_ENABLED) match { + case Some(enabled) => + enabled + case None => + // if they've got a non-zero setting for the legacy conf, always enable it, + // otherwise, use the default. + val legacyKey = config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF.key + conf.get(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF).exists { legacyTimeout => + if (legacyTimeout == 0) { + logWarning(s"Turning off excludeOnFailure due to legacy configuration: $legacyKey == 0") + false + } else { + logWarning(s"Turning on excludeOnFailure due to legacy configuration: $legacyKey > 0") + true + } + } + } + } + + def getExludeOnFailureTimeout(conf: SparkConf): Long = { + conf.get(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF).getOrElse { + conf.get(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF).getOrElse { + Utils.timeStringAsMs(DEFAULT_TIMEOUT) + } + } + } + + /** + * Verify that exclude on failure configurations are consistent; if not, throw an exception. + * Should only be called if excludeOnFailure is enabled. + * + * The configuration is expected to adhere to a few invariants. Default values + * follow these rules of course, but users may unwittingly change one configuration + * without making the corresponding adjustment elsewhere. This ensures we fail-fast when + * there are such misconfigurations. + */ + def validateExcludeOnFailureConfs(conf: SparkConf): Unit = { + + def mustBePos(k: String, v: String): Unit = { + throw new IllegalArgumentException(s"$k was $v, but must be > 0.") + } + + Seq( + config.MAX_TASK_ATTEMPTS_PER_EXECUTOR, + config.MAX_TASK_ATTEMPTS_PER_NODE, + config.MAX_FAILURES_PER_EXEC_STAGE, + config.MAX_FAILED_EXEC_PER_NODE_STAGE, + config.MAX_FAILURES_PER_EXEC, + config.MAX_FAILED_EXEC_PER_NODE + ).foreach { config => + val v = conf.get(config) + if (v <= 0) { + mustBePos(config.key, v.toString) + } + } + + val timeout = getExludeOnFailureTimeout(conf) + if (timeout <= 0) { + // first, figure out where the timeout came from, to include the right conf in the message. 
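// For example (hypothetical misconfiguration): if excludeOnFailure was explicitly enabled but the
// only timeout setting present was the legacy conf, set to a non-positive value, the error below
// should name the legacy key rather than config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF.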
+ conf.get(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF) match { + case Some(t) => + mustBePos(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF.key, timeout.toString) + case None => + mustBePos(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF.key, timeout.toString) + } + } + + val maxTaskFailures = conf.get(config.TASK_MAX_FAILURES) + val maxNodeAttempts = conf.get(config.MAX_TASK_ATTEMPTS_PER_NODE) + + if (maxNodeAttempts >= maxTaskFailures) { + throw new IllegalArgumentException(s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key} " + + s"( = ${maxNodeAttempts}) was >= ${config.TASK_MAX_FAILURES.key} " + + s"( = ${maxTaskFailures} ). Though excludeOnFailure is enabled, with this configuration, " + + s"Spark will not be robust to one bad node. Decrease " + + s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key}, increase ${config.TASK_MAX_FAILURES.key}, " + + s"or disable excludeOnFailure with ${config.EXCLUDE_ON_FAILURE_ENABLED.key}") + } + } +} + +private final case class ExcludedExecutor(node: String, expiryTime: Long) diff --git a/core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala b/core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala index cfc2e141290c4..1239c32cee3ab 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/MapStatus.scala @@ -123,7 +123,7 @@ private[spark] class CompressedMapStatus( // For deserialization only protected def this() = this(null, null.asInstanceOf[Array[Byte]], -1) - def this(loc: BlockManagerId, uncompressedSizes: Array[Long], mapTaskId: Long) { + def this(loc: BlockManagerId, uncompressedSizes: Array[Long], mapTaskId: Long) = { this(loc, uncompressedSizes.map(MapStatus.compressSize), mapTaskId) } diff --git a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala index 2e2851eb9070b..7333b31524f2a 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/Pool.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/Pool.scala @@ -59,6 +59,8 @@ private[spark] class Pool( } } + override def isSchedulable: Boolean = true + override def addSchedulable(schedulable: Schedulable): Unit = { require(schedulable != null) schedulableQueue.add(schedulable) @@ -105,7 +107,7 @@ private[spark] class Pool( val sortedSchedulableQueue = schedulableQueue.asScala.toSeq.sortWith(taskSetSchedulingAlgorithm.comparator) for (schedulable <- sortedSchedulableQueue) { - sortedTaskSetQueue ++= schedulable.getSortedTaskSetQueue + sortedTaskSetQueue ++= schedulable.getSortedTaskSetQueue.filter(_.isSchedulable) } sortedTaskSetQueue } diff --git a/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala b/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala index 8cc239c81d11a..0626f8fb8150a 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/Schedulable.scala @@ -39,6 +39,7 @@ private[spark] trait Schedulable { def stageId: Int def name: String + def isSchedulable: Boolean def addSchedulable(schedulable: Schedulable): Unit def removeSchedulable(schedulable: Schedulable): Unit def getSchedulableByName(name: String): Schedulable diff --git a/core/src/main/scala/org/apache/spark/scheduler/SchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/SchedulerBackend.scala index a566d0a04387c..b2acdb3e12a6d 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/SchedulerBackend.scala +++ 
b/core/src/main/scala/org/apache/spark/scheduler/SchedulerBackend.scala @@ -18,6 +18,7 @@ package org.apache.spark.scheduler import org.apache.spark.resource.ResourceProfile +import org.apache.spark.storage.BlockManagerId /** * A backend interface for scheduling systems that allows plugging in different ones under @@ -92,4 +93,16 @@ private[spark] trait SchedulerBackend { */ def maxNumConcurrentTasks(rp: ResourceProfile): Int + /** + * Get the list of host locations for push based shuffle + * + * Currently push based shuffle is disabled for both stage retry and stage reuse cases + * (for eg: in the case where few partitions are lost due to failure). Hence this method + * should be invoked only once for a ShuffleDependency. + * @return List of external shuffle services locations + */ + def getShufflePushMergerLocations( + numPartitions: Int, + resourceProfileId: Int): Seq[BlockManagerId] = Nil + } diff --git a/core/src/main/scala/org/apache/spark/scheduler/ShuffleMapTask.scala b/core/src/main/scala/org/apache/spark/scheduler/ShuffleMapTask.scala index a0ba9208ea647..89db3a86f4ce8 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/ShuffleMapTask.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/ShuffleMapTask.scala @@ -66,7 +66,7 @@ private[spark] class ShuffleMapTask( with Logging { /** A constructor used only in test suites. This does not require passing in an RDD. */ - def this(partitionId: Int) { + def this(partitionId: Int) = { this(0, 0, null, new Partition { override def index: Int = 0 }, null, new Properties, null) } diff --git a/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala b/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala index 8119215b8b74f..3fcb35b604ef6 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala @@ -118,12 +118,21 @@ case class SparkListenerExecutorRemoved(time: Long, executorId: String, reason: extends SparkListenerEvent @DeveloperApi +@deprecated("use SparkListenerExecutorExcluded instead", "3.1.0") case class SparkListenerExecutorBlacklisted( time: Long, executorId: String, taskFailures: Int) extends SparkListenerEvent +@DeveloperApi +case class SparkListenerExecutorExcluded( + time: Long, + executorId: String, + taskFailures: Int) + extends SparkListenerEvent + +@deprecated("use SparkListenerExecutorExcludedForStage instead", "3.1.0") @DeveloperApi case class SparkListenerExecutorBlacklistedForStage( time: Long, @@ -133,6 +142,17 @@ case class SparkListenerExecutorBlacklistedForStage( stageAttemptId: Int) extends SparkListenerEvent + +@DeveloperApi +case class SparkListenerExecutorExcludedForStage( + time: Long, + executorId: String, + taskFailures: Int, + stageId: Int, + stageAttemptId: Int) + extends SparkListenerEvent + +@deprecated("use SparkListenerNodeExcludedForStage instead", "3.1.0") @DeveloperApi case class SparkListenerNodeBlacklistedForStage( time: Long, @@ -142,10 +162,27 @@ case class SparkListenerNodeBlacklistedForStage( stageAttemptId: Int) extends SparkListenerEvent + +@DeveloperApi +case class SparkListenerNodeExcludedForStage( + time: Long, + hostId: String, + executorFailures: Int, + stageId: Int, + stageAttemptId: Int) + extends SparkListenerEvent + +@deprecated("use SparkListenerExecutorUnexcluded instead", "3.1.0") @DeveloperApi case class SparkListenerExecutorUnblacklisted(time: Long, executorId: String) extends SparkListenerEvent + +@DeveloperApi +case class 
SparkListenerExecutorUnexcluded(time: Long, executorId: String) + extends SparkListenerEvent + +@deprecated("use SparkListenerNodeExcluded instead", "3.1.0") @DeveloperApi case class SparkListenerNodeBlacklisted( time: Long, @@ -153,10 +190,23 @@ case class SparkListenerNodeBlacklisted( executorFailures: Int) extends SparkListenerEvent + +@DeveloperApi +case class SparkListenerNodeExcluded( + time: Long, + hostId: String, + executorFailures: Int) + extends SparkListenerEvent + +@deprecated("use SparkListenerNodeUnexcluded instead", "3.1.0") @DeveloperApi case class SparkListenerNodeUnblacklisted(time: Long, hostId: String) extends SparkListenerEvent +@DeveloperApi +case class SparkListenerNodeUnexcluded(time: Long, hostId: String) + extends SparkListenerEvent + @DeveloperApi case class SparkListenerUnschedulableTaskSetAdded( stageId: Int, @@ -319,38 +369,75 @@ private[spark] trait SparkListenerInterface { def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit /** - * Called when the driver blacklists an executor for a Spark application. + * Called when the driver excludes an executor for a Spark application. */ + @deprecated("use onExecutorExcluded instead", "3.1.0") def onExecutorBlacklisted(executorBlacklisted: SparkListenerExecutorBlacklisted): Unit /** - * Called when the driver blacklists an executor for a stage. + * Called when the driver excludes an executor for a Spark application. */ + def onExecutorExcluded(executorExcluded: SparkListenerExecutorExcluded): Unit + + /** + * Called when the driver excludes an executor for a stage. + */ + @deprecated("use onExecutorExcludedForStage instead", "3.1.0") def onExecutorBlacklistedForStage( executorBlacklistedForStage: SparkListenerExecutorBlacklistedForStage): Unit /** - * Called when the driver blacklists a node for a stage. + * Called when the driver excludes an executor for a stage. + */ + def onExecutorExcludedForStage( + executorExcludedForStage: SparkListenerExecutorExcludedForStage): Unit + + /** + * Called when the driver excludes a node for a stage. */ + @deprecated("use onNodeExcludedForStage instead", "3.1.0") def onNodeBlacklistedForStage(nodeBlacklistedForStage: SparkListenerNodeBlacklistedForStage): Unit /** - * Called when the driver re-enables a previously blacklisted executor. + * Called when the driver excludes a node for a stage. + */ + def onNodeExcludedForStage(nodeExcludedForStage: SparkListenerNodeExcludedForStage): Unit + + /** + * Called when the driver re-enables a previously excluded executor. */ + @deprecated("use onExecutorUnexcluded instead", "3.1.0") def onExecutorUnblacklisted(executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit /** - * Called when the driver blacklists a node for a Spark application. + * Called when the driver re-enables a previously excluded executor. + */ + def onExecutorUnexcluded(executorUnexcluded: SparkListenerExecutorUnexcluded): Unit + + /** + * Called when the driver excludes a node for a Spark application. */ + @deprecated("use onNodeExcluded instead", "3.1.0") def onNodeBlacklisted(nodeBlacklisted: SparkListenerNodeBlacklisted): Unit /** - * Called when the driver re-enables a previously blacklisted node. + * Called when the driver excludes a node for a Spark application. */ + def onNodeExcluded(nodeExcluded: SparkListenerNodeExcluded): Unit + + /** + * Called when the driver re-enables a previously excluded node. 
+ */ + @deprecated("use onNodeUnexcluded instead", "3.1.0") def onNodeUnblacklisted(nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit /** - * Called when a taskset becomes unschedulable due to blacklisting and dynamic allocation + * Called when the driver re-enables a previously excluded node. + */ + def onNodeUnexcluded(nodeUnexcluded: SparkListenerNodeUnexcluded): Unit + + /** + * Called when a taskset becomes unschedulable due to exludeOnFailure and dynamic allocation * is enabled. */ def onUnschedulableTaskSetAdded( @@ -433,21 +520,33 @@ abstract class SparkListener extends SparkListenerInterface { override def onExecutorBlacklisted( executorBlacklisted: SparkListenerExecutorBlacklisted): Unit = { } + override def onExecutorExcluded( + executorExcluded: SparkListenerExecutorExcluded): Unit = { } - def onExecutorBlacklistedForStage( + override def onExecutorBlacklistedForStage( executorBlacklistedForStage: SparkListenerExecutorBlacklistedForStage): Unit = { } + override def onExecutorExcludedForStage( + executorExcludedForStage: SparkListenerExecutorExcludedForStage): Unit = { } - def onNodeBlacklistedForStage( + override def onNodeBlacklistedForStage( nodeBlacklistedForStage: SparkListenerNodeBlacklistedForStage): Unit = { } + override def onNodeExcludedForStage( + nodeExcludedForStage: SparkListenerNodeExcludedForStage): Unit = { } override def onExecutorUnblacklisted( executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit = { } + override def onExecutorUnexcluded( + executorUnexcluded: SparkListenerExecutorUnexcluded): Unit = { } override def onNodeBlacklisted( nodeBlacklisted: SparkListenerNodeBlacklisted): Unit = { } + override def onNodeExcluded( + nodeExcluded: SparkListenerNodeExcluded): Unit = { } override def onNodeUnblacklisted( nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit = { } + override def onNodeUnexcluded( + nodeUnexcluded: SparkListenerNodeUnexcluded): Unit = { } override def onUnschedulableTaskSetAdded( unschedulableTaskSetAdded: SparkListenerUnschedulableTaskSetAdded): Unit = { } diff --git a/core/src/main/scala/org/apache/spark/scheduler/SparkListenerBus.scala b/core/src/main/scala/org/apache/spark/scheduler/SparkListenerBus.scala index 13e65f4291fd0..ec0c0cf3cf82b 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/SparkListenerBus.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/SparkListenerBus.scala @@ -75,6 +75,18 @@ private[spark] trait SparkListenerBus listener.onNodeBlacklisted(nodeBlacklisted) case nodeUnblacklisted: SparkListenerNodeUnblacklisted => listener.onNodeUnblacklisted(nodeUnblacklisted) + case executorExcludedForStage: SparkListenerExecutorExcludedForStage => + listener.onExecutorExcludedForStage(executorExcludedForStage) + case nodeExcludedForStage: SparkListenerNodeExcludedForStage => + listener.onNodeExcludedForStage(nodeExcludedForStage) + case executorExcluded: SparkListenerExecutorExcluded => + listener.onExecutorExcluded(executorExcluded) + case executorUnexcluded: SparkListenerExecutorUnexcluded => + listener.onExecutorUnexcluded(executorUnexcluded) + case nodeExcluded: SparkListenerNodeExcluded => + listener.onNodeExcluded(nodeExcluded) + case nodeUnexcluded: SparkListenerNodeUnexcluded => + listener.onNodeUnexcluded(nodeUnexcluded) case blockUpdated: SparkListenerBlockUpdated => listener.onBlockUpdated(blockUpdated) case speculativeTaskSubmitted: SparkListenerSpeculativeTaskSubmitted => diff --git a/core/src/main/scala/org/apache/spark/scheduler/Task.scala 
b/core/src/main/scala/org/apache/spark/scheduler/Task.scala index ebc1c05435fee..81f984bb2b511 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/Task.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/Task.scala @@ -23,6 +23,7 @@ import java.util.Properties import org.apache.spark._ import org.apache.spark.executor.TaskMetrics import org.apache.spark.internal.config.APP_CALLER_CONTEXT +import org.apache.spark.internal.plugin.PluginContainer import org.apache.spark.memory.{MemoryMode, TaskMemoryManager} import org.apache.spark.metrics.MetricsSystem import org.apache.spark.rdd.InputFileBlockHolder @@ -82,7 +83,8 @@ private[spark] abstract class Task[T]( taskAttemptId: Long, attemptNumber: Int, metricsSystem: MetricsSystem, - resources: Map[String, ResourceInformation]): T = { + resources: Map[String, ResourceInformation], + plugins: Option[PluginContainer]): T = { SparkEnv.get.blockManager.registerTask(taskAttemptId) // TODO SPARK-24874 Allow create BarrierTaskContext based on partitions, instead of whether // the stage is barrier. @@ -123,6 +125,8 @@ private[spark] abstract class Task[T]( Option(taskAttemptId), Option(attemptNumber)).setCurrentContext() + plugins.foreach(_.onTaskStart()) + try { runTask(context) } catch { diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskDescription.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskDescription.scala index 863bf27088355..12b911d06153b 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskDescription.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskDescription.scala @@ -55,6 +55,7 @@ private[spark] class TaskDescription( val partitionId: Int, val addedFiles: Map[String, Long], val addedJars: Map[String, Long], + val addedArchives: Map[String, Long], val properties: Properties, val resources: immutable.Map[String, ResourceInformation], val serializedTask: ByteBuffer) { @@ -99,6 +100,9 @@ private[spark] object TaskDescription { // Write jars. serializeStringLongMap(taskDescription.addedJars, dataOut) + // Write archives. + serializeStringLongMap(taskDescription.addedArchives, dataOut) + // Write properties. dataOut.writeInt(taskDescription.properties.size()) taskDescription.properties.asScala.foreach { case (key, value) => @@ -167,6 +171,9 @@ private[spark] object TaskDescription { // Read jars. val taskJars = deserializeStringLongMap(dataIn) + // Read archives. + val taskArchives = deserializeStringLongMap(dataIn) + // Read properties. 
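// Note: the read order here must mirror the write order above -- jars, then the newly added
// archives, then properties -- otherwise deserialization would read the stream out of step.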
val properties = new Properties() val numProperties = dataIn.readInt() @@ -185,6 +192,6 @@ private[spark] object TaskDescription { val serializedTask = byteBuffer.slice() new TaskDescription(taskId, attemptNumber, executorId, name, index, partitionId, taskFiles, - taskJars, properties, resources, serializedTask) + taskJars, taskArchives, properties, resources, serializedTask) } } diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala index 107c517ca06bc..b939e40f3b60c 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala @@ -26,9 +26,6 @@ import scala.collection.mutable import scala.collection.mutable.{ArrayBuffer, Buffer, HashMap, HashSet} import scala.util.Random -import com.google.common.base.Ticker -import com.google.common.cache.CacheBuilder - import org.apache.spark._ import org.apache.spark.TaskState.TaskState import org.apache.spark.executor.ExecutorMetrics @@ -91,9 +88,9 @@ private[spark] class TaskSchedulerImpl( this(sc, sc.conf.get(config.TASK_MAX_FAILURES)) } - // Lazily initializing blacklistTrackerOpt to avoid getting empty ExecutorAllocationClient, + // Lazily initializing healthTrackerOpt to avoid getting empty ExecutorAllocationClient, // because ExecutorAllocationClient is created after this TaskSchedulerImpl. - private[scheduler] lazy val blacklistTrackerOpt = maybeCreateBlacklistTracker(sc) + private[scheduler] lazy val healthTrackerOpt = maybeCreateHealthTracker(sc) val conf = sc.conf @@ -281,7 +278,7 @@ private[spark] class TaskSchedulerImpl( private[scheduler] def createTaskSetManager( taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = { - new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt, clock) + new TaskSetManager(this, taskSet, maxTaskFailures, healthTrackerOpt, clock) } override def cancelTasks(stageId: Int, interruptThread: Boolean): Unit = synchronized { @@ -381,7 +378,7 @@ private[spark] class TaskSchedulerImpl( : (Boolean, Option[TaskLocality]) = { var noDelayScheduleRejects = true var minLaunchedLocality: Option[TaskLocality] = None - // nodes and executors that are blacklisted for the entire application have already been + // nodes and executors that are excluded for the entire application have already been // filtered out by this point for (i <- 0 until shuffledOffers.size) { val execId = shuffledOffers(i).executorId @@ -515,15 +512,15 @@ private[spark] class TaskSchedulerImpl( hostsByRack.getOrElseUpdate(rack, new HashSet[String]()) += host } - // Before making any offers, remove any nodes from the blacklist whose blacklist has expired. Do + // Before making any offers, include any nodes whose expireOnFailure timeout has expired. Do // this here to avoid a separate thread and added synchronization overhead, and also because - // updating the blacklist is only relevant when task offers are being made. - blacklistTrackerOpt.foreach(_.applyBlacklistTimeout()) + // updating the excluded executors and nodes is only relevant when task offers are being made. 
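// A sketch of the intended effect (hypothetical offer): a WorkerOffer from a currently excluded
// host or executor is dropped from this scheduling round by the filter below; once the exclusion
// expires, applyExcludeOnFailureTimeout() removes the entry and such offers are considered again.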
+ healthTrackerOpt.foreach(_.applyExcludeOnFailureTimeout()) - val filteredOffers = blacklistTrackerOpt.map { blacklistTracker => + val filteredOffers = healthTrackerOpt.map { healthTracker => offers.filter { offer => - !blacklistTracker.isNodeBlacklisted(offer.host) && - !blacklistTracker.isExecutorBlacklisted(offer.executorId) + !healthTracker.isNodeExcluded(offer.host) && + !healthTracker.isExecutorExcluded(offer.executorId) } }.getOrElse(offers) @@ -535,7 +532,7 @@ private[spark] class TaskSchedulerImpl( val availableResources = shuffledOffers.map(_.resources).toArray val availableCpus = shuffledOffers.map(o => o.cores).toArray val resourceProfileIds = shuffledOffers.map(o => o.resourceProfileId).toArray - val sortedTaskSets = rootPool.getSortedTaskSetQueue.filterNot(_.isZombie) + val sortedTaskSets = rootPool.getSortedTaskSetQueue for (taskSet <- sortedTaskSets) { logDebug("parentName: %s, name: %s, runningTasks: %s".format( taskSet.parent.name, taskSet.name, taskSet.runningTasks)) @@ -602,15 +599,15 @@ private[spark] class TaskSchedulerImpl( } if (!launchedAnyTask) { - taskSet.getCompletelyBlacklistedTaskIfAny(hostToExecutors).foreach { taskIndex => - // If the taskSet is unschedulable we try to find an existing idle blacklisted + taskSet.getCompletelyExcludedTaskIfAny(hostToExecutors).foreach { taskIndex => + // If the taskSet is unschedulable we try to find an existing idle excluded // executor and kill the idle executor and kick off an abortTimer which if it doesn't // schedule a task within the the timeout will abort the taskSet if we were unable to // schedule any task from the taskSet. // Note 1: We keep track of schedulability on a per taskSet basis rather than on a per // task basis. // Note 2: The taskSet can still be aborted when there are more than one idle - // blacklisted executors and dynamic allocation is on. This can happen when a killed + // excluded executors and dynamic allocation is on. This can happen when a killed // idle executor isn't replaced in time by ExecutorAllocationManager as it relies on // pending tasks and doesn't kill executors on idle timeouts, resulting in the abort // timer to expire and abort the taskSet. @@ -621,7 +618,7 @@ private[spark] class TaskSchedulerImpl( executorIdToRunningTaskIds.find(x => !isExecutorBusy(x._1)) match { case Some ((executorId, _)) => if (!unschedulableTaskSetToExpiryTime.contains(taskSet)) { - blacklistTrackerOpt.foreach(blt => blt.killBlacklistedIdleExecutor(executorId)) + healthTrackerOpt.foreach(blt => blt.killExcludedIdleExecutor(executorId)) updateUnschedulableTaskSetTimeoutAndStartAbortTimer(taskSet, taskIndex) } case None => @@ -638,18 +635,19 @@ private[spark] class TaskSchedulerImpl( } } else { // Abort Immediately - logInfo("Cannot schedule any task because of complete blacklisting. No idle" + - s" executors can be found to kill. Aborting stage ${taskSet.stageId}.") - taskSet.abortSinceCompletelyBlacklisted(taskIndex) + logInfo("Cannot schedule any task because all executors excluded from " + + "failures. No idle executors can be found to kill. Aborting stage " + + s"${taskSet.stageId}.") + taskSet.abortSinceCompletelyExcludedOnFailure(taskIndex) } } } } else { - // We want to defer killing any taskSets as long as we have a non blacklisted executor + // We want to defer killing any taskSets as long as we have a non excluded executor // which can be used to schedule a task from any active taskSets. This ensures that the // job can make progress. 
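The hunks above gate scheduling on the optional health tracker: expire stale exclusions, then drop offers from excluded hosts and executors before sorting task sets. A simplified, self-contained model of that filtering step follows; Offer and SimpleHealthTracker are stand-ins, not Spark's WorkerOffer or HealthTracker.

```scala
// Stand-ins for Spark's WorkerOffer and HealthTracker; shapes assumed for illustration only.
case class Offer(executorId: String, host: String, cores: Int)

class SimpleHealthTracker(excludedHosts: Set[String], excludedExecs: Set[String]) {
  def isNodeExcluded(host: String): Boolean = excludedHosts.contains(host)
  def isExecutorExcluded(execId: String): Boolean = excludedExecs.contains(execId)
}

object OfferFiltering {
  // When a tracker exists, keep only offers whose host and executor are not excluded;
  // otherwise pass every offer through untouched, as in the hunk above.
  def filterOffers(healthTrackerOpt: Option[SimpleHealthTracker], offers: Seq[Offer]): Seq[Offer] =
    healthTrackerOpt.map { tracker =>
      offers.filter(o => !tracker.isNodeExcluded(o.host) && !tracker.isExecutorExcluded(o.executorId))
    }.getOrElse(offers)

  def main(args: Array[String]): Unit = {
    val tracker = Some(new SimpleHealthTracker(Set("badhost"), Set("exec-3")))
    val offers = Seq(Offer("exec-1", "host-1", 4), Offer("exec-2", "badhost", 4), Offer("exec-3", "host-2", 4))
    println(filterOffers(tracker, offers).map(_.executorId)) // List(exec-1)
  }
}
```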
// Note: It is theoretically possible that a taskSet never gets scheduled on a - // non-blacklisted executor and the abort timer doesn't kick in because of a constant + // non-excluded executor and the abort timer doesn't kick in because of a constant // submission of new TaskSets. See the PR for more details. if (unschedulableTaskSetToExpiryTime.nonEmpty) { logInfo("Clearing the expiry times for all unschedulable taskSets as a task was " + @@ -710,7 +708,7 @@ private[spark] class TaskSchedulerImpl( val timeout = conf.get(config.UNSCHEDULABLE_TASKSET_TIMEOUT) * 1000 unschedulableTaskSetToExpiryTime(taskSet) = clock.getTimeMillis() + timeout logInfo(s"Waiting for $timeout ms for completely " + - s"blacklisted task to be schedulable again before aborting stage ${taskSet.stageId}.") + s"excluded task to be schedulable again before aborting stage ${taskSet.stageId}.") abortTimer.schedule( createUnschedulableTaskSetAbortTimer(taskSet, taskIndex), timeout) } @@ -722,9 +720,9 @@ private[spark] class TaskSchedulerImpl( override def run(): Unit = TaskSchedulerImpl.this.synchronized { if (unschedulableTaskSetToExpiryTime.contains(taskSet) && unschedulableTaskSetToExpiryTime(taskSet) <= clock.getTimeMillis()) { - logInfo("Cannot schedule any task because of complete blacklisting. " + + logInfo("Cannot schedule any task because all executors excluded due to failures. " + s"Wait time for scheduling expired. Aborting stage ${taskSet.stageId}.") - taskSet.abortSinceCompletelyBlacklisted(taskIndex) + taskSet.abortSinceCompletelyExcludedOnFailure(taskIndex) } else { this.cancel() } @@ -1019,7 +1017,7 @@ private[spark] class TaskSchedulerImpl( executorIdToHost -= executorId rootPool.executorLost(executorId, host, reason) } - blacklistTrackerOpt.foreach(_.handleRemovedExecutor(executorId)) + healthTrackerOpt.foreach(_.handleRemovedExecutor(executorId)) } def executorAdded(execId: String, host: String): Unit = { @@ -1060,11 +1058,11 @@ private[spark] class TaskSchedulerImpl( } /** - * Get a snapshot of the currently blacklisted nodes for the entire application. This is + * Get a snapshot of the currently excluded nodes for the entire application. This is * thread-safe -- it can be called without a lock on the TaskScheduler. 
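The unschedulable-task-set handling above boils down to: record an expiry time, schedule a timer, and abort only if the task set is still unschedulable when the timer fires (launching any task clears the expiry map). A rough standalone sketch of that pattern with java.util.Timer; the class name and abort callback here are illustrative, not the scheduler's actual types.

```scala
import java.util.{Timer, TimerTask}
import scala.collection.mutable

class UnschedulableAbortTimer(timeoutMs: Long, abort: String => Unit) {
  private val timer = new Timer("unschedulable-taskset-abort-timer", true)
  private val expiryByTaskSet = mutable.Map[String, Long]()

  // Called when a task set is found completely unschedulable: remember when it may be aborted.
  def armFor(taskSetId: String): Unit = synchronized {
    if (!expiryByTaskSet.contains(taskSetId)) {
      expiryByTaskSet(taskSetId) = System.currentTimeMillis() + timeoutMs
      timer.schedule(new TimerTask {
        override def run(): Unit = UnschedulableAbortTimer.this.synchronized {
          // Abort only if it is still marked unschedulable and the recorded expiry has passed.
          if (expiryByTaskSet.get(taskSetId).exists(_ <= System.currentTimeMillis())) {
            abort(taskSetId)
          }
        }
      }, timeoutMs)
    }
  }

  // Called once any task launches: clear all expiry times so pending timers become no-ops.
  def clearAll(): Unit = synchronized { expiryByTaskSet.clear() }
}
```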
*/ - def nodeBlacklist(): Set[String] = { - blacklistTrackerOpt.map(_.nodeBlacklist()).getOrElse(Set.empty) + def excludedNodes(): Set[String] = { + healthTrackerOpt.map(_.excludedNodeList()).getOrElse(Set.empty) } /** @@ -1223,13 +1221,13 @@ private[spark] object TaskSchedulerImpl { retval.toList } - private def maybeCreateBlacklistTracker(sc: SparkContext): Option[BlacklistTracker] = { - if (BlacklistTracker.isBlacklistEnabled(sc.conf)) { + private def maybeCreateHealthTracker(sc: SparkContext): Option[HealthTracker] = { + if (HealthTracker.isExcludeOnFailureEnabled(sc.conf)) { val executorAllocClient: Option[ExecutorAllocationClient] = sc.schedulerBackend match { case b: ExecutorAllocationClient => Some(b) case _ => None } - Some(new BlacklistTracker(sc, executorAllocClient)) + Some(new HealthTracker(sc, executorAllocClient)) } else { None } diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetBlacklist.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetExcludeList.scala similarity index 63% rename from core/src/main/scala/org/apache/spark/scheduler/TaskSetBlacklist.scala rename to core/src/main/scala/org/apache/spark/scheduler/TaskSetExcludeList.scala index 4df2889089ee9..d8c46db166fc5 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetBlacklist.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetExcludeList.scala @@ -24,19 +24,19 @@ import org.apache.spark.internal.config import org.apache.spark.util.Clock /** - * Handles blacklisting executors and nodes within a taskset. This includes blacklisting specific - * (task, executor) / (task, nodes) pairs, and also completely blacklisting executors and nodes + * Handles excluding executors and nodes within a taskset. This includes excluding specific + * (task, executor) / (task, nodes) pairs, and also completely excluding executors and nodes * for the entire taskset. * - * It also must store sufficient information in task failures for application level blacklisting, - * which is handled by [[BlacklistTracker]]. Note that BlacklistTracker does not know anything + * It also must store sufficient information in task failures for application level exclusion, + * which is handled by [[HealthTracker]]. Note that HealthTracker does not know anything * about task failures until a taskset completes successfully. * * THREADING: This class is a helper to [[TaskSetManager]]; as with the methods in * [[TaskSetManager]] this class is designed only to be called from code with a lock on the * TaskScheduler (e.g. its event handlers). It should not be called from other threads. */ -private[scheduler] class TaskSetBlacklist( +private[scheduler] class TaskSetExcludelist( private val listenerBus: LiveListenerBus, val conf: SparkConf, val stageId: Int, @@ -49,9 +49,9 @@ private[scheduler] class TaskSetBlacklist( private val MAX_FAILED_EXEC_PER_NODE_STAGE = conf.get(config.MAX_FAILED_EXEC_PER_NODE_STAGE) /** - * A map from each executor to the task failures on that executor. This is used for blacklisting - * within this taskset, and it is also relayed onto [[BlacklistTracker]] for app-level - * blacklisting if this taskset completes successfully. + * A map from each executor to the task failures on that executor. This is used for exclusion + * within this taskset, and it is also relayed onto [[HealthTracker]] for app-level + * exlucsion if this taskset completes successfully. 
*/ val execToFailures = new HashMap[String, ExecutorFailuresInTaskSet]() @@ -61,9 +61,9 @@ private[scheduler] class TaskSetBlacklist( * node -> execs mapping in the usual case when there aren't any failures). */ private val nodeToExecsWithFailures = new HashMap[String, HashSet[String]]() - private val nodeToBlacklistedTaskIndexes = new HashMap[String, HashSet[Int]]() - private val blacklistedExecs = new HashSet[String]() - private val blacklistedNodes = new HashSet[String]() + private val nodeToExcludedTaskIndexes = new HashMap[String, HashSet[Int]]() + private val excludedExecs = new HashSet[String]() + private val excludedNodes = new HashSet[String]() private var latestFailureReason: String = null @@ -75,36 +75,36 @@ private[scheduler] class TaskSetBlacklist( } /** - * Return true if this executor is blacklisted for the given task. This does *not* - * need to return true if the executor is blacklisted for the entire stage, or blacklisted + * Return true if this executor is excluded for the given task. This does *not* + * need to return true if the executor is excluded for the entire stage, or excluded * for the entire application. That is to keep this method as fast as possible in the inner-loop * of the scheduler, where those filters will have already been applied. */ - def isExecutorBlacklistedForTask(executorId: String, index: Int): Boolean = { + def isExecutorExcludedForTask(executorId: String, index: Int): Boolean = { execToFailures.get(executorId).exists { execFailures => execFailures.getNumTaskFailures(index) >= MAX_TASK_ATTEMPTS_PER_EXECUTOR } } - def isNodeBlacklistedForTask(node: String, index: Int): Boolean = { - nodeToBlacklistedTaskIndexes.get(node).exists(_.contains(index)) + def isNodeExcludedForTask(node: String, index: Int): Boolean = { + nodeToExcludedTaskIndexes.get(node).exists(_.contains(index)) } /** - * Return true if this executor is blacklisted for the given stage. Completely ignores whether - * the executor is blacklisted for the entire application (or anything to do with the node the + * Return true if this executor is excluded for the given stage. Completely ignores whether + * the executor is excluded for the entire application (or anything to do with the node the * executor is on). That is to keep this method as fast as possible in the inner-loop of the * scheduler, where those filters will already have been applied. */ - def isExecutorBlacklistedForTaskSet(executorId: String): Boolean = { - blacklistedExecs.contains(executorId) + def isExecutorExcludedForTaskSet(executorId: String): Boolean = { + excludedExecs.contains(executorId) } - def isNodeBlacklistedForTaskSet(node: String): Boolean = { - blacklistedNodes.contains(node) + def isNodeExcludedForTaskSet(node: String): Boolean = { + excludedNodes.contains(node) } - private[scheduler] def updateBlacklistForFailedTask( + private[scheduler] def updateExcludedForFailedTask( host: String, exec: String, index: Int, @@ -114,7 +114,7 @@ private[scheduler] class TaskSetBlacklist( execFailures.updateWithFailure(index, clock.getTimeMillis()) // check if this task has also failed on other executors on the same host -- if its gone - // over the limit, blacklist this task from the entire host. + // over the limit, exclude this task from the entire host. 
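A compact, self-contained model of the bookkeeping this class performs: count failed attempts per executor, roll them up per host to exclude a task from a whole node, and escalate to stage-level executor exclusion once enough distinct tasks have failed (the escalation mirrors the check in the hunk that follows). Thresholds are plain constructor parameters here rather than Spark config entries, and listener-bus events and app-level relaying are omitted.

```scala
import scala.collection.mutable

class SimpleTaskSetExcludelist(
    maxTaskAttemptsPerExecutor: Int,
    maxTaskAttemptsPerNode: Int,
    maxFailuresPerExecStage: Int) {

  // executor -> (task index -> failure count)
  private val execToTaskFailures = mutable.Map[String, mutable.Map[Int, Int]]()
  private val nodeToExecsWithFailures = mutable.Map[String, mutable.Set[String]]()
  private val nodeToExcludedTaskIndexes = mutable.Map[String, mutable.Set[Int]]()
  private val excludedExecs = mutable.Set[String]()

  def updateForFailedTask(host: String, exec: String, index: Int): Unit = {
    val failures = execToTaskFailures.getOrElseUpdate(exec, mutable.Map.empty[Int, Int])
    failures(index) = failures.getOrElse(index, 0) + 1

    // If this task has failed on enough executors of the host, exclude it from the whole node.
    val execsOnHost = nodeToExecsWithFailures.getOrElseUpdate(host, mutable.Set.empty[String])
    execsOnHost += exec
    val failuresOnHost = execsOnHost.iterator.map(e => execToTaskFailures(e).getOrElse(index, 0)).sum
    if (failuresOnHost >= maxTaskAttemptsPerNode) {
      nodeToExcludedTaskIndexes.getOrElseUpdate(host, mutable.Set.empty[Int]) += index
    }

    // If enough distinct tasks have failed on this executor, exclude it for the whole stage.
    if (failures.size >= maxFailuresPerExecStage) {
      excludedExecs += exec
    }
  }

  def isExecutorExcludedForTask(exec: String, index: Int): Boolean =
    execToTaskFailures.get(exec).exists(_.getOrElse(index, 0) >= maxTaskAttemptsPerExecutor)

  def isNodeExcludedForTask(host: String, index: Int): Boolean =
    nodeToExcludedTaskIndexes.get(host).exists(_.contains(index))

  def isExecutorExcludedForTaskSet(exec: String): Boolean = excludedExecs.contains(exec)
}
```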
val execsWithFailuresOnNode = nodeToExecsWithFailures.getOrElseUpdate(host, new HashSet()) execsWithFailuresOnNode += exec val failuresOnHost = execsWithFailuresOnNode.toIterator.flatMap { exec => @@ -127,27 +127,35 @@ private[scheduler] class TaskSetBlacklist( } }.sum if (failuresOnHost >= MAX_TASK_ATTEMPTS_PER_NODE) { - nodeToBlacklistedTaskIndexes.getOrElseUpdate(host, new HashSet()) += index + nodeToExcludedTaskIndexes.getOrElseUpdate(host, new HashSet()) += index } - // Check if enough tasks have failed on the executor to blacklist it for the entire stage. + // Check if enough tasks have failed on the executor to exclude it for the entire stage. val numFailures = execFailures.numUniqueTasksWithFailures if (numFailures >= MAX_FAILURES_PER_EXEC_STAGE) { - if (blacklistedExecs.add(exec)) { - logInfo(s"Blacklisting executor ${exec} for stage $stageId") - // This executor has been pushed into the blacklist for this stage. Let's check if it - // pushes the whole node into the blacklist. - val blacklistedExecutorsOnNode = - execsWithFailuresOnNode.filter(blacklistedExecs.contains(_)) + if (excludedExecs.add(exec)) { + logInfo(s"Excluding executor ${exec} for stage $stageId") + // This executor has been excluded for this stage. Let's check if it + // the whole node should be excluded. + val excludedExecutorsOnNode = + execsWithFailuresOnNode.filter(excludedExecs.contains(_)) val now = clock.getTimeMillis() + // SparkListenerExecutorBlacklistedForStage is deprecated but post both events + // to keep backward compatibility listenerBus.post( SparkListenerExecutorBlacklistedForStage(now, exec, numFailures, stageId, stageAttemptId)) - val numFailExec = blacklistedExecutorsOnNode.size + listenerBus.post( + SparkListenerExecutorExcludedForStage(now, exec, numFailures, stageId, stageAttemptId)) + val numFailExec = excludedExecutorsOnNode.size if (numFailExec >= MAX_FAILED_EXEC_PER_NODE_STAGE) { - if (blacklistedNodes.add(host)) { - logInfo(s"Blacklisting ${host} for stage $stageId") + if (excludedNodes.add(host)) { + logInfo(s"Excluding ${host} for stage $stageId") + // SparkListenerNodeBlacklistedForStage is deprecated but post both events + // to keep backward compatibility listenerBus.post( SparkListenerNodeBlacklistedForStage(now, host, numFailExec, stageId, stageAttemptId)) + listenerBus.post( + SparkListenerNodeExcludedForStage(now, host, numFailExec, stageId, stageAttemptId)) } } } diff --git a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala index 673fe4fe27519..ad0791fa42931 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala @@ -55,7 +55,7 @@ private[spark] class TaskSetManager( sched: TaskSchedulerImpl, val taskSet: TaskSet, val maxTaskFailures: Int, - blacklistTracker: Option[BlacklistTracker] = None, + healthTracker: Option[HealthTracker] = None, clock: Clock = new SystemClock()) extends Schedulable with Logging { private val conf = sched.sc.conf @@ -63,6 +63,7 @@ private[spark] class TaskSetManager( // SPARK-21563 make a copy of the jars/files so they are consistent across the TaskSet private val addedJars = HashMap[String, Long](sched.sc.addedJars.toSeq: _*) private val addedFiles = HashMap[String, Long](sched.sc.addedFiles.toSeq: _*) + private val addedArchives = HashMap[String, Long](sched.sc.addedArchives.toSeq: _*) val maxResultSize = conf.get(config.MAX_RESULT_SIZE) @@ -130,9 +131,9 @@ 
private[spark] class TaskSetManager( private var totalResultSize = 0L private var calculatedTasks = 0 - private[scheduler] val taskSetBlacklistHelperOpt: Option[TaskSetBlacklist] = { - blacklistTracker.map { _ => - new TaskSetBlacklist(sched.sc.listenerBus, conf, stageId, taskSet.stageAttemptId, clock) + private[scheduler] val taskSetExcludelistHelperOpt: Option[TaskSetExcludelist] = { + healthTracker.map { _ => + new TaskSetExcludelist(sched.sc.listenerBus, conf, stageId, taskSet.stageAttemptId, clock) } } @@ -216,7 +217,7 @@ private[spark] class TaskSetManager( /** * Track the set of locality levels which are valid given the tasks locality preferences and * the set of currently available executors. This is updated as executors are added and removed. - * This allows a performance optimization, of skipping levels that aren't relevant (eg., skip + * This allows a performance optimization, of skipping levels that aren't relevant (e.g., skip * PROCESS_LOCAL if no tasks could be run PROCESS_LOCAL for the current set of executors). */ private[scheduler] var myLocalityLevels = computeValidLocalityLevels() @@ -294,7 +295,7 @@ private[spark] class TaskSetManager( while (indexOffset > 0) { indexOffset -= 1 val index = list(indexOffset) - if (!isTaskBlacklistedOnExecOrNode(index, execId, host) && + if (!isTaskExcludededOnExecOrNode(index, execId, host) && !(speculative && hasAttemptOnHost(index, host))) { // This should almost always be list.trimEnd(1) to remove tail list.remove(indexOffset) @@ -317,10 +318,10 @@ private[spark] class TaskSetManager( taskAttempts(taskIndex).exists(_.host == host) } - private def isTaskBlacklistedOnExecOrNode(index: Int, execId: String, host: String): Boolean = { - taskSetBlacklistHelperOpt.exists { blacklist => - blacklist.isNodeBlacklistedForTask(host, index) || - blacklist.isExecutorBlacklistedForTask(execId, index) + private def isTaskExcludededOnExecOrNode(index: Int, execId: String, host: String): Boolean = { + taskSetExcludelistHelperOpt.exists { excludeList => + excludeList.isNodeExcludedForTask(host, index) || + excludeList.isExecutorExcludedForTask(execId, index) } } @@ -421,11 +422,11 @@ private[spark] class TaskSetManager( taskResourceAssignments: Map[String, ResourceInformation] = Map.empty) : (Option[TaskDescription], Boolean) = { - val offerBlacklisted = taskSetBlacklistHelperOpt.exists { blacklist => - blacklist.isNodeBlacklistedForTaskSet(host) || - blacklist.isExecutorBlacklistedForTaskSet(execId) + val offerExcluded = taskSetExcludelistHelperOpt.exists { excludeList => + excludeList.isNodeExcludedForTaskSet(host) || + excludeList.isExecutorExcludedForTaskSet(execId) } - if (!isZombie && !offerBlacklisted) { + if (!isZombie && !offerExcluded) { val curTime = clock.getTimeMillis() var allowedLocality = maxLocality @@ -493,6 +494,7 @@ private[spark] class TaskSetManager( task.partitionId, addedFiles, addedJars, + addedArchives, task.localProperties, taskResourceAssignments, serializedTask) @@ -518,10 +520,10 @@ private[spark] class TaskSetManager( if (isZombie && runningTasks == 0) { sched.taskSetFinished(this) if (tasksSuccessful == numTasks) { - blacklistTracker.foreach(_.updateBlacklistForSuccessfulTaskSet( + healthTracker.foreach(_.updateExcludedForSuccessfulTaskSet( taskSet.stageId, taskSet.stageAttemptId, - taskSetBlacklistHelperOpt.get.execToFailures)) + taskSetExcludelistHelperOpt.get.execToFailures)) } } } @@ -606,12 +608,13 @@ private[spark] class TaskSetManager( } /** - * Check whether the given task set has been blacklisted to the point 
that it can't run anywhere. + * Check whether the given task set has been excluded to the point that it can't run anywhere. * * It is possible that this taskset has become impossible to schedule *anywhere* due to the - * blacklist. The most common scenario would be if there are fewer executors than - * spark.task.maxFailures. We need to detect this so we can avoid the job from being hung. - * We try to acquire new executor/s by killing an existing idle blacklisted executor. + * failures that lead executors being excluded from the ones we can run on. The most common + * scenario would be if there are fewer executors than spark.task.maxFailures. + * We need to detect this so we can avoid the job from being hung. We try to acquire new + * executor/s by killing an existing idle excluded executor. * * There's a tradeoff here: we could make sure all tasks in the task set are schedulable, but that * would add extra time to each iteration of the scheduling loop. Here, we take the approach of @@ -620,12 +623,12 @@ private[spark] class TaskSetManager( * method is faster in the typical case. In the worst case, this method can take * O(maxTaskFailures + numTasks) time, but it will be faster when there haven't been any task * failures (this is because the method picks one unscheduled task, and then iterates through each - * executor until it finds one that the task isn't blacklisted on). + * executor until it finds one that the task isn't excluded on). */ - private[scheduler] def getCompletelyBlacklistedTaskIfAny( + private[scheduler] def getCompletelyExcludedTaskIfAny( hostToExecutors: HashMap[String, HashSet[String]]): Option[Int] = { - taskSetBlacklistHelperOpt.flatMap { taskSetBlacklist => - val appBlacklist = blacklistTracker.get + taskSetExcludelistHelperOpt.flatMap { taskSetExcludelist => + val appHealthTracker = healthTracker.get // Only look for unschedulable tasks when at least one executor has registered. Otherwise, // task sets will be (unnecessarily) aborted in cases when no executors have registered yet. if (hostToExecutors.nonEmpty) { @@ -651,18 +654,18 @@ private[spark] class TaskSetManager( // when that unschedulable task is the last task remaining. 
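A sketch of the shape of that check: take one unscheduled task and verify that, on every known host, either the node is excluded for it or every executor on the host is. The exclusion predicates are passed in as functions so the example stays self-contained; this is not the TaskSetManager API.

```scala
object CompletelyExcludedCheck {
  // Returns the index of a pending task that cannot run on any known executor, if one exists.
  def findCompletelyExcludedTask(
      pendingTaskIndexes: Seq[Int],
      hostToExecutors: Map[String, Set[String]],
      isNodeExcludedFor: (String, Int) => Boolean,
      isExecutorExcludedFor: (String, Int) => Boolean): Option[Int] = {
    // Only meaningful once at least one executor has registered.
    if (hostToExecutors.isEmpty) return None
    // Like the real code, inspect a single unscheduled task rather than scanning all of them.
    pendingTaskIndexes.headOption.filter { index =>
      hostToExecutors.forall { case (host, execs) =>
        isNodeExcludedFor(host, index) || execs.forall(exec => isExecutorExcludedFor(exec, index))
      }
    }
  }
}
```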
hostToExecutors.forall { case (host, execsOnHost) => // Check if the task can run on the node - val nodeBlacklisted = - appBlacklist.isNodeBlacklisted(host) || - taskSetBlacklist.isNodeBlacklistedForTaskSet(host) || - taskSetBlacklist.isNodeBlacklistedForTask(host, indexInTaskSet) - if (nodeBlacklisted) { + val nodeExcluded = + appHealthTracker.isNodeExcluded(host) || + taskSetExcludelist.isNodeExcludedForTaskSet(host) || + taskSetExcludelist.isNodeExcludedForTask(host, indexInTaskSet) + if (nodeExcluded) { true } else { // Check if the task can run on any of the executors execsOnHost.forall { exec => - appBlacklist.isExecutorBlacklisted(exec) || - taskSetBlacklist.isExecutorBlacklistedForTaskSet(exec) || - taskSetBlacklist.isExecutorBlacklistedForTask(exec, indexInTaskSet) + appHealthTracker.isExecutorExcluded(exec) || + taskSetExcludelist.isExecutorExcludedForTaskSet(exec) || + taskSetExcludelist.isExecutorExcludedForTask(exec, indexInTaskSet) } } } @@ -673,16 +676,16 @@ private[spark] class TaskSetManager( } } - private[scheduler] def abortSinceCompletelyBlacklisted(indexInTaskSet: Int): Unit = { - taskSetBlacklistHelperOpt.foreach { taskSetBlacklist => + private[scheduler] def abortSinceCompletelyExcludedOnFailure(indexInTaskSet: Int): Unit = { + taskSetExcludelistHelperOpt.foreach { taskSetExcludelist => val partition = tasks(indexInTaskSet).partitionId abort(s""" |Aborting $taskSet because task $indexInTaskSet (partition $partition) - |cannot run anywhere due to node and executor blacklist. + |cannot run anywhere due to node and executor excludeOnFailure. |Most recent failure: - |${taskSetBlacklist.getLatestFailureReason} + |${taskSetExcludelist.getLatestFailureReason} | - |Blacklisting behavior can be configured via spark.blacklist.*. + |ExcludeOnFailure behavior can be configured via spark.excludeOnFailure.*. 
|""".stripMargin) } } @@ -821,7 +824,7 @@ private[spark] class TaskSetManager( isZombie = true if (fetchFailed.bmAddress != null) { - blacklistTracker.foreach(_.updateBlacklistForFetchFailure( + healthTracker.foreach(_.updateExcludedForFetchFailure( fetchFailed.bmAddress.host, fetchFailed.bmAddress.executorId)) } @@ -899,7 +902,7 @@ private[spark] class TaskSetManager( if (!isZombie && reason.countTowardsTaskFailures) { assert (null != failureReason) - taskSetBlacklistHelperOpt.foreach(_.updateBlacklistForFailedTask( + taskSetExcludelistHelperOpt.foreach(_.updateExcludedForFailedTask( info.host, info.executorId, index, failureReason)) numFailures(index) += 1 if (numFailures(index) >= maxTaskFailures) { @@ -951,6 +954,9 @@ private[spark] class TaskSetManager( null } + override def isSchedulable: Boolean = !isZombie && + (pendingTasks.all.nonEmpty || pendingSpeculatableTasks.all.nonEmpty) + override def addSchedulable(schedulable: Schedulable): Unit = {} override def removeSchedulable(schedulable: Schedulable): Unit = {} diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala index 7242ab7786061..2f171433bbb5c 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala @@ -22,7 +22,6 @@ import java.nio.ByteBuffer import org.apache.spark.TaskState.TaskState import org.apache.spark.resource.{ResourceInformation, ResourceProfile} import org.apache.spark.rpc.RpcEndpointRef -import org.apache.spark.scheduler.ExecutorDecommissionInfo import org.apache.spark.scheduler.ExecutorLossReason import org.apache.spark.util.SerializableBuffer @@ -95,8 +94,17 @@ private[spark] object CoarseGrainedClusterMessages { case class RemoveExecutor(executorId: String, reason: ExecutorLossReason) extends CoarseGrainedClusterMessage - case class DecommissionExecutor(executorId: String, decommissionInfo: ExecutorDecommissionInfo) - extends CoarseGrainedClusterMessage + // A message that sent from executor to driver to tell driver that the executor has started + // decommissioning. It's used for the case where decommission is triggered at executor (e.g., K8S) + case class ExecutorDecommissioning(executorId: String) extends CoarseGrainedClusterMessage + + // A message that sent from driver to executor to decommission that executor. + // It's used for Standalone's cases, where decommission is triggered at MasterWebUI or Worker. + object DecommissionExecutor extends CoarseGrainedClusterMessage + + // A message that sent to the executor itself when it receives a signal, + // indicating the executor starts to decommission. + object ExecutorDecommissionSigReceived extends CoarseGrainedClusterMessage case class RemoveWorker(workerId: String, host: String, message: String) extends CoarseGrainedClusterMessage @@ -122,7 +130,7 @@ private[spark] object CoarseGrainedClusterMessages { resourceProfileToTotalExecs: Map[ResourceProfile, Int], numLocalityAwareTasksPerResourceProfileId: Map[Int, Int], hostToLocalTaskCount: Map[Int, Map[String, Int]], - nodeBlacklist: Set[String]) + excludedNodes: Set[String]) extends CoarseGrainedClusterMessage // Check if an executor was force-killed but for a reason unrelated to the running tasks. 
@@ -136,7 +144,4 @@ private[spark] object CoarseGrainedClusterMessages { // The message to check if `CoarseGrainedSchedulerBackend` thinks the executor is alive or not. case class IsExecutorAlive(executorId: String) extends CoarseGrainedClusterMessage - - // Used to ask an executor to decommission itself. (Can be an internal message) - case object DecommissionSelf extends CoarseGrainedClusterMessage } diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala index 0f144125af7bf..2bd0b4cc4b7d0 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala @@ -191,10 +191,6 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp executorDataMap.get(executorId).foreach(_.executorEndpoint.send(StopExecutor)) removeExecutor(executorId, reason) - case DecommissionExecutor(executorId, decommissionInfo) => - logError(s"Received decommission executor message ${executorId}: $decommissionInfo") - decommissionExecutor(executorId, decommissionInfo, adjustTargetNumExecutors = false) - case RemoveWorker(workerId, host, message) => removeWorker(workerId, host, message) @@ -213,13 +209,14 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp attributes, resources, resourceProfileId) => if (executorDataMap.contains(executorId)) { context.sendFailure(new IllegalStateException(s"Duplicate executor ID: $executorId")) - } else if (scheduler.nodeBlacklist.contains(hostname) || - isBlacklisted(executorId, hostname)) { - // If the cluster manager gives us an executor on a blacklisted node (because it - // already started allocating those resources before we informed it of our blacklist, - // or if it ignored our blacklist), then we reject that executor immediately. - logInfo(s"Rejecting $executorId as it has been blacklisted.") - context.sendFailure(new IllegalStateException(s"Executor is blacklisted: $executorId")) + } else if (scheduler.excludedNodes.contains(hostname) || + isExecutorExcluded(executorId, hostname)) { + // If the cluster manager gives us an executor on an excluded node (because it + // already started allocating those resources before we informed it of our exclusion, + // or if it ignored our exclusion), then we reject that executor immediately. + logInfo(s"Rejecting $executorId as it has been excluded.") + context.sendFailure( + new IllegalStateException(s"Executor is excluded due to failures: $executorId")) } else { // If the executor's rpc env is not listening for incoming connections, `hostPort` // will be null, and the client connection should be used to contact the executor. 
@@ -272,10 +269,15 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp removeWorker(workerId, host, message) context.reply(true) - case DecommissionExecutor(executorId, decommissionInfo) => - logError(s"Received decommission executor message ${executorId}: ${decommissionInfo}.") - context.reply(decommissionExecutor(executorId, decommissionInfo, - adjustTargetNumExecutors = false)) + // Do not change this code without running the K8s integration suites + case ExecutorDecommissioning(executorId) => + logWarning(s"Received executor $executorId decommissioned message") + context.reply( + decommissionExecutor( + executorId, + ExecutorDecommissionInfo(s"Executor $executorId is decommissioned."), + adjustTargetNumExecutors = false, + triggeredByExecutor = true)) case RetrieveSparkAppConfig(resourceProfileId) => val rp = scheduler.sc.resourceProfileManager.resourceProfileFromId(resourceProfileId) @@ -463,71 +465,50 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp * @param executorsAndDecomInfo Identifiers of executors & decommission info. * @param adjustTargetNumExecutors whether the target number of executors will be adjusted down * after these executors have been decommissioned. + * @param triggeredByExecutor whether the decommission is triggered at executor. * @return the ids of the executors acknowledged by the cluster manager to be removed. */ override def decommissionExecutors( executorsAndDecomInfo: Array[(String, ExecutorDecommissionInfo)], - adjustTargetNumExecutors: Boolean): Seq[String] = { - - val executorsToDecommission = executorsAndDecomInfo.filter { case (executorId, decomInfo) => - CoarseGrainedSchedulerBackend.this.synchronized { - // Only bother decommissioning executors which are alive. - if (isExecutorActive(executorId)) { - executorsPendingDecommission(executorId) = decomInfo.workerHost - true - } else { - false - } + adjustTargetNumExecutors: Boolean, + triggeredByExecutor: Boolean): Seq[String] = withLock { + // Do not change this code without running the K8s integration suites + val executorsToDecommission = executorsAndDecomInfo.flatMap { case (executorId, decomInfo) => + // Only bother decommissioning executors which are alive. + if (isExecutorActive(executorId)) { + scheduler.executorDecommission(executorId, decomInfo) + executorsPendingDecommission(executorId) = decomInfo.workerHost + Some(executorId) + } else { + None } } + logInfo(s"Decommission executors: ${executorsToDecommission.mkString(", ")}") // If we don't want to replace the executors we are decommissioning if (adjustTargetNumExecutors) { - adjustExecutors(executorsToDecommission.map(_._1)) + adjustExecutors(executorsToDecommission) } - executorsToDecommission.filter { case (executorId, decomInfo) => - doDecommission(executorId, decomInfo) - }.map(_._1) - } - - - private def doDecommission(executorId: String, - decomInfo: ExecutorDecommissionInfo): Boolean = { - - logInfo(s"Asking executor $executorId to decommissioning.") - scheduler.executorDecommission(executorId, decomInfo) - // Send decommission message to the executor (it could have originated on the executor - // but not necessarily). - CoarseGrainedSchedulerBackend.this.synchronized { - executorDataMap.get(executorId) match { - case Some(executorInfo) => - executorInfo.executorEndpoint.send(DecommissionSelf) - case None => - // Ignoring the executor since it is not registered. 
- logWarning(s"Attempted to decommission unknown executor $executorId.") - return false + // Mark those corresponding BlockManagers as decommissioned first before we sending + // decommission notification to executors. So, it's less likely to lead to the race + // condition where `getPeer` request from the decommissioned executor comes first + // before the BlockManagers are marked as decommissioned. + // Note that marking BlockManager as decommissioned doesn't need depend on + // `spark.storage.decommission.enabled`. Because it's meaningless to save more blocks + // for the BlockManager since the executor will be shutdown soon. + scheduler.sc.env.blockManager.master.decommissionBlockManagers(executorsToDecommission) + + if (!triggeredByExecutor) { + executorsToDecommission.foreach { executorId => + logInfo(s"Notify executor $executorId to decommissioning.") + executorDataMap(executorId).executorEndpoint.send(DecommissionExecutor) } } - logInfo(s"Asked executor $executorId to decommission.") - - if (conf.get(STORAGE_DECOMMISSION_ENABLED)) { - try { - logInfo(s"Asking block manager corresponding to executor $executorId to decommission.") - scheduler.sc.env.blockManager.master.decommissionBlockManagers(Seq(executorId)) - } catch { - case e: Exception => - logError("Unexpected error during block manager " + - s"decommissioning for executor $executorId: ${e.toString}", e) - return false - } - logInfo(s"Acknowledged decommissioning block manager corresponding to $executorId.") - } - true + executorsToDecommission } - override def start(): Unit = { if (UserGroupInformation.isSecurityEnabled()) { delegationTokenManager = createTokenManager() @@ -872,7 +853,7 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp final override def killExecutorsOnHost(host: String): Boolean = { logInfo(s"Requesting to kill any and all executors on host ${host}") // A potential race exists if a new executor attempts to register on a host - // that is on the blacklist and is no no longer valid. To avoid this race, + // that is on the exclude list and is no no longer valid. To avoid this race, // all executor registration and killing happens in the event loop. This way, either // an executor will fail to register, or will be killed when all executors on a host // are killed. @@ -904,13 +885,13 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp protected def currentDelegationTokens: Array[Byte] = delegationTokens.get() /** - * Checks whether the executor is blacklisted. This is called when the executor tries to - * register with the scheduler, and will deny registration if this method returns true. + * Checks whether the executor is excluded due to failure(s). This is called when the executor + * tries to register with the scheduler, and will deny registration if this method returns true. * - * This is in addition to the blacklist kept by the task scheduler, so custom implementations + * This is in addition to the exclude list kept by the task scheduler, so custom implementations * don't need to check there. 
*/ - protected def isBlacklisted(executorId: String, hostname: String): Boolean = false + protected def isExecutorExcluded(executorId: String, hostname: String): Boolean = false // SPARK-27112: We need to ensure that there is ordering of lock acquisition // between TaskSchedulerImpl and CoarseGrainedSchedulerBackend objects in order to fix diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala index 34b03dfec9e80..c14b2d4e5df31 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/StandaloneSchedulerBackend.scala @@ -177,10 +177,14 @@ private[spark] class StandaloneSchedulerBackend( removeExecutor(fullId.split("/")(1), reason) } - override def executorDecommissioned(fullId: String, decommissionInfo: ExecutorDecommissionInfo) { - logInfo("Asked to decommission executor") + override def executorDecommissioned(fullId: String, + decommissionInfo: ExecutorDecommissionInfo): Unit = { + logInfo(s"Asked to decommission executor $fullId") val execId = fullId.split("/")(1) - decommissionExecutors(Array((execId, decommissionInfo)), adjustTargetNumExecutors = false) + decommissionExecutors( + Array((execId, decommissionInfo)), + adjustTargetNumExecutors = false, + triggeredByExecutor = false) logInfo("Executor %s decommissioned: %s".format(fullId, decommissionInfo)) } diff --git a/core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala b/core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala index a4df0d543ecbe..4ebb7b0defd7f 100644 --- a/core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala +++ b/core/src/main/scala/org/apache/spark/security/CryptoStreamUtils.scala @@ -167,7 +167,7 @@ private[spark] object CryptoStreamUtils extends Logging { } /** - * SPARK-25535. The commons-cryto library will throw InternalError if something goes + * SPARK-25535. The commons-crypto library will throw InternalError if something goes * wrong, and leave bad state behind in the Java wrappers, so it's not safe to use them * afterwards. This wrapper detects that situation and avoids further calls into the * commons-crypto code, while still allowing the underlying streams to be closed. diff --git a/core/src/main/scala/org/apache/spark/security/SecurityConfigurationLock.scala b/core/src/main/scala/org/apache/spark/security/SecurityConfigurationLock.scala new file mode 100644 index 0000000000000..0741a8c1580df --- /dev/null +++ b/core/src/main/scala/org/apache/spark/security/SecurityConfigurationLock.scala @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.security + +/** + * There are cases when global JVM security configuration must be modified. + * In order to avoid race the modification must be synchronized with this. + */ +object SecurityConfigurationLock diff --git a/core/src/main/scala/org/apache/spark/security/SocketAuthHelper.scala b/core/src/main/scala/org/apache/spark/security/SocketAuthHelper.scala index dbcb376905338..f800553c5388b 100644 --- a/core/src/main/scala/org/apache/spark/security/SocketAuthHelper.scala +++ b/core/src/main/scala/org/apache/spark/security/SocketAuthHelper.scala @@ -34,7 +34,7 @@ import org.apache.spark.util.Utils * * There's no secrecy, so this relies on the sockets being either local or somehow encrypted. */ -private[spark] class SocketAuthHelper(conf: SparkConf) { +private[spark] class SocketAuthHelper(val conf: SparkConf) { val secret = Utils.createSecret(conf) diff --git a/core/src/main/scala/org/apache/spark/security/SocketAuthServer.scala b/core/src/main/scala/org/apache/spark/security/SocketAuthServer.scala index 548fd1b07ddc5..35990b5a59281 100644 --- a/core/src/main/scala/org/apache/spark/security/SocketAuthServer.scala +++ b/core/src/main/scala/org/apache/spark/security/SocketAuthServer.scala @@ -25,6 +25,8 @@ import scala.concurrent.duration.Duration import scala.util.Try import org.apache.spark.SparkEnv +import org.apache.spark.internal.Logging +import org.apache.spark.internal.config.Python.PYTHON_AUTH_SOCKET_TIMEOUT import org.apache.spark.network.util.JavaUtils import org.apache.spark.util.{ThreadUtils, Utils} @@ -34,11 +36,11 @@ import org.apache.spark.util.{ThreadUtils, Utils} * handling one batch of data, with authentication and error handling. * * The socket server can only accept one connection, or close if no connection - * in 15 seconds. + * in configurable amount of seconds (default 15). 
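A minimal standalone version of the accept-with-timeout pattern this server now uses, with the timeout as a plain parameter instead of the PYTHON_AUTH_SOCKET_TIMEOUT config and the authentication step omitted.

```scala
import java.net.{InetAddress, ServerSocket, Socket}
import scala.concurrent.Promise
import scala.util.Try

object OneShotLoopbackServer {
  // Bind to the loopback interface with a backlog of 1, give up if nobody connects within
  // timeoutSec, and complete the promise with the handler's result (or its failure).
  def start[T](timeoutSec: Int, threadName: String)(handle: Socket => T): (Int, Promise[T]) = {
    val serverSocket = new ServerSocket(0, 1, InetAddress.getLoopbackAddress)
    serverSocket.setSoTimeout(timeoutSec * 1000)
    val promise = Promise[T]()
    val worker = new Thread(threadName) {
      setDaemon(true)
      override def run(): Unit = {
        var sock: Socket = null
        try {
          sock = serverSocket.accept() // throws SocketTimeoutException after timeoutSec
          promise.complete(Try(handle(sock)))
        } catch {
          case e: Exception => promise.tryFailure(e)
        } finally {
          Try(serverSocket.close())
          if (sock != null) Try(sock.close())
        }
      }
    }
    worker.start()
    (serverSocket.getLocalPort, promise)
  }
}
```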
*/ private[spark] abstract class SocketAuthServer[T]( authHelper: SocketAuthHelper, - threadName: String) { + threadName: String) extends Logging { def this(env: SparkEnv, threadName: String) = this(new SocketAuthHelper(env.conf), threadName) def this(threadName: String) = this(SparkEnv.get, threadName) @@ -46,19 +48,26 @@ private[spark] abstract class SocketAuthServer[T]( private val promise = Promise[T]() private def startServer(): (Int, String) = { + logTrace("Creating listening socket") val serverSocket = new ServerSocket(0, 1, InetAddress.getByAddress(Array(127, 0, 0, 1))) - // Close the socket if no connection in 15 seconds - serverSocket.setSoTimeout(15000) + // Close the socket if no connection in the configured seconds + val timeout = authHelper.conf.get(PYTHON_AUTH_SOCKET_TIMEOUT).toInt + logTrace(s"Setting timeout to $timeout sec") + serverSocket.setSoTimeout(timeout * 1000) new Thread(threadName) { setDaemon(true) override def run(): Unit = { var sock: Socket = null try { + logTrace(s"Waiting for connection on port ${serverSocket.getLocalPort}") sock = serverSocket.accept() + logTrace(s"Connection accepted from address ${sock.getRemoteSocketAddress}") authHelper.authClient(sock) + logTrace("Client authenticated") promise.complete(Try(handleConnection(sock))) } finally { + logTrace("Closing server") JavaUtils.closeQuietly(serverSocket) JavaUtils.closeQuietly(sock) } diff --git a/core/src/main/scala/org/apache/spark/shuffle/FetchFailedException.scala b/core/src/main/scala/org/apache/spark/shuffle/FetchFailedException.scala index 6509a04dc4893..208c676a1c352 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/FetchFailedException.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/FetchFailedException.scala @@ -48,7 +48,7 @@ private[spark] class FetchFailedException( mapTaskId: Long, mapIndex: Int, reduceId: Int, - cause: Throwable) { + cause: Throwable) = { this(bmAddress, shuffleId, mapTaskId, mapIndex, reduceId, cause.getMessage, cause) } diff --git a/core/src/main/scala/org/apache/spark/shuffle/IndexShuffleBlockResolver.scala b/core/src/main/scala/org/apache/spark/shuffle/IndexShuffleBlockResolver.scala index a019a3382d5b2..5f0bb42108c56 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/IndexShuffleBlockResolver.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/IndexShuffleBlockResolver.scala @@ -91,7 +91,7 @@ private[spark] class IndexShuffleBlockResolver( * When the dirs parameter is None then use the disk manager's local directories. Otherwise, * read from the specified directories. */ - private def getIndexFile( + def getIndexFile( shuffleId: Int, mapId: Long, dirs: Option[Array[String]] = None): File = { @@ -225,19 +225,37 @@ private[spark] class IndexShuffleBlockResolver( * Get the index & data block for migration. 
*/ def getMigrationBlocks(shuffleBlockInfo: ShuffleBlockInfo): List[(BlockId, ManagedBuffer)] = { - val shuffleId = shuffleBlockInfo.shuffleId - val mapId = shuffleBlockInfo.mapId - // Load the index block - val indexFile = getIndexFile(shuffleId, mapId) - val indexBlockId = ShuffleIndexBlockId(shuffleId, mapId, NOOP_REDUCE_ID) - val indexFileSize = indexFile.length() - val indexBlockData = new FileSegmentManagedBuffer(transportConf, indexFile, 0, indexFileSize) + try { + val shuffleId = shuffleBlockInfo.shuffleId + val mapId = shuffleBlockInfo.mapId + // Load the index block + val indexFile = getIndexFile(shuffleId, mapId) + val indexBlockId = ShuffleIndexBlockId(shuffleId, mapId, NOOP_REDUCE_ID) + val indexFileSize = indexFile.length() + val indexBlockData = new FileSegmentManagedBuffer( + transportConf, indexFile, 0, indexFileSize) + + // Load the data block + val dataFile = getDataFile(shuffleId, mapId) + val dataBlockId = ShuffleDataBlockId(shuffleId, mapId, NOOP_REDUCE_ID) + val dataBlockData = new FileSegmentManagedBuffer( + transportConf, dataFile, 0, dataFile.length()) - // Load the data block - val dataFile = getDataFile(shuffleId, mapId) - val dataBlockId = ShuffleDataBlockId(shuffleId, mapId, NOOP_REDUCE_ID) - val dataBlockData = new FileSegmentManagedBuffer(transportConf, dataFile, 0, dataFile.length()) - List((indexBlockId, indexBlockData), (dataBlockId, dataBlockData)) + // Make sure the index exist. + if (!indexFile.exists()) { + throw new FileNotFoundException("Index file is deleted already.") + } + if (dataFile.exists()) { + List((indexBlockId, indexBlockData), (dataBlockId, dataBlockData)) + } else { + List((indexBlockId, indexBlockData)) + } + } catch { + case _: Exception => // If we can't load the blocks ignore them. + logWarning(s"Failed to resolve shuffle block ${shuffleBlockInfo}. " + + "This is expected to occur if a block is removed after decommissioning has started.") + List.empty[(BlockId, ManagedBuffer)] + } } diff --git a/core/src/main/scala/org/apache/spark/shuffle/ShuffleBlockPusher.scala b/core/src/main/scala/org/apache/spark/shuffle/ShuffleBlockPusher.scala new file mode 100644 index 0000000000000..88d084ce1b2f4 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/shuffle/ShuffleBlockPusher.scala @@ -0,0 +1,450 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.shuffle + +import java.io.File +import java.net.ConnectException +import java.nio.ByteBuffer +import java.util.concurrent.ExecutorService + +import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Queue} + +import com.google.common.base.Throwables + +import org.apache.spark.{ShuffleDependency, SparkConf, SparkEnv} +import org.apache.spark.annotation.Since +import org.apache.spark.internal.Logging +import org.apache.spark.internal.config._ +import org.apache.spark.launcher.SparkLauncher +import org.apache.spark.network.buffer.{FileSegmentManagedBuffer, ManagedBuffer, NioManagedBuffer} +import org.apache.spark.network.netty.SparkTransportConf +import org.apache.spark.network.shuffle.BlockFetchingListener +import org.apache.spark.network.shuffle.ErrorHandler.BlockPushErrorHandler +import org.apache.spark.network.util.TransportConf +import org.apache.spark.shuffle.ShuffleBlockPusher._ +import org.apache.spark.storage.{BlockId, BlockManagerId, ShufflePushBlockId} +import org.apache.spark.util.{ThreadUtils, Utils} + +/** + * Used for pushing shuffle blocks to remote shuffle services when push shuffle is enabled. + * When push shuffle is enabled, it is created after the shuffle writer finishes writing the shuffle + * file and initiates the block push process. + * + * @param conf spark configuration + */ +@Since("3.2.0") +private[spark] class ShuffleBlockPusher(conf: SparkConf) extends Logging { + private[this] val maxBlockSizeToPush = conf.get(SHUFFLE_MAX_BLOCK_SIZE_TO_PUSH) + private[this] val maxBlockBatchSize = conf.get(SHUFFLE_MAX_BLOCK_BATCH_SIZE_FOR_PUSH) + private[this] val maxBytesInFlight = + conf.getSizeAsMb("spark.reducer.maxSizeInFlight", "48m") * 1024 * 1024 + private[this] val maxReqsInFlight = conf.getInt("spark.reducer.maxReqsInFlight", Int.MaxValue) + private[this] val maxBlocksInFlightPerAddress = conf.get(REDUCER_MAX_BLOCKS_IN_FLIGHT_PER_ADDRESS) + private[this] var bytesInFlight = 0L + private[this] var reqsInFlight = 0 + private[this] val numBlocksInFlightPerAddress = new HashMap[BlockManagerId, Int]() + private[this] val deferredPushRequests = new HashMap[BlockManagerId, Queue[PushRequest]]() + private[this] val pushRequests = new Queue[PushRequest] + private[this] val errorHandler = createErrorHandler() + // VisibleForTesting + private[shuffle] val unreachableBlockMgrs = new HashSet[BlockManagerId]() + + // VisibleForTesting + private[shuffle] def createErrorHandler(): BlockPushErrorHandler = { + new BlockPushErrorHandler() { + // For a connection exception against a particular host, we will stop pushing any + // blocks to just that host and continue push blocks to other hosts. So, here push of + // all blocks will only stop when it is "Too Late". Also see updateStateAndCheckIfPushMore. + override def shouldRetryError(t: Throwable): Boolean = { + // If the block is too late, there is no need to retry it + !Throwables.getStackTraceAsString(t).contains(BlockPushErrorHandler.TOO_LATE_MESSAGE_SUFFIX) + } + } + } + + /** + * Initiates the block push. 
+ * + * @param dataFile mapper generated shuffle data file + * @param partitionLengths array of shuffle block size so we can tell shuffle block + * @param dep shuffle dependency to get shuffle ID and the location of remote shuffle + * services to push local shuffle blocks + * @param mapIndex map index of the shuffle map task + */ + private[shuffle] def initiateBlockPush( + dataFile: File, + partitionLengths: Array[Long], + dep: ShuffleDependency[_, _, _], + mapIndex: Int): Unit = { + val numPartitions = dep.partitioner.numPartitions + val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle") + val requests = prepareBlockPushRequests(numPartitions, mapIndex, dep.shuffleId, dataFile, + partitionLengths, dep.getMergerLocs, transportConf) + // Randomize the orders of the PushRequest, so different mappers pushing blocks at the same + // time won't be pushing the same ranges of shuffle partitions. + pushRequests ++= Utils.randomize(requests) + + submitTask(() => { + pushUpToMax() + }) + } + + /** + * Triggers the push. It's a separate method for testing. + * VisibleForTesting + */ + protected def submitTask(task: Runnable): Unit = { + if (BLOCK_PUSHER_POOL != null) { + BLOCK_PUSHER_POOL.execute(task) + } + } + + /** + * Since multiple block push threads could potentially be calling pushUpToMax for the same + * mapper, we synchronize access to this method so that only one thread can push blocks for + * a given mapper. This helps to simplify access to the shared states. The down side of this + * is that we could unnecessarily block other mappers' block pushes if all the threads + * are occupied by block pushes from the same mapper. + * + * This code is similar to ShuffleBlockFetcherIterator#fetchUpToMaxBytes in how it throttles + * the data transfer between shuffle client/server. + */ + private def pushUpToMax(): Unit = synchronized { + // Process any outstanding deferred push requests if possible. + if (deferredPushRequests.nonEmpty) { + for ((remoteAddress, defReqQueue) <- deferredPushRequests) { + while (isRemoteBlockPushable(defReqQueue) && + !isRemoteAddressMaxedOut(remoteAddress, defReqQueue.front)) { + val request = defReqQueue.dequeue() + logDebug(s"Processing deferred push request for $remoteAddress with " + + s"${request.blocks.length} blocks") + sendRequest(request) + if (defReqQueue.isEmpty) { + deferredPushRequests -= remoteAddress + } + } + } + } + + // Process any regular push requests if possible. + while (isRemoteBlockPushable(pushRequests)) { + val request = pushRequests.dequeue() + val remoteAddress = request.address + if (isRemoteAddressMaxedOut(remoteAddress, request)) { + logDebug(s"Deferring push request for $remoteAddress with ${request.blocks.size} blocks") + deferredPushRequests.getOrElseUpdate(remoteAddress, new Queue[PushRequest]()) + .enqueue(request) + } else { + sendRequest(request) + } + } + + def isRemoteBlockPushable(pushReqQueue: Queue[PushRequest]): Boolean = { + pushReqQueue.nonEmpty && + (bytesInFlight == 0 || + (reqsInFlight + 1 <= maxReqsInFlight && + bytesInFlight + pushReqQueue.front.size <= maxBytesInFlight)) + } + + // Checks if sending a new push request will exceed the max no. of blocks being pushed to a + // given remote address. + def isRemoteAddressMaxedOut(remoteAddress: BlockManagerId, request: PushRequest): Boolean = { + (numBlocksInFlightPerAddress.getOrElse(remoteAddress, 0) + + request.blocks.size) > maxBlocksInFlightPerAddress + } + } + + /** + * Push blocks to remote shuffle server. 
The callback listener will invoke #pushUpToMax again + * to trigger pushing the next batch of blocks once some block transfer is done in the current + * batch. This way, we decouple the map task from the block push process, since it is netty + * client thread instead of task execution thread which takes care of majority of the block + * pushes. + */ + private def sendRequest(request: PushRequest): Unit = { + bytesInFlight += request.size + reqsInFlight += 1 + numBlocksInFlightPerAddress(request.address) = numBlocksInFlightPerAddress.getOrElseUpdate( + request.address, 0) + request.blocks.length + + val sizeMap = request.blocks.map { case (blockId, size) => (blockId.toString, size) }.toMap + val address = request.address + val blockIds = request.blocks.map(_._1.toString) + val remainingBlocks = new HashSet[String]() ++= blockIds + + val blockPushListener = new BlockFetchingListener { + // Initiating a connection and pushing blocks to a remote shuffle service is always handled by + // the block-push-threads. We should not initiate the connection creation in the + // blockPushListener callbacks which are invoked by the netty eventloop because: + // 1. TrasportClient.createConnection(...) blocks for connection to be established and it's + // recommended to avoid any blocking operations in the eventloop; + // 2. The actual connection creation is a task that gets added to the task queue of another + // eventloop which could have eventloops eventually blocking each other. + // Once the blockPushListener is notified of the block push success or failure, we + // just delegate it to block-push-threads. + def handleResult(result: PushResult): Unit = { + submitTask(() => { + if (updateStateAndCheckIfPushMore( + sizeMap(result.blockId), address, remainingBlocks, result)) { + pushUpToMax() + } + }) + } + + override def onBlockFetchSuccess(blockId: String, data: ManagedBuffer): Unit = { + logTrace(s"Push for block $blockId to $address successful.") + handleResult(PushResult(blockId, null)) + } + + override def onBlockFetchFailure(blockId: String, exception: Throwable): Unit = { + // check the message or it's cause to see it needs to be logged. + if (!errorHandler.shouldLogError(exception)) { + logTrace(s"Pushing block $blockId to $address failed.", exception) + } else { + logWarning(s"Pushing block $blockId to $address failed.", exception) + } + handleResult(PushResult(blockId, exception)) + } + } + SparkEnv.get.blockManager.blockStoreClient.pushBlocks( + address.host, address.port, blockIds.toArray, + sliceReqBufferIntoBlockBuffers(request.reqBuffer, request.blocks.map(_._2)), + blockPushListener) + } + + /** + * Given the ManagedBuffer representing all the continuous blocks inside the shuffle data file + * for a PushRequest and an array of individual block sizes, load the buffer from disk into + * memory and slice it into multiple smaller buffers representing each block. + * + * With nio ByteBuffer, the individual block buffers share data with the initial in memory + * buffer loaded from disk. Thus only one copy of the block data is kept in memory. 
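A self-contained sketch of that slicing: carve one in-memory buffer into per-block views that share the underlying bytes via duplicate/position/limit/slice, with no copying.

```scala
import java.nio.ByteBuffer

object BlockBufferSlicer {
  // Slice `whole` into consecutive views, one per entry in blockSizes.
  // Each view shares memory with `whole`; nothing is copied.
  def slice(whole: ByteBuffer, blockSizes: Seq[Int]): Array[ByteBuffer] = {
    var offset = 0
    blockSizes.map { size =>
      val dup = whole.duplicate()
      dup.position(offset)
      dup.limit(offset + size)
      offset += size
      dup.slice()
    }.toArray
  }

  def main(args: Array[String]): Unit = {
    val data = ByteBuffer.wrap(Array.tabulate[Byte](10)(_.toByte))
    val parts = slice(data, Seq(3, 3, 4))
    println(parts.map(_.remaining()).mkString(",")) // 3,3,4
  }
}
```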
+ * @param reqBuffer A {{FileSegmentManagedBuffer}} representing all the continuous blocks in + * the shuffle data file for a PushRequest + * @param blockSizes Array of block sizes + * @return Array of in memory buffer for each individual block + */ + private def sliceReqBufferIntoBlockBuffers( + reqBuffer: ManagedBuffer, + blockSizes: Seq[Int]): Array[ManagedBuffer] = { + if (blockSizes.size == 1) { + Array(reqBuffer) + } else { + val inMemoryBuffer = reqBuffer.nioByteBuffer() + val blockOffsets = new Array[Int](blockSizes.size) + var offset = 0 + for (index <- blockSizes.indices) { + blockOffsets(index) = offset + offset += blockSizes(index) + } + blockOffsets.zip(blockSizes).map { + case (offset, size) => + new NioManagedBuffer(inMemoryBuffer.duplicate() + .position(offset) + .limit(offset + size).asInstanceOf[ByteBuffer].slice()) + }.toArray + } + } + + /** + * Updates the stats and based on the previous push result decides whether to push more blocks + * or stop. + * + * @param bytesPushed number of bytes pushed. + * @param address address of the remote service + * @param remainingBlocks remaining blocks + * @param pushResult result of the last push + * @return true if more blocks should be pushed; false otherwise. + */ + private def updateStateAndCheckIfPushMore( + bytesPushed: Long, + address: BlockManagerId, + remainingBlocks: HashSet[String], + pushResult: PushResult): Boolean = synchronized { + remainingBlocks -= pushResult.blockId + bytesInFlight -= bytesPushed + numBlocksInFlightPerAddress(address) = numBlocksInFlightPerAddress(address) - 1 + if (remainingBlocks.isEmpty) { + reqsInFlight -= 1 + } + if (pushResult.failure != null && pushResult.failure.getCause.isInstanceOf[ConnectException]) { + // Remove all the blocks for this address just once because removing from pushRequests + // is expensive. If there is a ConnectException for the first block, all the subsequent + // blocks to that address will fail, so should avoid removing multiple times. + if (!unreachableBlockMgrs.contains(address)) { + var removed = 0 + unreachableBlockMgrs.add(address) + removed += pushRequests.dequeueAll(req => req.address == address).length + removed += deferredPushRequests.remove(address).map(_.length).getOrElse(0) + logWarning(s"Received a ConnectException from $address. " + + s"Dropping $removed push-requests and " + + s"not pushing any more blocks to this address.") + } + } + if (pushResult.failure != null && !errorHandler.shouldRetryError(pushResult.failure)) { + logDebug(s"Received after merge is finalized from $address. Not pushing any more blocks.") + return false + } else { + remainingBlocks.isEmpty && (pushRequests.nonEmpty || deferredPushRequests.nonEmpty) + } + } + + /** + * Convert the shuffle data file of the current mapper into a list of PushRequest. Basically, + * continuous blocks in the shuffle file are grouped into a single request to allow more + * efficient read of the block data. Each mapper for a given shuffle will receive the same + * list of BlockManagerIds as the target location to push the blocks to. All mappers in the + * same shuffle will map shuffle partition ranges to individual target locations in a consistent + * manner to make sure each target location receives shuffle blocks belonging to the same set + * of partition ranges. 0-length blocks and blocks that are large enough will be skipped. 
+ * + * @param numPartitions number of shuffle partitions in the shuffle file + * @param partitionId map index of the current mapper + * @param shuffleId shuffleId of current shuffle + * @param dataFile shuffle data file + * @param partitionLengths array of sizes of blocks in the shuffle data file + * @param mergerLocs target locations to push blocks to + * @param transportConf transportConf used to create FileSegmentManagedBuffer + * @return List of the PushRequest, randomly shuffled. + * + * VisibleForTesting + */ + private[shuffle] def prepareBlockPushRequests( + numPartitions: Int, + partitionId: Int, + shuffleId: Int, + dataFile: File, + partitionLengths: Array[Long], + mergerLocs: Seq[BlockManagerId], + transportConf: TransportConf): Seq[PushRequest] = { + var offset = 0L + var currentReqSize = 0 + var currentReqOffset = 0L + var currentMergerId = 0 + val numMergers = mergerLocs.length + val requests = new ArrayBuffer[PushRequest] + var blocks = new ArrayBuffer[(BlockId, Int)] + for (reduceId <- 0 until numPartitions) { + val blockSize = partitionLengths(reduceId) + logDebug( + s"Block ${ShufflePushBlockId(shuffleId, partitionId, reduceId)} is of size $blockSize") + // Skip 0-length blocks and blocks that exceed the max block size to push + if (blockSize > 0) { + val mergerId = math.min(math.floor(reduceId * 1.0 / numPartitions * numMergers), + numMergers - 1).asInstanceOf[Int] + // Start a new PushRequest if the current request goes beyond the max batch size, + // or the number of blocks in the current request goes beyond the limit per destination, + // or the next block push location is for a different shuffle service, or the next block + // exceeds the max block size to push limit. This guarantees that each PushRequest + // represents continuous blocks in the shuffle file to be pushed to the same shuffle + // service, and does not go beyond existing limitations. 
+ if (currentReqSize + blockSize <= maxBlockBatchSize + && blocks.size < maxBlocksInFlightPerAddress + && mergerId == currentMergerId && blockSize <= maxBlockSizeToPush) { + // Add current block to current batch + currentReqSize += blockSize.toInt + } else { + if (blocks.nonEmpty) { + // Convert the previous batch into a PushRequest + requests += PushRequest(mergerLocs(currentMergerId), blocks.toSeq, + createRequestBuffer(transportConf, dataFile, currentReqOffset, currentReqSize)) + blocks = new ArrayBuffer[(BlockId, Int)] + } + // Start a new batch + currentReqSize = 0 + // Set currentReqOffset to -1 so we are able to distinguish between the initial value + // of currentReqOffset and when we are about to start a new batch + currentReqOffset = -1 + currentMergerId = mergerId + } + // Only push blocks under the size limit + if (blockSize <= maxBlockSizeToPush) { + val blockSizeInt = blockSize.toInt + blocks += ((ShufflePushBlockId(shuffleId, partitionId, reduceId), blockSizeInt)) + // Only update currentReqOffset if the current block is the first in the request + if (currentReqOffset == -1) { + currentReqOffset = offset + } + if (currentReqSize == 0) { + currentReqSize += blockSizeInt + } + } + } + offset += blockSize + } + // Add in the final request + if (blocks.nonEmpty) { + requests += PushRequest(mergerLocs(currentMergerId), blocks.toSeq, + createRequestBuffer(transportConf, dataFile, currentReqOffset, currentReqSize)) + } + requests.toSeq + } + + // Visible for testing + protected def createRequestBuffer( + conf: TransportConf, + dataFile: File, + offset: Long, + length: Long): ManagedBuffer = { + new FileSegmentManagedBuffer(conf, dataFile, offset, length) + } +} + +private[spark] object ShuffleBlockPusher { + + /** + * A request to push blocks to a remote shuffle service + * @param address remote shuffle service location to push blocks to + * @param blocks list of block IDs and their sizes + * @param reqBuffer a chunk of data in the shuffle data file corresponding to the continuous + * blocks represented in this request + */ + private[spark] case class PushRequest( + address: BlockManagerId, + blocks: Seq[(BlockId, Int)], + reqBuffer: ManagedBuffer) { + val size = blocks.map(_._2).sum + } + + /** + * Result of the block push. + * @param blockId blockId + * @param failure exception if the push was unsuccessful; null otherwise; + */ + private case class PushResult(blockId: String, failure: Throwable) + + private val BLOCK_PUSHER_POOL: ExecutorService = { + val conf = SparkEnv.get.conf + if (Utils.isPushBasedShuffleEnabled(conf)) { + val numThreads = conf.get(SHUFFLE_NUM_PUSH_THREADS) + .getOrElse(conf.getInt(SparkLauncher.EXECUTOR_CORES, 1)) + ThreadUtils.newDaemonFixedThreadPool(numThreads, "shuffle-block-push-thread") + } else { + null + } + } + + /** + * Stop the shuffle pusher pool if it isn't null. + */ + private[spark] def stop(): Unit = { + if (BLOCK_PUSHER_POOL != null) { + BLOCK_PUSHER_POOL.shutdown() + } + } +} diff --git a/core/src/main/scala/org/apache/spark/shuffle/ShuffleManager.scala b/core/src/main/scala/org/apache/spark/shuffle/ShuffleManager.scala index 400c4526f0114..4e2183451c258 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/ShuffleManager.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/ShuffleManager.scala @@ -24,8 +24,12 @@ import org.apache.spark.{ShuffleDependency, TaskContext} * and on each executor, based on the spark.shuffle.manager setting. 
The driver registers shuffles * with it, and executors (or tasks running locally in the driver) can ask to read and write data. * - * NOTE: this will be instantiated by SparkEnv so its constructor can take a SparkConf and + * NOTE: + * 1. This will be instantiated by SparkEnv so its constructor can take a SparkConf and * boolean isDriver as parameters. + * 2. This contains a method ShuffleBlockResolver which interacts with External Shuffle Service + * when it is enabled. Need to pay attention to that, if implementing a custom ShuffleManager, to + * make sure the custom ShuffleManager could co-exist with External Shuffle Service. */ private[spark] trait ShuffleManager { diff --git a/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriteProcessor.scala b/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriteProcessor.scala index 1429144c6f6e2..abff650b0611b 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriteProcessor.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriteProcessor.scala @@ -21,6 +21,7 @@ import org.apache.spark.{Partition, ShuffleDependency, SparkEnv, TaskContext} import org.apache.spark.internal.Logging import org.apache.spark.rdd.RDD import org.apache.spark.scheduler.MapStatus +import org.apache.spark.util.Utils /** * The interface for customizing shuffle write process. The driver create a ShuffleWriteProcessor @@ -57,7 +58,23 @@ private[spark] class ShuffleWriteProcessor extends Serializable with Logging { createMetricsReporter(context)) writer.write( rdd.iterator(partition, context).asInstanceOf[Iterator[_ <: Product2[Any, Any]]]) - writer.stop(success = true).get + val mapStatus = writer.stop(success = true) + if (mapStatus.isDefined) { + // Initiate shuffle push process if push based shuffle is enabled + // The map task only takes care of converting the shuffle data file into multiple + // block push requests. It delegates pushing the blocks to a different thread-pool - + // ShuffleBlockPusher.BLOCK_PUSHER_POOL. 
+ if (Utils.isPushBasedShuffleEnabled(SparkEnv.get.conf) && dep.getMergerLocs.nonEmpty) { + manager.shuffleBlockResolver match { + case resolver: IndexShuffleBlockResolver => + val dataFile = resolver.getDataFile(dep.shuffleId, mapId) + new ShuffleBlockPusher(SparkEnv.get.conf) + .initiateBlockPush(dataFile, writer.getPartitionLengths(), dep, partition.index) + case _ => + } + } + } + mapStatus.get } catch { case e: Exception => try { diff --git a/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriter.scala b/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriter.scala index 4cc4ef5f1886e..a279b4c8f42f4 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriter.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/ShuffleWriter.scala @@ -31,4 +31,7 @@ private[spark] abstract class ShuffleWriter[K, V] { /** Close this writer, passing along whether the map completed */ def stop(success: Boolean): Option[MapStatus] + + /** Get the lengths of each partition */ + def getPartitionLengths(): Array[Long] } diff --git a/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleManager.scala b/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleManager.scala index 72460180f5908..d9b8eddcf8cd0 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleManager.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleManager.scala @@ -22,11 +22,9 @@ import java.util.concurrent.ConcurrentHashMap import scala.collection.JavaConverters._ import org.apache.spark._ -import org.apache.spark.internal.{config, Logging} -import org.apache.spark.scheduler.MapStatus +import org.apache.spark.internal.Logging import org.apache.spark.shuffle._ -import org.apache.spark.shuffle.api.{ShuffleDataIO, ShuffleExecutorComponents} -import org.apache.spark.util.Utils +import org.apache.spark.shuffle.api.ShuffleExecutorComponents import org.apache.spark.util.collection.OpenHashSet /** diff --git a/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala b/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala index 83ebe3e12946c..af8d1e2fff413 100644 --- a/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala +++ b/core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala @@ -45,6 +45,8 @@ private[spark] class SortShuffleWriter[K, V, C]( private var mapStatus: MapStatus = null + private var partitionLengths: Array[Long] = _ + private val writeMetrics = context.taskMetrics().shuffleWriteMetrics /** Write a bunch of records to this task's output */ @@ -67,7 +69,7 @@ private[spark] class SortShuffleWriter[K, V, C]( val mapOutputWriter = shuffleExecutorComponents.createMapOutputWriter( dep.shuffleId, mapId, dep.partitioner.numPartitions) sorter.writePartitionedMapOutput(dep.shuffleId, mapId, mapOutputWriter) - val partitionLengths = mapOutputWriter.commitAllPartitions().getPartitionLengths + partitionLengths = mapOutputWriter.commitAllPartitions().getPartitionLengths mapStatus = MapStatus(blockManager.shuffleServerId, partitionLengths, mapId) } @@ -93,6 +95,8 @@ private[spark] class SortShuffleWriter[K, V, C]( } } } + + override def getPartitionLengths(): Array[Long] = partitionLengths } private[spark] object SortShuffleWriter { diff --git a/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala b/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala index 7ae9117137caa..52d41cdd72664 100644 --- a/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala +++ 
b/core/src/main/scala/org/apache/spark/status/AppStatusListener.scala @@ -283,82 +283,143 @@ private[spark] class AppStatusListener( } } + // Note, the blacklisted functions are left here for backwards compatibility to allow + // new history server to properly read and display older event logs. override def onExecutorBlacklisted(event: SparkListenerExecutorBlacklisted): Unit = { - updateBlackListStatus(event.executorId, true) + updateExecExclusionStatus(event.executorId, true) + } + + override def onExecutorExcluded(event: SparkListenerExecutorExcluded): Unit = { + updateExecExclusionStatus(event.executorId, true) } override def onExecutorBlacklistedForStage( event: SparkListenerExecutorBlacklistedForStage): Unit = { - val now = System.nanoTime() + updateExclusionStatusForStage(event.stageId, event.stageAttemptId, event.executorId) + } - Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage => - setStageBlackListStatus(stage, now, event.executorId) - } - liveExecutors.get(event.executorId).foreach { exec => - addBlackListedStageTo(exec, event.stageId, now) - } + override def onExecutorExcludedForStage( + event: SparkListenerExecutorExcludedForStage): Unit = { + updateExclusionStatusForStage(event.stageId, event.stageAttemptId, event.executorId) } override def onNodeBlacklistedForStage(event: SparkListenerNodeBlacklistedForStage): Unit = { - val now = System.nanoTime() + updateNodeExclusionStatusForStage(event.stageId, event.stageAttemptId, event.hostId) + } - // Implicitly blacklist every available executor for the stage associated with this node - Option(liveStages.get((event.stageId, event.stageAttemptId))).foreach { stage => - val executorIds = liveExecutors.values.filter(_.host == event.hostId).map(_.executorId).toSeq - setStageBlackListStatus(stage, now, executorIds: _*) - } - liveExecutors.values.filter(_.hostname == event.hostId).foreach { exec => - addBlackListedStageTo(exec, event.stageId, now) - } + override def onNodeExcludedForStage(event: SparkListenerNodeExcludedForStage): Unit = { + updateNodeExclusionStatusForStage(event.stageId, event.stageAttemptId, event.hostId) } - private def addBlackListedStageTo(exec: LiveExecutor, stageId: Int, now: Long): Unit = { - exec.blacklistedInStages += stageId + private def addExcludedStageTo(exec: LiveExecutor, stageId: Int, now: Long): Unit = { + exec.excludedInStages += stageId liveUpdate(exec, now) } private def setStageBlackListStatus(stage: LiveStage, now: Long, executorIds: String*): Unit = { executorIds.foreach { executorId => val executorStageSummary = stage.executorSummary(executorId) - executorStageSummary.isBlacklisted = true + executorStageSummary.isExcluded = true maybeUpdate(executorStageSummary, now) } - stage.blackListedExecutors ++= executorIds + stage.excludedExecutors ++= executorIds + maybeUpdate(stage, now) + } + + private def setStageExcludedStatus(stage: LiveStage, now: Long, executorIds: String*): Unit = { + executorIds.foreach { executorId => + val executorStageSummary = stage.executorSummary(executorId) + executorStageSummary.isExcluded = true + maybeUpdate(executorStageSummary, now) + } + stage.excludedExecutors ++= executorIds maybeUpdate(stage, now) } override def onExecutorUnblacklisted(event: SparkListenerExecutorUnblacklisted): Unit = { - updateBlackListStatus(event.executorId, false) + updateExecExclusionStatus(event.executorId, false) + } + + override def onExecutorUnexcluded(event: SparkListenerExecutorUnexcluded): Unit = { + updateExecExclusionStatus(event.executorId, false) } 
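// A hedged, user-side sketch (not part of this patch) of how a custom SparkListener might
// consume the exclusion events handled above. Because both the legacy *Blacklisted and the new
// *Excluded events are posted for the same state change (for backwards compatibility), the
// sketch keeps its state in a Set so that receiving both flavors does not double count.
// The class name and the counting logic are illustrative assumptions, not part of Spark.
import scala.collection.mutable

import org.apache.spark.scheduler.{SparkListener, SparkListenerExecutorBlacklisted, SparkListenerExecutorExcluded, SparkListenerExecutorUnexcluded}

class ExcludedExecutorTracker extends SparkListener {
  private val excluded = mutable.Set[String]()

  override def onExecutorExcluded(event: SparkListenerExecutorExcluded): Unit =
    synchronized { excluded += event.executorId }

  override def onExecutorBlacklisted(event: SparkListenerExecutorBlacklisted): Unit =
    synchronized { excluded += event.executorId }

  override def onExecutorUnexcluded(event: SparkListenerExecutorUnexcluded): Unit =
    synchronized { excluded -= event.executorId }

  def excludedExecutorCount: Int = synchronized { excluded.size }
}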
override def onNodeBlacklisted(event: SparkListenerNodeBlacklisted): Unit = { - updateNodeBlackList(event.hostId, true) + updateNodeExcluded(event.hostId, true) + } + + override def onNodeExcluded(event: SparkListenerNodeExcluded): Unit = { + updateNodeExcluded(event.hostId, true) } override def onNodeUnblacklisted(event: SparkListenerNodeUnblacklisted): Unit = { - updateNodeBlackList(event.hostId, false) + updateNodeExcluded(event.hostId, false) + } + + override def onNodeUnexcluded(event: SparkListenerNodeUnexcluded): Unit = { + updateNodeExcluded(event.hostId, false) + } + + private def updateNodeExclusionStatusForStage(stageId: Int, stageAttemptId: Int, + hostId: String): Unit = { + val now = System.nanoTime() + + // Implicitly exclude every available executor for the stage associated with this node + Option(liveStages.get((stageId, stageAttemptId))).foreach { stage => + val executorIds = liveExecutors.values.filter(exec => exec.host == hostId + && exec.executorId != SparkContext.DRIVER_IDENTIFIER).map(_.executorId).toSeq + setStageExcludedStatus(stage, now, executorIds: _*) + } + liveExecutors.values.filter(exec => exec.hostname == hostId + && exec.executorId != SparkContext.DRIVER_IDENTIFIER).foreach { exec => + addExcludedStageTo(exec, stageId, now) + } + } + + private def updateExclusionStatusForStage(stageId: Int, stageAttemptId: Int, + execId: String): Unit = { + val now = System.nanoTime() + + Option(liveStages.get((stageId, stageAttemptId))).foreach { stage => + setStageExcludedStatus(stage, now, execId) + } + liveExecutors.get(execId).foreach { exec => + addExcludedStageTo(exec, stageId, now) + } } - private def updateBlackListStatus(execId: String, blacklisted: Boolean): Unit = { + private def updateExecExclusionStatus(execId: String, excluded: Boolean): Unit = { liveExecutors.get(execId).foreach { exec => - exec.isBlacklisted = blacklisted - if (blacklisted) { + updateExecExclusionStatus(exec, excluded, System.nanoTime()) + } + } + + private def updateExecExclusionStatus(exec: LiveExecutor, excluded: Boolean, now: Long): Unit = { + // Since we are sending both blacklisted and excluded events for backwards compatibility + // we need to protect against double counting so don't increment if already in + // that state. Also protects against executor being excluded and then node being + // separately excluded which could result in this being called twice for same + // executor. + if (exec.isExcluded != excluded) { + if (excluded) { appStatusSource.foreach(_.BLACKLISTED_EXECUTORS.inc()) + appStatusSource.foreach(_.EXCLUDED_EXECUTORS.inc()) } else { appStatusSource.foreach(_.UNBLACKLISTED_EXECUTORS.inc()) + appStatusSource.foreach(_.UNEXCLUDED_EXECUTORS.inc()) } - liveUpdate(exec, System.nanoTime()) + exec.isExcluded = excluded + liveUpdate(exec, now) } } - private def updateNodeBlackList(host: String, blacklisted: Boolean): Unit = { + private def updateNodeExcluded(host: String, excluded: Boolean): Unit = { val now = System.nanoTime() - // Implicitly (un)blacklist every executor associated with the node. + // Implicitly (un)exclude every executor associated with the node. 
liveExecutors.values.foreach { exec => - if (exec.hostname == host) { - exec.isBlacklisted = blacklisted - liveUpdate(exec, now) + if (exec.hostname == host && exec.executorId != SparkContext.DRIVER_IDENTIFIER) { + updateExecExclusionStatus(exec, excluded, now) } } } @@ -628,6 +689,10 @@ private[spark] class AppStatusListener( stage.killedSummary = killedTasksSummary(event.reason, stage.killedSummary) } stage.activeTasksPerExecutor(event.taskInfo.executorId) -= 1 + + stage.peakExecutorMetrics.compareAndUpdatePeakValues(event.taskExecutorMetrics) + stage.executorSummary(event.taskInfo.executorId).peakExecutorMetrics + .compareAndUpdatePeakValues(event.taskExecutorMetrics) // [SPARK-24415] Wait for all tasks to finish before removing stage from live list val removeStage = stage.activeTasks == 0 && @@ -694,6 +759,7 @@ private[spark] class AppStatusListener( exec.completedTasks += completedDelta exec.failedTasks += failedDelta exec.totalDuration += event.taskInfo.duration + exec.peakExecutorMetrics.compareAndUpdatePeakValues(event.taskExecutorMetrics) // Note: For resubmitted tasks, we continue to use the metrics that belong to the // first attempt of this task. This may not be 100% accurate because the first attempt @@ -759,10 +825,10 @@ private[spark] class AppStatusListener( update(pool, now) } - val executorIdsForStage = stage.blackListedExecutors + val executorIdsForStage = stage.excludedExecutors executorIdsForStage.foreach { executorId => liveExecutors.get(executorId).foreach { exec => - removeBlackListedStageFrom(exec, event.stageInfo.stageId, now) + removeExcludedStageFrom(exec, event.stageInfo.stageId, now) } } @@ -782,8 +848,8 @@ private[spark] class AppStatusListener( deadExecutors.retain((execId, exec) => isExecutorActiveForLiveStages(exec)) } - private def removeBlackListedStageFrom(exec: LiveExecutor, stageId: Int, now: Long) = { - exec.blacklistedInStages -= stageId + private def removeExcludedStageFrom(exec: LiveExecutor, stageId: Int, now: Long) = { + exec.excludedInStages -= stageId liveUpdate(exec, now) } diff --git a/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala b/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala index 20f171bd3c375..d19744db089ba 100644 --- a/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala +++ b/core/src/main/scala/org/apache/spark/status/AppStatusSource.scala @@ -59,9 +59,25 @@ private[spark] class AppStatusSource extends Source { val SKIPPED_TASKS = getCounter("tasks", "skippedTasks") + // This is the count of how many executors have been blacklisted at the application level, + // does not include stage level blacklisting. + // this is private but user visible from metrics so just deprecate + @deprecated("use excludedExecutors instead", "3.1.0") val BLACKLISTED_EXECUTORS = getCounter("tasks", "blackListedExecutors") + // This is the count of how many executors have been unblacklisted at the application level, + // does not include stage level unblacklisting. + @deprecated("use unexcludedExecutors instead", "3.1.0") val UNBLACKLISTED_EXECUTORS = getCounter("tasks", "unblackListedExecutors") + + // This is the count of how many executors have been excluded at the application level, + // does not include stage level exclusion. + val EXCLUDED_EXECUTORS = getCounter("tasks", "excludedExecutors") + + // This is the count of how many executors have been unexcluded at the application level, + // does not include stage level unexclusion. 
+ val UNEXCLUDED_EXECUTORS = getCounter("tasks", "unexcludedExecutors") + } private[spark] object AppStatusSource { diff --git a/core/src/main/scala/org/apache/spark/status/AppStatusStore.scala b/core/src/main/scala/org/apache/spark/status/AppStatusStore.scala index 5c6543fe28a18..b9cc9145feb4d 100644 --- a/core/src/main/scala/org/apache/spark/status/AppStatusStore.scala +++ b/core/src/main/scala/org/apache/spark/status/AppStatusStore.scala @@ -22,9 +22,9 @@ import java.util.{List => JList} import scala.collection.JavaConverters._ import scala.collection.mutable.HashMap -import org.apache.spark.{JobExecutionStatus, SparkConf, SparkException} -import org.apache.spark.resource.ResourceProfileManager +import org.apache.spark.{JobExecutionStatus, SparkConf} import org.apache.spark.status.api.v1 +import org.apache.spark.storage.FallbackStorage.FALLBACK_BLOCK_MANAGER_ID import org.apache.spark.ui.scope._ import org.apache.spark.util.Utils import org.apache.spark.util.kvstore.{InMemoryStore, KVStore} @@ -89,7 +89,7 @@ private[spark] class AppStatusStore( } else { base } - filtered.asScala.map(_.info).toSeq + filtered.asScala.map(_.info).filter(_.id != FALLBACK_BLOCK_MANAGER_ID.executorId).toSeq } def executorSummary(executorId: String): v1.ExecutorSummary = { diff --git a/core/src/main/scala/org/apache/spark/status/KVUtils.scala b/core/src/main/scala/org/apache/spark/status/KVUtils.scala index 45348be5c98b9..c79f2dcd86533 100644 --- a/core/src/main/scala/org/apache/spark/status/KVUtils.scala +++ b/core/src/main/scala/org/apache/spark/status/KVUtils.scala @@ -21,7 +21,6 @@ import java.io.File import scala.annotation.meta.getter import scala.collection.JavaConverters._ -import scala.language.implicitConversions import scala.reflect.{classTag, ClassTag} import com.fasterxml.jackson.annotation.JsonInclude diff --git a/core/src/main/scala/org/apache/spark/status/LiveEntity.scala b/core/src/main/scala/org/apache/spark/status/LiveEntity.scala index 0fadd330a01ad..38f1f25f2fcaa 100644 --- a/core/src/main/scala/org/apache/spark/status/LiveEntity.scala +++ b/core/src/main/scala/org/apache/spark/status/LiveEntity.scala @@ -286,8 +286,8 @@ private[spark] class LiveExecutor(val executorId: String, _addTime: Long) extend var totalInputBytes = 0L var totalShuffleRead = 0L var totalShuffleWrite = 0L - var isBlacklisted = false - var blacklistedInStages: Set[Int] = TreeSet() + var isExcluded = false + var excludedInStages: Set[Int] = TreeSet() var executorLogs = Map[String, String]() var attributes = Map[String, String]() @@ -334,18 +334,20 @@ private[spark] class LiveExecutor(val executorId: String, _addTime: Long) extend totalInputBytes, totalShuffleRead, totalShuffleWrite, - isBlacklisted, + isExcluded, maxMemory, addTime, Option(removeTime), Option(removeReason), executorLogs, memoryMetrics, - blacklistedInStages, + excludedInStages, Some(peakExecutorMetrics).filter(_.isSet), attributes, resources, - resourceProfileId) + resourceProfileId, + isExcluded, + excludedInStages) new ExecutorSummaryWrapper(info) } } @@ -361,7 +363,7 @@ private class LiveExecutorStageSummary( var succeededTasks = 0 var failedTasks = 0 var killedTasks = 0 - var isBlacklisted = false + var isExcluded = false var metrics = createMetrics(default = 0L) @@ -383,8 +385,9 @@ private class LiveExecutorStageSummary( metrics.shuffleWriteMetrics.recordsWritten, metrics.memoryBytesSpilled, metrics.diskBytesSpilled, - isBlacklisted, - Some(peakExecutorMetrics).filter(_.isSet)) + isExcluded, + Some(peakExecutorMetrics).filter(_.isSet), + 
isExcluded) new ExecutorStageSummaryWrapper(stageId, attemptId, executorId, info) } @@ -421,7 +424,7 @@ private class LiveStage extends LiveEntity { val activeTasksPerExecutor = new HashMap[String, Int]().withDefaultValue(0) - var blackListedExecutors = new HashSet[String]() + var excludedExecutors = new HashSet[String]() val peakExecutorMetrics = new ExecutorMetrics() diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala index 83f76db7e89da..cc21c1488f67c 100644 --- a/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala +++ b/core/src/main/scala/org/apache/spark/status/api/v1/ApiRootResource.scala @@ -95,6 +95,8 @@ private[spark] trait UIRoot { .build() } def securityManager: SecurityManager + + def checkUIViewPermissions(appId: String, attemptId: Option[String], user: String): Boolean } private[v1] object UIRootFromServletContext { @@ -145,6 +147,19 @@ private[v1] trait BaseAppResource extends ApiRequestContext { throw new NotFoundException(s"no such app: $appKey") } } + + protected def checkUIViewPermissions(): Unit = { + try { + val user = httpRequest.getRemoteUser() + if (!uiRoot.checkUIViewPermissions(appId, Option(attemptId), user)) { + throw new ForbiddenException(raw"""user "$user" is not authorized""") + } + } catch { + case _: NoSuchElementException => + val appKey = Option(attemptId).map(appId + "/" + _).getOrElse(appId) + throw new NotFoundException(s"no such app: $appKey") + } + } } private[v1] class ForbiddenException(msg: String) extends WebApplicationException( diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/OneApplicationResource.scala b/core/src/main/scala/org/apache/spark/status/api/v1/OneApplicationResource.scala index 536a1fcd59cd0..fb64ff5e60247 100644 --- a/core/src/main/scala/org/apache/spark/status/api/v1/OneApplicationResource.scala +++ b/core/src/main/scala/org/apache/spark/status/api/v1/OneApplicationResource.scala @@ -115,15 +115,14 @@ private[v1] class AbstractApplicationResource extends BaseAppResource { @Path("logs") @Produces(Array(MediaType.APPLICATION_OCTET_STREAM)) def getEventLogs(): Response = { - // Retrieve the UI for the application just to do access permission checks. For backwards - // compatibility, this code also tries with attemptId "1" if the UI without an attempt ID does - // not exist. + // For backwards compatibility, this code also tries with attemptId "1" if the UI + // without an attempt ID does not exist. 
try { - withUI { _ => } + checkUIViewPermissions() } catch { case _: NotFoundException if attemptId == null => attemptId = "1" - withUI { _ => } + checkUIViewPermissions() attemptId = null } diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/api.scala b/core/src/main/scala/org/apache/spark/status/api/v1/api.scala index d207a6023f7f9..96f5b7b5cf27e 100644 --- a/core/src/main/scala/org/apache/spark/status/api/v1/api.scala +++ b/core/src/main/scala/org/apache/spark/status/api/v1/api.scala @@ -82,10 +82,12 @@ class ExecutorStageSummary private[spark]( val shuffleWriteRecords : Long, val memoryBytesSpilled : Long, val diskBytesSpilled : Long, + @deprecated("use isExcludedForStage instead", "3.1.0") val isBlacklistedForStage: Boolean, @JsonSerialize(using = classOf[ExecutorMetricsJsonSerializer]) @JsonDeserialize(using = classOf[ExecutorMetricsJsonDeserializer]) - val peakMemoryMetrics: Option[ExecutorMetrics]) + val peakMemoryMetrics: Option[ExecutorMetrics], + val isExcludedForStage: Boolean) class ExecutorSummary private[spark]( val id: String, @@ -105,6 +107,7 @@ class ExecutorSummary private[spark]( val totalInputBytes: Long, val totalShuffleRead: Long, val totalShuffleWrite: Long, + @deprecated("use isExcluded instead", "3.1.0") val isBlacklisted: Boolean, val maxMemory: Long, val addTime: Date, @@ -112,13 +115,16 @@ class ExecutorSummary private[spark]( val removeReason: Option[String], val executorLogs: Map[String, String], val memoryMetrics: Option[MemoryMetrics], + @deprecated("use excludedInStages instead", "3.1.0") val blacklistedInStages: Set[Int], @JsonSerialize(using = classOf[ExecutorMetricsJsonSerializer]) @JsonDeserialize(using = classOf[ExecutorMetricsJsonDeserializer]) val peakMemoryMetrics: Option[ExecutorMetrics], val attributes: Map[String, String], val resources: Map[String, ResourceInformation], - val resourceProfileId: Int) + val resourceProfileId: Int, + val isExcluded: Boolean, + val excludedInStages: Set[Int]) class MemoryMetrics private[spark]( val usedOnHeapStorageMemory: Long, @@ -136,6 +142,10 @@ private[spark] class ExecutorMetricsJsonDeserializer new TypeReference[Option[Map[String, java.lang.Long]]] {}) metricsMap.map(metrics => new ExecutorMetrics(metrics)) } + + override def getNullValue(ctxt: DeserializationContext): Option[ExecutorMetrics] = { + None + } } /** serializer for peakMemoryMetrics: convert ExecutorMetrics to map with metric name as key */ private[spark] class ExecutorMetricsJsonSerializer @@ -144,11 +154,15 @@ private[spark] class ExecutorMetricsJsonSerializer metrics: Option[ExecutorMetrics], jsonGenerator: JsonGenerator, serializerProvider: SerializerProvider): Unit = { - metrics.foreach { m: ExecutorMetrics => - val metricsMap = ExecutorMetricType.metricToOffset.map { case (metric, _) => - metric -> m.getMetricValue(metric) + if (metrics.isEmpty) { + jsonGenerator.writeNull() + } else { + metrics.foreach { m: ExecutorMetrics => + val metricsMap = ExecutorMetricType.metricToOffset.map { case (metric, _) => + metric -> m.getMetricValue(metric) + } + jsonGenerator.writeObject(metricsMap) } - jsonGenerator.writeObject(metricsMap) } } diff --git a/core/src/main/scala/org/apache/spark/storage/BlockId.scala b/core/src/main/scala/org/apache/spark/storage/BlockId.scala index 7b084e73c92f9..73bf809a08a68 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockId.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockId.scala @@ -20,7 +20,7 @@ package org.apache.spark.storage import java.util.UUID import 
org.apache.spark.SparkException -import org.apache.spark.annotation.DeveloperApi +import org.apache.spark.annotation.{DeveloperApi, Since} /** * :: DeveloperApi :: @@ -81,6 +81,12 @@ case class ShuffleIndexBlockId(shuffleId: Int, mapId: Long, reduceId: Int) exten override def name: String = "shuffle_" + shuffleId + "_" + mapId + "_" + reduceId + ".index" } +@Since("3.2.0") +@DeveloperApi +case class ShufflePushBlockId(shuffleId: Int, mapIndex: Int, reduceId: Int) extends BlockId { + override def name: String = "shufflePush_" + shuffleId + "_" + mapIndex + "_" + reduceId +} + @DeveloperApi case class BroadcastBlockId(broadcastId: Long, field: String = "") extends BlockId { override def name: String = "broadcast_" + broadcastId + (if (field == "") "" else "_" + field) @@ -122,6 +128,7 @@ object BlockId { val SHUFFLE_BATCH = "shuffle_([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)".r val SHUFFLE_DATA = "shuffle_([0-9]+)_([0-9]+)_([0-9]+).data".r val SHUFFLE_INDEX = "shuffle_([0-9]+)_([0-9]+)_([0-9]+).index".r + val SHUFFLE_PUSH = "shufflePush_([0-9]+)_([0-9]+)_([0-9]+)".r val BROADCAST = "broadcast_([0-9]+)([_A-Za-z0-9]*)".r val TASKRESULT = "taskresult_([0-9]+)".r val STREAM = "input-([0-9]+)-([0-9]+)".r @@ -140,6 +147,8 @@ object BlockId { ShuffleDataBlockId(shuffleId.toInt, mapId.toLong, reduceId.toInt) case SHUFFLE_INDEX(shuffleId, mapId, reduceId) => ShuffleIndexBlockId(shuffleId.toInt, mapId.toLong, reduceId.toInt) + case SHUFFLE_PUSH(shuffleId, mapIndex, reduceId) => + ShufflePushBlockId(shuffleId.toInt, mapIndex.toInt, reduceId.toInt) case BROADCAST(broadcastId, field) => BroadcastBlockId(broadcastId.toLong, field.stripPrefix("_")) case TASKRESULT(taskId) => diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala index ff0f38a2479b0..a5b8d5d0c8cda 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManager.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManager.scala @@ -55,8 +55,7 @@ import org.apache.spark.rpc.RpcEnv import org.apache.spark.scheduler.ExecutorCacheTaskLocation import org.apache.spark.serializer.{SerializerInstance, SerializerManager} import org.apache.spark.shuffle.{MigratableResolver, ShuffleManager, ShuffleWriteMetricsReporter} -import org.apache.spark.shuffle.{ShuffleManager, ShuffleWriteMetricsReporter} -import org.apache.spark.storage.BlockManagerMessages.ReplicateBlock +import org.apache.spark.storage.BlockManagerMessages.{DecommissionBlockManager, ReplicateBlock} import org.apache.spark.storage.memory._ import org.apache.spark.unsafe.Platform import org.apache.spark.util._ @@ -243,8 +242,9 @@ private[spark] class BlockManager( private var blockReplicationPolicy: BlockReplicationPolicy = _ + // visible for test // This is volatile since if it's defined we should not accept remote blocks. - @volatile private var decommissioner: Option[BlockManagerDecommissioner] = None + @volatile private[spark] var decommissioner: Option[BlockManagerDecommissioner] = None // A DownloadFileManager used to track all the files of remote blocks which are above the // specified memory threshold. Files will be deleted automatically based on weak reference. 
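// A hedged sketch (not part of the patch) showing how the ShufflePushBlockId introduced in the
// BlockId.scala hunk above round-trips through BlockId.apply via the new SHUFFLE_PUSH pattern.
// The concrete ids below are arbitrary example values.
import org.apache.spark.storage.{BlockId, ShufflePushBlockId}

object ShufflePushBlockIdRoundTrip {
  def main(args: Array[String]): Unit = {
    val id = ShufflePushBlockId(shuffleId = 3, mapIndex = 7, reduceId = 11)
    // name is built as "shufflePush_" + shuffleId + "_" + mapIndex + "_" + reduceId
    assert(id.name == "shufflePush_3_7_11")
    // BlockId.apply parses the string form back into an equal ShufflePushBlockId
    assert(BlockId(id.name) == id)
  }
}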
@@ -627,7 +627,16 @@ private[spark] class BlockManager( override def getLocalBlockData(blockId: BlockId): ManagedBuffer = { if (blockId.isShuffle) { logDebug(s"Getting local shuffle block ${blockId}") - shuffleManager.shuffleBlockResolver.getBlockData(blockId) + try { + shuffleManager.shuffleBlockResolver.getBlockData(blockId) + } catch { + case e: IOException => + if (conf.get(config.STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).isDefined) { + FallbackStorage.read(conf, blockId) + } else { + throw e + } + } } else { getLocalBytes(blockId) match { case Some(blockData) => @@ -1103,7 +1112,7 @@ private[spark] class BlockManager( blockSize: Long): Option[ManagedBuffer] = { val file = ExecutorDiskUtils.getFile(localDirs, subDirsPerLocalDir, blockId.name) if (file.exists()) { - val mangedBuffer = securityManager.getIOEncryptionKey() match { + val managedBuffer = securityManager.getIOEncryptionKey() match { case Some(key) => // Encrypted blocks cannot be memory mapped; return a special object that does decryption // and provides InputStream / FileRegion implementations for reading the data. @@ -1114,7 +1123,7 @@ private[spark] class BlockManager( val transportConf = SparkTransportConf.fromSparkConf(conf, "shuffle") new FileSegmentManagedBuffer(transportConf, file, 0, file.length) } - Some(mangedBuffer) + Some(managedBuffer) } else { None } @@ -1580,7 +1589,12 @@ private[spark] class BlockManager( lastPeerFetchTimeNs = System.nanoTime() logDebug("Fetched peers from master: " + cachedPeers.mkString("[", ",", "]")) } - cachedPeers + if (cachedPeers.isEmpty && + conf.get(config.STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).isDefined) { + Seq(FallbackStorage.FALLBACK_BLOCK_MANAGER_ID) + } else { + cachedPeers + } } } @@ -1809,7 +1823,9 @@ private[spark] class BlockManager( blocksToRemove.size } - def decommissionBlockManager(): Unit = synchronized { + def decommissionBlockManager(): Unit = storageEndpoint.ask(DecommissionBlockManager) + + private[spark] def decommissionSelf(): Unit = synchronized { decommissioner match { case None => logInfo("Starting block manager decommissioning process...") diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerDecommissioner.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerDecommissioner.scala index f0a8e47aa3200..e73e359a70f1e 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerDecommissioner.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerDecommissioner.scala @@ -17,7 +17,7 @@ package org.apache.spark.storage -import java.util.concurrent.ExecutorService +import java.io.IOException import java.util.concurrent.atomic.AtomicInteger import scala.collection.JavaConverters._ @@ -27,7 +27,7 @@ import scala.util.control.NonFatal import org.apache.spark._ import org.apache.spark.internal.Logging import org.apache.spark.internal.config -import org.apache.spark.shuffle.{MigratableResolver, ShuffleBlockInfo} +import org.apache.spark.shuffle.ShuffleBlockInfo import org.apache.spark.storage.BlockManagerMessages.ReplicateBlock import org.apache.spark.util.ThreadUtils @@ -39,6 +39,7 @@ private[storage] class BlockManagerDecommissioner( conf: SparkConf, bm: BlockManager) extends Logging { + private val fallbackStorage = FallbackStorage.getFallbackStorage(conf) private val maxReplicationFailuresForDecommission = conf.get(config.STORAGE_DECOMMISSION_MAX_REPLICATION_FAILURE_PER_BLOCK) @@ -82,23 +83,45 @@ private[storage] class BlockManagerDecommissioner( Thread.sleep(SLEEP_TIME_SECS * 1000L) case 
Some((shuffleBlockInfo, retryCount)) => if (retryCount < maxReplicationFailuresForDecommission) { - logInfo(s"Trying to migrate shuffle ${shuffleBlockInfo} to ${peer}") - val blocks = - bm.migratableResolver.getMigrationBlocks(shuffleBlockInfo) - logDebug(s"Got migration sub-blocks ${blocks}") - blocks.foreach { case (blockId, buffer) => - logDebug(s"Migrating sub-block ${blockId}") - bm.blockTransferService.uploadBlockSync( - peer.host, - peer.port, - peer.executorId, - blockId, - buffer, - StorageLevel.DISK_ONLY, - null)// class tag, we don't need for shuffle - logDebug(s"Migrated sub block ${blockId}") + val blocks = bm.migratableResolver.getMigrationBlocks(shuffleBlockInfo) + if (blocks.isEmpty) { + logInfo(s"Ignore empty shuffle block $shuffleBlockInfo") + } else { + logInfo(s"Got migration sub-blocks ${blocks}") + logInfo(s"Trying to migrate shuffle ${shuffleBlockInfo} to ${peer} " + + s"($retryCount / $maxReplicationFailuresForDecommission)") + + // Migrate the components of the blocks. + try { + blocks.foreach { case (blockId, buffer) => + logDebug(s"Migrating sub-block ${blockId}") + bm.blockTransferService.uploadBlockSync( + peer.host, + peer.port, + peer.executorId, + blockId, + buffer, + StorageLevel.DISK_ONLY, + null) // class tag, we don't need for shuffle + logDebug(s"Migrated sub block ${blockId}") + } + logInfo(s"Migrated ${shuffleBlockInfo} to ${peer}") + } catch { + case e: IOException => + // If a block got deleted before netty opened the file handle, then trying to + // load the blocks now will fail. This is most likely to occur if we start + // migrating blocks and then the shuffle TTL cleaner kicks in. However this + // could also happen with manually managed shuffles or a GC event on the + // driver removing a no longer referenced RDD with shuffle files. + if (bm.migratableResolver.getMigrationBlocks(shuffleBlockInfo).isEmpty) { + logWarning(s"Skipping block ${shuffleBlockInfo}, block deleted.") + } else if (fallbackStorage.isDefined) { + fallbackStorage.foreach(_.copy(shuffleBlockInfo, bm)) + } else { + throw e + } + } } - logDebug(s"Migrated ${shuffleBlockInfo} to ${peer}") } else { logError(s"Skipping block ${shuffleBlockInfo} because it has failed ${retryCount} times") } @@ -113,6 +136,7 @@ private[storage] class BlockManagerDecommissioner( case Some((shuffleMap, retryCount)) => logError(s"Error during migration, adding ${shuffleMap} back to migration queue", e) shufflesToMigrate.add((shuffleMap, retryCount + 1)) + running = false case None => logError(s"Error while waiting for block to migrate", e) } @@ -121,11 +145,11 @@ } // Shuffles which are either in queue for migrations or migrated - private val migratingShuffles = mutable.HashSet[ShuffleBlockInfo]() + protected[storage] val migratingShuffles = mutable.HashSet[ShuffleBlockInfo]() // Shuffles which have migrated. This is used to know when we are "done", being done can change // if a new shuffle file is created by a running task. - private val numMigratedShuffles = new AtomicInteger(0) + private[storage] val numMigratedShuffles = new AtomicInteger(0) // Shuffles which are queued for migration & number of retries so far. // Visible in storage for testing. 
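// A hedged, condensed sketch (not the patch code itself) of the IOException handling policy in
// the migration loop above: a block whose source files were deleted mid-migration is skipped,
// a configured fallback storage (if any) receives a copy, and otherwise the error propagates so
// the block can be retried. The parameter names below are illustrative placeholders.
import java.io.IOException

object MigrationFallbackSketch {
  def migrateOne(
      uploadToPeer: () => Unit,
      sourceStillExists: () => Boolean,
      copyToFallback: Option[() => Unit]): Unit = {
    try {
      uploadToPeer()
    } catch {
      case e: IOException =>
        if (!sourceStillExists()) {
          // e.g. the shuffle TTL cleaner removed the files before netty opened them
          println("Skipping block, source deleted")
        } else if (copyToFallback.isDefined) {
          // Off-load the block to the configured fallback storage instead of a peer
          copyToFallback.foreach(_.apply())
        } else {
          throw e
        }
    }
  }
}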
@@ -186,7 +210,7 @@ private[storage] class BlockManagerDecommissioner( private val shuffleBlockMigrationRefreshRunnable = new Runnable { val sleepInterval = conf.get(config.STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL) - override def run() { + override def run(): Unit = { assert(conf.get(config.STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED)) while (!stopped && !stoppedShuffle && !Thread.interrupted()) { try { @@ -225,9 +249,12 @@ private[storage] class BlockManagerDecommissioner( // Update the queue of shuffles to be migrated logInfo("Offloading shuffle blocks") val localShuffles = bm.migratableResolver.getStoredShuffles().toSet - val newShufflesToMigrate = localShuffles.diff(migratingShuffles).toSeq + val newShufflesToMigrate = (localShuffles.diff(migratingShuffles)).toSeq + .sortBy(b => (b.shuffleId, b.mapId)) shufflesToMigrate.addAll(newShufflesToMigrate.map(x => (x, 0)).asJava) migratingShuffles ++= newShufflesToMigrate + logInfo(s"${newShufflesToMigrate.size} of ${localShuffles.size} local shuffles " + + s"are added. In total, ${migratingShuffles.size} shuffles remain.") // Update the threads doing migrations val livePeerSet = bm.getPeers(false).toSet @@ -249,7 +276,7 @@ private[storage] class BlockManagerDecommissioner( stoppedShuffle = true } // If we found any new shuffles to migrate or otherwise have not migrated everything. - newShufflesToMigrate.nonEmpty || migratingShuffles.size < numMigratedShuffles.get() + newShufflesToMigrate.nonEmpty || migratingShuffles.size > numMigratedShuffles.get() } /** diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala index 49e32d04d450a..c6a4457d8f910 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerId.scala @@ -145,4 +145,6 @@ private[spark] object BlockManagerId { def getCachedBlockManagerId(id: BlockManagerId): BlockManagerId = { blockManagerIdCache.get(id) } + + private[spark] val SHUFFLE_MERGER_IDENTIFIER = "shuffle-push-merger" + } diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala index f544d47b8e13c..fe1a5aef9499c 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMaster.scala @@ -125,6 +125,26 @@ class BlockManagerMaster( driverEndpoint.askSync[Seq[BlockManagerId]](GetPeers(blockManagerId)) } + /** + * Get a list of unique shuffle service locations where an executor was successfully + * registered in the past for block push/merge with push based shuffle. + */ + def getShufflePushMergerLocations( + numMergersNeeded: Int, + hostsToFilter: Set[String]): Seq[BlockManagerId] = { + driverEndpoint.askSync[Seq[BlockManagerId]]( + GetShufflePushMergerLocations(numMergersNeeded, hostsToFilter)) + } + + /** + * Remove the host from the candidate list of shuffle push mergers. 
This can be + * triggered if there is a FetchFailedException on the host + * @param host + */ + def removeShufflePushMergerLocation(host: String): Unit = { + driverEndpoint.askSync[Seq[BlockManagerId]](RemoveShufflePushMergerLocation(host)) + } + def getExecutorEndpointRef(executorId: String): Option[RpcEndpointRef] = { driverEndpoint.askSync[Option[RpcEndpointRef]](GetExecutorEndpointRef(executorId)) } diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala index a3d42348befaa..eada4b3ee2e38 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMasterEndpoint.scala @@ -33,7 +33,7 @@ import org.apache.spark.{MapOutputTrackerMaster, SparkConf} import org.apache.spark.annotation.DeveloperApi import org.apache.spark.internal.{config, Logging} import org.apache.spark.network.shuffle.ExternalBlockStoreClient -import org.apache.spark.rpc.{IsolatedRpcEndpoint, RpcCallContext, RpcEndpointAddress, RpcEndpointRef, RpcEnv} +import org.apache.spark.rpc.{IsolatedRpcEndpoint, RpcCallContext, RpcEndpointRef, RpcEnv} import org.apache.spark.scheduler._ import org.apache.spark.scheduler.cluster.{CoarseGrainedClusterMessages, CoarseGrainedSchedulerBackend} import org.apache.spark.storage.BlockManagerMessages._ @@ -74,6 +74,14 @@ class BlockManagerMasterEndpoint( // Mapping from block id to the set of block managers that have the block. private val blockLocations = new JHashMap[BlockId, mutable.HashSet[BlockManagerId]] + // Mapping from host name to shuffle (mergers) services where the current app + // registered an executor in the past. Older hosts are removed when the + // maxRetainedMergerLocations size is reached in favor of newer locations. 
+ private val shuffleMergerLocations = new mutable.LinkedHashMap[String, BlockManagerId]() + + // Maximum number of merger locations to cache + private val maxRetainedMergerLocations = conf.get(config.SHUFFLE_MERGER_MAX_RETAINED_LOCATIONS) + private val askThreadPool = ThreadUtils.newDaemonCachedThreadPool("block-manager-ask-thread-pool", 100) private implicit val askExecutionContext = ExecutionContext.fromExecutorService(askThreadPool) @@ -92,6 +100,8 @@ class BlockManagerMasterEndpoint( val defaultRpcTimeout = RpcUtils.askRpcTimeout(conf) + private val pushBasedShuffleEnabled = Utils.isPushBasedShuffleEnabled(conf) + logInfo("BlockManagerMasterEndpoint up") // same as `conf.get(config.SHUFFLE_SERVICE_ENABLED) // && conf.get(config.SHUFFLE_SERVICE_FETCH_RDD_ENABLED)` @@ -139,6 +149,12 @@ class BlockManagerMasterEndpoint( case GetBlockStatus(blockId, askStorageEndpoints) => context.reply(blockStatus(blockId, askStorageEndpoints)) + case GetShufflePushMergerLocations(numMergersNeeded, hostsToFilter) => + context.reply(getShufflePushMergerLocations(numMergersNeeded, hostsToFilter)) + + case RemoveShufflePushMergerLocation(host) => + context.reply(removeShufflePushMergerLocation(host)) + case IsExecutorAlive(executorId) => context.reply(blockManagerIdByExecutor.contains(executorId)) @@ -163,8 +179,14 @@ class BlockManagerMasterEndpoint( context.reply(true) case DecommissionBlockManagers(executorIds) => - val bmIds = executorIds.flatMap(blockManagerIdByExecutor.get) - decommissionBlockManagers(bmIds) + // Mark corresponding BlockManagers as being decommissioning by adding them to + // decommissioningBlockManagerSet, so they won't be used to replicate or migrate blocks. + // Note that BlockManagerStorageEndpoint will be notified about decommissioning when the + // executor is notified(see BlockManager.decommissionSelf), so we don't need to send the + // notification here. 
+ val bms = executorIds.flatMap(blockManagerIdByExecutor.get) + logInfo(s"Mark BlockManagers (${bms.mkString(", ")}) as being decommissioning.") + decommissioningBlockManagerSet ++= bms context.reply(true) case GetReplicateInfoForRDDBlocks(blockManagerId) => @@ -335,7 +357,7 @@ class BlockManagerMasterEndpoint( blockLocations.remove(blockId) logWarning(s"No more replicas available for $blockId !") } else if (proactivelyReplicate && (blockId.isRDD || blockId.isInstanceOf[TestBlockId])) { - // As a heursitic, assume single executor failure to find out the number of replicas that + // As a heuristic, assume single executor failure to find out the number of replicas that // existed before failure val maxReplicas = locations.size + 1 val i = (new Random(blockId.hashCode)).nextInt(locations.size) @@ -354,42 +376,44 @@ class BlockManagerMasterEndpoint( } + private def addMergerLocation(blockManagerId: BlockManagerId): Unit = { + if (!blockManagerId.isDriver && !shuffleMergerLocations.contains(blockManagerId.host)) { + val shuffleServerId = BlockManagerId(BlockManagerId.SHUFFLE_MERGER_IDENTIFIER, + blockManagerId.host, externalShuffleServicePort) + if (shuffleMergerLocations.size >= maxRetainedMergerLocations) { + shuffleMergerLocations -= shuffleMergerLocations.head._1 + } + shuffleMergerLocations(shuffleServerId.host) = shuffleServerId + } + } + private def removeExecutor(execId: String): Unit = { logInfo("Trying to remove executor " + execId + " from BlockManagerMaster.") blockManagerIdByExecutor.get(execId).foreach(removeBlockManager) } - /** - * Decommission the given Seq of blockmanagers - * - Adds these block managers to decommissioningBlockManagerSet Set - * - Sends the DecommissionBlockManager message to each of the [[BlockManagerReplicaEndpoint]] - */ - def decommissionBlockManagers(blockManagerIds: Seq[BlockManagerId]): Future[Seq[Unit]] = { - val newBlockManagersToDecommission = blockManagerIds.toSet.diff(decommissioningBlockManagerSet) - val futures = newBlockManagersToDecommission.map { blockManagerId => - decommissioningBlockManagerSet.add(blockManagerId) - val info = blockManagerInfo(blockManagerId) - info.storageEndpoint.ask[Unit](DecommissionBlockManager) - } - Future.sequence{ futures.toSeq } - } - /** * Returns a Seq of ReplicateBlock for each RDD block stored by given blockManagerId * @param blockManagerId - block manager id for which ReplicateBlock info is needed * @return Seq of ReplicateBlock */ private def getReplicateInfoForRDDBlocks(blockManagerId: BlockManagerId): Seq[ReplicateBlock] = { - val info = blockManagerInfo(blockManagerId) + try { + val info = blockManagerInfo(blockManagerId) - val rddBlocks = info.blocks.keySet().asScala.filter(_.isRDD) - rddBlocks.map { blockId => - val currentBlockLocations = blockLocations.get(blockId) - val maxReplicas = currentBlockLocations.size + 1 - val remainingLocations = currentBlockLocations.toSeq.filter(bm => bm != blockManagerId) - val replicateMsg = ReplicateBlock(blockId, remainingLocations, maxReplicas) - replicateMsg - }.toSeq + val rddBlocks = info.blocks.keySet().asScala.filter(_.isRDD) + rddBlocks.map { blockId => + val currentBlockLocations = blockLocations.get(blockId) + val maxReplicas = currentBlockLocations.size + 1 + val remainingLocations = currentBlockLocations.toSeq.filter(bm => bm != blockManagerId) + val replicateMsg = ReplicateBlock(blockId, remainingLocations, maxReplicas) + replicateMsg + }.toSeq + } catch { + // If the block manager has already exited, nothing to replicate. 
+ case e: java.util.NoSuchElementException => + Seq.empty[ReplicateBlock] + } } // Remove a block from the workers that have it. This can only be used to remove @@ -529,6 +553,10 @@ class BlockManagerMasterEndpoint( blockManagerInfo(id) = new BlockManagerInfo(id, System.currentTimeMillis(), maxOnHeapMemSize, maxOffHeapMemSize, storageEndpoint, externalShuffleServiceBlockStatus) + + if (pushBasedShuffleEnabled) { + addMergerLocation(id) + } } listenerBus.post(SparkListenerBlockManagerAdded(time, id, maxOnHeapMemSize + maxOffHeapMemSize, Some(maxOnHeapMemSize), Some(maxOffHeapMemSize))) @@ -660,6 +688,40 @@ class BlockManagerMasterEndpoint( } } + private def getShufflePushMergerLocations( + numMergersNeeded: Int, + hostsToFilter: Set[String]): Seq[BlockManagerId] = { + val blockManagerHosts = blockManagerIdByExecutor.values.map(_.host).toSet + val filteredBlockManagerHosts = blockManagerHosts.filterNot(hostsToFilter.contains(_)) + val filteredMergersWithExecutors = filteredBlockManagerHosts.map( + BlockManagerId(BlockManagerId.SHUFFLE_MERGER_IDENTIFIER, _, externalShuffleServicePort)) + // Enough mergers are available as part of active executors list + if (filteredMergersWithExecutors.size >= numMergersNeeded) { + filteredMergersWithExecutors.toSeq + } else { + // Delta mergers added from inactive mergers list to the active mergers list + val filteredMergersWithExecutorsHosts = filteredMergersWithExecutors.map(_.host) + val filteredMergersWithoutExecutors = shuffleMergerLocations.values + .filterNot(x => hostsToFilter.contains(x.host)) + .filterNot(x => filteredMergersWithExecutorsHosts.contains(x.host)) + val randomFilteredMergersLocations = + if (filteredMergersWithoutExecutors.size > + numMergersNeeded - filteredMergersWithExecutors.size) { + Utils.randomize(filteredMergersWithoutExecutors) + .take(numMergersNeeded - filteredMergersWithExecutors.size) + } else { + filteredMergersWithoutExecutors + } + filteredMergersWithExecutors.toSeq ++ randomFilteredMergersLocations + } + } + + private def removeShufflePushMergerLocation(host: String): Unit = { + if (shuffleMergerLocations.contains(host)) { + shuffleMergerLocations.remove(host) + } + } + /** * Returns an [[RpcEndpointRef]] of the [[BlockManagerReplicaEndpoint]] for sending RPC messages. */ @@ -740,7 +802,6 @@ private[spark] class BlockManagerInfo( if (storageLevel.isValid) { /* isValid means it is either stored in-memory or on-disk. * The memSize here indicates the data size in or dropped from memory, - * externalBlockStoreSize here indicates the data size in or dropped from externalBlockStore, * and the diskSize here indicates the data size in or dropped to disk. * They can be both larger than 0, when a block is dropped from memory to disk. * Therefore, a safe way to set BlockStatus is to set its info in accurate modes. 
*/ diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerMessages.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerMessages.scala index bbc076cea9ba8..afe416a55ed0d 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerMessages.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerMessages.scala @@ -141,4 +141,10 @@ private[spark] object BlockManagerMessages { case class BlockManagerHeartbeat(blockManagerId: BlockManagerId) extends ToBlockManagerMaster case class IsExecutorAlive(executorId: String) extends ToBlockManagerMaster + + case class GetShufflePushMergerLocations(numMergersNeeded: Int, hostsToFilter: Set[String]) + extends ToBlockManagerMaster + + case class RemoveShufflePushMergerLocation(host: String) extends ToBlockManagerMaster + } diff --git a/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala b/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala index a69bebc23c661..54a72568b18fa 100644 --- a/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala +++ b/core/src/main/scala/org/apache/spark/storage/BlockManagerStorageEndpoint.scala @@ -62,7 +62,7 @@ class BlockManagerStorageEndpoint( } case DecommissionBlockManager => - context.reply(blockManager.decommissionBlockManager()) + context.reply(blockManager.decommissionSelf()) case RemoveBroadcast(broadcastId, _) => doAsync[Int]("removing broadcast " + broadcastId, context) { diff --git a/core/src/main/scala/org/apache/spark/storage/FallbackStorage.scala b/core/src/main/scala/org/apache/spark/storage/FallbackStorage.scala new file mode 100644 index 0000000000000..41126357f8983 --- /dev/null +++ b/core/src/main/scala/org/apache/spark/storage/FallbackStorage.scala @@ -0,0 +1,174 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.storage + +import java.io.DataInputStream +import java.nio.ByteBuffer + +import scala.concurrent.Future +import scala.reflect.ClassTag + +import org.apache.hadoop.fs.{FileSystem, Path} + +import org.apache.spark.SparkConf +import org.apache.spark.deploy.SparkHadoopUtil +import org.apache.spark.internal.Logging +import org.apache.spark.internal.config.STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH +import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer} +import org.apache.spark.rpc.{RpcAddress, RpcEndpointRef, RpcTimeout} +import org.apache.spark.shuffle.{IndexShuffleBlockResolver, ShuffleBlockInfo} +import org.apache.spark.shuffle.IndexShuffleBlockResolver.NOOP_REDUCE_ID +import org.apache.spark.util.Utils + +/** + * A fallback storage used by storage decommissioners. 
+ */ +private[storage] class FallbackStorage(conf: SparkConf) extends Logging { + require(conf.contains("spark.app.id")) + require(conf.get(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).isDefined) + + private val fallbackPath = new Path(conf.get(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).get) + private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf) + private val fallbackFileSystem = FileSystem.get(fallbackPath.toUri, hadoopConf) + private val appId = conf.getAppId + + // Visible for testing + def copy( + shuffleBlockInfo: ShuffleBlockInfo, + bm: BlockManager): Unit = { + val shuffleId = shuffleBlockInfo.shuffleId + val mapId = shuffleBlockInfo.mapId + + bm.migratableResolver match { + case r: IndexShuffleBlockResolver => + val indexFile = r.getIndexFile(shuffleId, mapId) + + if (indexFile.exists()) { + fallbackFileSystem.copyFromLocalFile( + new Path(indexFile.getAbsolutePath), + new Path(fallbackPath, s"$appId/$shuffleId/${indexFile.getName}")) + + val dataFile = r.getDataFile(shuffleId, mapId) + if (dataFile.exists()) { + fallbackFileSystem.copyFromLocalFile( + new Path(dataFile.getAbsolutePath), + new Path(fallbackPath, s"$appId/$shuffleId/${dataFile.getName}")) + } + + // Report block statuses + val reduceId = NOOP_REDUCE_ID + val indexBlockId = ShuffleIndexBlockId(shuffleId, mapId, reduceId) + FallbackStorage.reportBlockStatus(bm, indexBlockId, indexFile.length) + if (dataFile.exists) { + val dataBlockId = ShuffleDataBlockId(shuffleId, mapId, reduceId) + FallbackStorage.reportBlockStatus(bm, dataBlockId, dataFile.length) + } + } + case r => + logWarning(s"Unsupported Resolver: ${r.getClass.getName}") + } + } + + def exists(shuffleId: Int, filename: String): Boolean = { + fallbackFileSystem.exists(new Path(fallbackPath, s"$appId/$shuffleId/$filename")) + } +} + +class NoopRpcEndpointRef(conf: SparkConf) extends RpcEndpointRef(conf) { + import scala.concurrent.ExecutionContext.Implicits.global + override def address: RpcAddress = null + override def name: String = "fallback" + override def send(message: Any): Unit = {} + override def ask[T: ClassTag](message: Any, timeout: RpcTimeout): Future[T] = { + Future{true.asInstanceOf[T]} + } +} + +object FallbackStorage extends Logging { + /** We use one block manager id as a place holder. */ + val FALLBACK_BLOCK_MANAGER_ID: BlockManagerId = BlockManagerId("fallback", "remote", 7337) + + def getFallbackStorage(conf: SparkConf): Option[FallbackStorage] = { + if (conf.get(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).isDefined) { + Some(new FallbackStorage(conf)) + } else { + None + } + } + + /** Register the fallback block manager and its RPC endpoint. */ + def registerBlockManagerIfNeeded(master: BlockManagerMaster, conf: SparkConf): Unit = { + if (conf.get(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).isDefined) { + master.registerBlockManager( + FALLBACK_BLOCK_MANAGER_ID, Array.empty[String], 0, 0, new NoopRpcEndpointRef(conf)) + } + } + + /** Report block status to block manager master and map output tracker master. */ + private def reportBlockStatus(blockManager: BlockManager, blockId: BlockId, dataLength: Long) = { + assert(blockManager.master != null) + blockManager.master.updateBlockInfo( + FALLBACK_BLOCK_MANAGER_ID, blockId, StorageLevel.DISK_ONLY, memSize = 0, dataLength) + } + + /** + * Read a ManagedBuffer. 
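// A standalone sketch of the offset arithmetic used by the read path defined just below
// (illustrative only; shuffleBlockRange is not a Spark API). A shuffle index file stores
// one Long offset per reduce-partition boundary, so the bytes for reduce partitions
// [startReduceId, endReduceId) live between index(startReduceId) and index(endReduceId)
// in the data file.
def shuffleBlockRange(indexEntries: IndexedSeq[Long], startReduceId: Int, endReduceId: Int): (Long, Long) = {
  val offset = indexEntries(startReduceId)    // stored at byte position startReduceId * 8 on disk
  val nextOffset = indexEntries(endReduceId)  // stored at byte position endReduceId * 8 on disk
  (offset, nextOffset - offset)               // (start of the range, number of bytes to read)
}
// e.g. shuffleBlockRange(Vector(0L, 100L, 250L), 1, 2) == (100L, 150L)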
+ */ + def read(conf: SparkConf, blockId: BlockId): ManagedBuffer = { + logInfo(s"Read $blockId") + val fallbackPath = new Path(conf.get(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH).get) + val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf) + val fallbackFileSystem = FileSystem.get(fallbackPath.toUri, hadoopConf) + val appId = conf.getAppId + + val (shuffleId, mapId, startReduceId, endReduceId) = blockId match { + case id: ShuffleBlockId => + (id.shuffleId, id.mapId, id.reduceId, id.reduceId + 1) + case batchId: ShuffleBlockBatchId => + (batchId.shuffleId, batchId.mapId, batchId.startReduceId, batchId.endReduceId) + case _ => + throw new IllegalArgumentException("unexpected shuffle block id format: " + blockId) + } + + val name = ShuffleIndexBlockId(shuffleId, mapId, NOOP_REDUCE_ID).name + val indexFile = new Path(fallbackPath, s"$appId/$shuffleId/$name") + val start = startReduceId * 8L + val end = endReduceId * 8L + Utils.tryWithResource(fallbackFileSystem.open(indexFile)) { inputStream => + Utils.tryWithResource(new DataInputStream(inputStream)) { index => + index.skip(start) + val offset = index.readLong() + index.skip(end - (start + 8L)) + val nextOffset = index.readLong() + val name = ShuffleDataBlockId(shuffleId, mapId, NOOP_REDUCE_ID).name + val dataFile = new Path(fallbackPath, s"$appId/$shuffleId/$name") + val f = fallbackFileSystem.open(dataFile) + val size = nextOffset - offset + logDebug(s"To byte array $size") + val array = new Array[Byte](size.toInt) + val startTimeNs = System.nanoTime() + f.seek(offset) + f.read(array) + logDebug(s"Took ${(System.nanoTime() - startTimeNs) / (1000 * 1000)}ms") + f.close() + new NioManagedBuffer(ByteBuffer.wrap(array)) + } + } + } +} + diff --git a/core/src/main/scala/org/apache/spark/storage/RDDInfo.scala b/core/src/main/scala/org/apache/spark/storage/RDDInfo.scala index 27a4d4b64175e..f3575c4e43eb0 100644 --- a/core/src/main/scala/org/apache/spark/storage/RDDInfo.scala +++ b/core/src/main/scala/org/apache/spark/storage/RDDInfo.scala @@ -38,7 +38,6 @@ class RDDInfo( var numCachedPartitions = 0 var memSize = 0L var diskSize = 0L - var externalBlockStoreSize = 0L def isCached: Boolean = (memSize + diskSize > 0) && numCachedPartitions > 0 diff --git a/core/src/main/scala/org/apache/spark/storage/ShuffleBlockFetcherIterator.scala b/core/src/main/scala/org/apache/spark/storage/ShuffleBlockFetcherIterator.scala index e3b3fc5cc4565..fa4e46590aa5e 100644 --- a/core/src/main/scala/org/apache/spark/storage/ShuffleBlockFetcherIterator.scala +++ b/core/src/main/scala/org/apache/spark/storage/ShuffleBlockFetcherIterator.scala @@ -295,8 +295,9 @@ final class ShuffleBlockFetcherIterator( var hostLocalBlockBytes = 0L var remoteBlockBytes = 0L + val fallback = FallbackStorage.FALLBACK_BLOCK_MANAGER_ID.executorId for ((address, blockInfos) <- blocksByAddress) { - if (address.executorId == blockManager.blockManagerId.executorId) { + if (Seq(blockManager.blockManagerId.executorId, fallback).contains(address.executorId)) { checkBlockSizes(blockInfos) val mergedBlockInfos = mergeContinuousShuffleBlockIdsIfNeeded( blockInfos.map(info => FetchBlockInfo(info._1, info._2, info._3)), doBatchFetch) diff --git a/core/src/main/scala/org/apache/spark/storage/StorageLevel.scala b/core/src/main/scala/org/apache/spark/storage/StorageLevel.scala index f6db73ba805b1..ce89c2ae90b49 100644 --- a/core/src/main/scala/org/apache/spark/storage/StorageLevel.scala +++ b/core/src/main/scala/org/apache/spark/storage/StorageLevel.scala @@ -45,7 +45,7 @@ class StorageLevel private( 
extends Externalizable { // TODO: Also add fields for caching priority, dataset ID, and flushing. - private def this(flags: Int, replication: Int) { + private def this(flags: Int, replication: Int) = { this((flags & 8) != 0, (flags & 4) != 0, (flags & 2) != 0, (flags & 1) != 0, replication) } diff --git a/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala b/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala index fc426eee608c0..c607fb28b2f56 100644 --- a/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala +++ b/core/src/main/scala/org/apache/spark/storage/StorageUtils.scala @@ -61,7 +61,7 @@ private[spark] class StorageStatus( maxMemory: Long, maxOnHeapMem: Option[Long], maxOffHeapMem: Option[Long], - initialBlocks: Map[BlockId, BlockStatus]) { + initialBlocks: Map[BlockId, BlockStatus]) = { this(bmid, maxMemory, maxOnHeapMem, maxOffHeapMem) initialBlocks.foreach { case (bid, bstatus) => addBlock(bid, bstatus) } } @@ -169,7 +169,7 @@ private[spark] class StorageStatus( .getOrElse((0L, 0L)) case _ if !level.useOffHeap => (_nonRddStorageInfo.onHeapUsage, _nonRddStorageInfo.diskUsage) - case _ if level.useOffHeap => + case _ => (_nonRddStorageInfo.offHeapUsage, _nonRddStorageInfo.diskUsage) } val newMem = math.max(oldMem + changeInMem, 0L) diff --git a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala index 2a3597e323543..663da0d33e20b 100644 --- a/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala +++ b/core/src/main/scala/org/apache/spark/ui/JettyUtils.scala @@ -401,17 +401,13 @@ private[spark] object JettyUtils extends Logging { uri.append(rest) } - val rewrittenURI = URI.create(uri.toString()) - if (query != null) { - return new URI( - rewrittenURI.getScheme(), - rewrittenURI.getAuthority(), - rewrittenURI.getPath(), - query, - rewrittenURI.getFragment() - ).normalize() + val queryString = if (query == null) { + "" + } else { + s"?$query" } - rewrittenURI.normalize() + // SPARK-33611: use method `URI.create` to avoid percent-encoding twice on the query string. + URI.create(uri.toString() + queryString).normalize() } def createProxyLocationHeader( diff --git a/core/src/main/scala/org/apache/spark/ui/SparkUI.scala b/core/src/main/scala/org/apache/spark/ui/SparkUI.scala index 8ae9828c3fee1..b1769a8a9c9ee 100644 --- a/core/src/main/scala/org/apache/spark/ui/SparkUI.scala +++ b/core/src/main/scala/org/apache/spark/ui/SparkUI.scala @@ -110,6 +110,11 @@ private[spark] class SparkUI private ( } } + override def checkUIViewPermissions(appId: String, attemptId: Option[String], + user: String): Boolean = { + securityManager.checkUIViewPermissions(user) + } + def getApplicationInfoList: Iterator[ApplicationInfo] = { Iterator(new ApplicationInfo( id = appId, diff --git a/core/src/main/scala/org/apache/spark/ui/ToolTips.scala b/core/src/main/scala/org/apache/spark/ui/ToolTips.scala index aefd001e573f9..a7c42b86468b2 100644 --- a/core/src/main/scala/org/apache/spark/ui/ToolTips.scala +++ b/core/src/main/scala/org/apache/spark/ui/ToolTips.scala @@ -91,9 +91,6 @@ private[spark] object ToolTips { val TASK_TIME = "Shaded red when garbage collection (GC) time is over 10% of task time" - val BLACKLISTED = - "Shows if this executor has been blacklisted by the scheduler due to task failures." - val APPLICATION_EXECUTOR_LIMIT = """Maximum number of executors that this application will use. This limit is finite only when dynamic allocation is enabled. 
The number of granted executors may exceed the limit diff --git a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala index a070cc9c7b39d..5e3406037a72b 100644 --- a/core/src/main/scala/org/apache/spark/ui/UIUtils.scala +++ b/core/src/main/scala/org/apache/spark/ui/UIUtils.scala @@ -231,7 +231,7 @@ private[spark] object UIUtils extends Logging { - + @@ -639,7 +639,8 @@ private[spark] object UIUtils extends Logging { */ def makeHref(proxy: Boolean, id: String, origHref: String): String = { if (proxy) { - s"/proxy/$id" + val proxyPrefix = sys.props.getOrElse("spark.ui.proxyBase", "") + proxyPrefix + "/proxy/" + id } else { origHref } diff --git a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala index 2c7aeeabb3601..c3246dc90976c 100644 --- a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala +++ b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorThreadDumpPage.scala @@ -41,10 +41,10 @@ private[ui] class ExecutorThreadDumpPage( val dumpRows = threadDump.map { thread => val threadId = thread.threadId val blockedBy = thread.blockedByThreadId match { - case Some(_) => + case Some(blockingThreadId) => case None => Text("") } diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala index 4e76ea289ede6..cfe15eb832273 100644 --- a/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala +++ b/core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala @@ -85,7 +85,7 @@ private[ui] class AllJobsPage(parent: JobsTab, store: AppStatusStore) extends We } // The timeline library treats contents as HTML, so we have to escape them. We need to add - // extra layers of escaping in order to embed this in a Javascript string literal. + // extra layers of escaping in order to embed this in a JavaScript string literal. val escapedDesc = Utility.escape(jobDescription) val jsEscapedDescForTooltip = StringEscapeUtils.escapeEcmaScript(Utility.escape(escapedDesc)) val jsEscapedDescForLabel = StringEscapeUtils.escapeEcmaScript(escapedDesc) @@ -147,7 +147,8 @@ private[ui] class AllJobsPage(parent: JobsTab, store: AppStatusStore) extends We | 'Removed at ${UIUtils.formatDate(removeTime)}' + | '${ e.removeReason.map { reason => - s"""
<br>Reason: ${reason.replace("\n", " ")}""" + s"""<br>
Reason: ${StringEscapeUtils.escapeEcmaScript( + reason.replace("\n", " "))}""" }.getOrElse("") }"' + | 'data-html="true">Executor ${e.id} removed' diff --git a/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala b/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala index df239d6d0e187..1dfbce82c852b 100644 --- a/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala +++ b/core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala @@ -68,7 +68,7 @@ private[ui] class JobPage(parent: JobsTab, store: AppStatusStore) extends WebUIP .getOrElse(System.currentTimeMillis()) // The timeline library treats contents as HTML, so we have to escape them. We need to add - // extra layers of escaping in order to embed this in a Javascript string literal. + // extra layers of escaping in order to embed this in a JavaScript string literal. val escapedName = Utility.escape(name) val jsEscapedNameForTooltip = StringEscapeUtils.escapeEcmaScript(Utility.escape(escapedName)) val jsEscapedNameForLabel = StringEscapeUtils.escapeEcmaScript(escapedName) @@ -127,7 +127,8 @@ private[ui] class JobPage(parent: JobsTab, store: AppStatusStore) extends WebUIP | 'Removed at ${UIUtils.formatDate(removeTime)}' + | '${ e.removeReason.map { reason => - s"""
<br>Reason: ${reason.replace("\n", " ")}""" + s"""<br>
Reason: ${StringEscapeUtils.escapeEcmaScript( + reason.replace("\n", " "))}""" }.getOrElse("") }"' + | 'data-html="true">Executor ${e.id} removed' @@ -283,9 +284,9 @@ private[ui] class JobPage(parent: JobsTab, store: AppStatusStore) extends WebUIP val pendingOrSkippedTableId = if (isComplete) { - "pending" - } else { "skipped" + } else { + "pending" } val activeStagesTable = diff --git a/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala b/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala index 6ffd6605f75b8..7e2b9c72ad91b 100644 --- a/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala +++ b/core/src/main/scala/org/apache/spark/util/ClosureCleaner.scala @@ -285,7 +285,7 @@ private[spark] object ClosureCleaner extends Logging { logDebug(s" + outermost object is a closure, so we clone it: ${outermostClass}") } else if (outermostClass.getName.startsWith("$line")) { // SPARK-14558: if the outermost object is a REPL line object, we should clone - // and clean it as it may carray a lot of unnecessary information, + // and clean it as it may carry a lot of unnecessary information, // e.g. hadoop conf, spark conf, etc. logDebug(s" + outermost object is a REPL line object, so we clone it:" + s" ${outermostClass}") diff --git a/core/src/main/scala/org/apache/spark/deploy/DependencyUtils.scala b/core/src/main/scala/org/apache/spark/util/DependencyUtils.scala similarity index 50% rename from core/src/main/scala/org/apache/spark/deploy/DependencyUtils.scala rename to core/src/main/scala/org/apache/spark/util/DependencyUtils.scala index 5a17a6b6e169c..60e866a556796 100644 --- a/core/src/main/scala/org/apache/spark/deploy/DependencyUtils.scala +++ b/core/src/main/scala/org/apache/spark/util/DependencyUtils.scala @@ -15,7 +15,7 @@ * limitations under the License. */ -package org.apache.spark.deploy +package org.apache.spark.util import java.io.File import java.net.URI @@ -24,18 +24,146 @@ import org.apache.commons.lang3.StringUtils import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} -import org.apache.spark.{SecurityManager, SparkConf, SparkException} +import org.apache.spark.{SparkConf, SparkException} +import org.apache.spark.deploy.SparkSubmitUtils import org.apache.spark.internal.Logging -import org.apache.spark.util.{MutableURLClassLoader, Utils} -private[deploy] object DependencyUtils extends Logging { +case class IvyProperties( + packagesExclusions: String, + packages: String, + repositories: String, + ivyRepoPath: String, + ivySettingsPath: String) + +private[spark] object DependencyUtils extends Logging { + + def getIvyProperties(): IvyProperties = { + val Seq(packagesExclusions, packages, repositories, ivyRepoPath, ivySettingsPath) = Seq( + "spark.jars.excludes", + "spark.jars.packages", + "spark.jars.repositories", + "spark.jars.ivy", + "spark.jars.ivySettings" + ).map(sys.props.get(_).orNull) + IvyProperties(packagesExclusions, packages, repositories, ivyRepoPath, ivySettingsPath) + } + + private def isInvalidQueryString(tokens: Array[String]): Boolean = { + tokens.length != 2 || StringUtils.isBlank(tokens(0)) || StringUtils.isBlank(tokens(1)) + } + + /** + * Parse URI query string's parameter value of `transitive` and `exclude`. + * Other invalid parameters will be ignored. + * + * @param uri Ivy URI need to be downloaded. + * @return Tuple value of parameter `transitive` and `exclude` value. + * + * 1. 
transitive: whether to download dependency jar of Ivy URI, default value is false + * and this parameter value is case-sensitive. Invalid value will be treat as false. + * Example: Input: exclude=org.mortbay.jetty:jetty&transitive=true + * Output: true + * + * 2. exclude: comma separated exclusions to apply when resolving transitive dependencies, + * consists of `group:module` pairs separated by commas. + * Example: Input: excludeorg.mortbay.jetty:jetty,org.eclipse.jetty:jetty-http + * Output: [org.mortbay.jetty:jetty,org.eclipse.jetty:jetty-http] + */ + private def parseQueryParams(uri: URI): (Boolean, String) = { + val uriQuery = uri.getQuery + if (uriQuery == null) { + (false, "") + } else { + val mapTokens = uriQuery.split("&").map(_.split("=")) + if (mapTokens.exists(isInvalidQueryString)) { + throw new IllegalArgumentException( + s"Invalid query string in Ivy URI ${uri.toString}: $uriQuery") + } + val groupedParams = mapTokens.map(kv => (kv(0), kv(1))).groupBy(_._1) + + // Parse transitive parameters (e.g., transitive=true) in an Ivy URI, default value is false + val transitiveParams = groupedParams.get("transitive") + if (transitiveParams.map(_.size).getOrElse(0) > 1) { + logWarning("It's best to specify `transitive` parameter in ivy URI query only once." + + " If there are multiple `transitive` parameter, we will select the last one") + } + val transitive = + transitiveParams.flatMap(_.takeRight(1).map(_._2 == "true").headOption).getOrElse(false) + + // Parse an excluded list (e.g., exclude=org.mortbay.jetty:jetty,org.eclipse.jetty:jetty-http) + // in an Ivy URI. When download Ivy URI jar, Spark won't download transitive jar + // in a excluded list. + val exclusionList = groupedParams.get("exclude").map { params => + params.map(_._2).flatMap { excludeString => + val excludes = excludeString.split(",") + if (excludes.map(_.split(":")).exists(isInvalidQueryString)) { + throw new IllegalArgumentException( + s"Invalid exclude string in Ivy URI ${uri.toString}:" + + " expected 'org:module,org:module,..', found " + excludeString) + } + excludes + }.mkString(",") + }.getOrElse("") + + val validParams = Set("transitive", "exclude") + val invalidParams = groupedParams.keys.filterNot(validParams.contains).toSeq + if (invalidParams.nonEmpty) { + logWarning(s"Invalid parameters `${invalidParams.sorted.mkString(",")}` found " + + s"in Ivy URI query `$uriQuery`.") + } + + (transitive, exclusionList) + } + } + + /** + * Download Ivy URI's dependency jars. + * + * @param uri Ivy URI need to be downloaded. The URI format should be: + * `ivy://group:module:version[?query]` + * Ivy URI query part format should be: + * `parameter=value¶meter=value...` + * Note that currently Ivy URI query part support two parameters: + * 1. transitive: whether to download dependent jars related to your Ivy URI. + * transitive=false or `transitive=true`, if not set, the default value is false. + * 2. exclude: exclusion list when download Ivy URI jar and dependency jars. + * The `exclude` parameter content is a ',' separated `group:module` pair string : + * `exclude=group:module,group:module...` + * @return List of jars downloaded. 
+ */ + def resolveMavenDependencies(uri: URI): Seq[String] = { + val ivyProperties = DependencyUtils.getIvyProperties() + val authority = uri.getAuthority + if (authority == null) { + throw new IllegalArgumentException( + s"Invalid Ivy URI authority in uri ${uri.toString}:" + + " Expected 'org:module:version', found null.") + } + if (authority.split(":").length != 3) { + throw new IllegalArgumentException( + s"Invalid Ivy URI authority in uri ${uri.toString}:" + + s" Expected 'org:module:version', found $authority.") + } + + val (transitive, exclusionList) = parseQueryParams(uri) + + resolveMavenDependencies( + transitive, + exclusionList, + authority, + ivyProperties.repositories, + ivyProperties.ivyRepoPath, + Option(ivyProperties.ivySettingsPath) + ) + } def resolveMavenDependencies( + packagesTransitive: Boolean, packagesExclusions: String, packages: String, repositories: String, ivyRepoPath: String, - ivySettingsPath: Option[String]): String = { + ivySettingsPath: Option[String]): Seq[String] = { val exclusions: Seq[String] = if (!StringUtils.isBlank(packagesExclusions)) { packagesExclusions.split(",") @@ -51,15 +179,15 @@ private[deploy] object DependencyUtils extends Logging { SparkSubmitUtils.buildIvySettings(Option(repositories), Option(ivyRepoPath)) } - SparkSubmitUtils.resolveMavenCoordinates(packages, ivySettings, exclusions = exclusions) + SparkSubmitUtils.resolveMavenCoordinates(packages, ivySettings, + transitive = packagesTransitive, exclusions = exclusions) } def resolveAndDownloadJars( jars: String, userJar: String, sparkConf: SparkConf, - hadoopConf: Configuration, - secMgr: SecurityManager): String = { + hadoopConf: Configuration): String = { val targetDir = Utils.createTempDir() val userJarName = userJar.split(File.separatorChar).last Option(jars) @@ -70,7 +198,7 @@ private[deploy] object DependencyUtils extends Logging { .mkString(",") } .filterNot(_ == "") - .map(downloadFileList(_, targetDir, sparkConf, hadoopConf, secMgr)) + .map(downloadFileList(_, targetDir, sparkConf, hadoopConf)) .orNull } @@ -90,18 +218,16 @@ private[deploy] object DependencyUtils extends Logging { * @param targetDir A temporary directory for which downloaded files. * @param sparkConf Spark configuration. * @param hadoopConf Hadoop configuration. - * @param secMgr Spark security manager. * @return A comma separated local files list. */ def downloadFileList( fileList: String, targetDir: File, sparkConf: SparkConf, - hadoopConf: Configuration, - secMgr: SecurityManager): String = { + hadoopConf: Configuration): String = { require(fileList != null, "fileList cannot be null.") Utils.stringToSeq(fileList) - .map(downloadFile(_, targetDir, sparkConf, hadoopConf, secMgr)) + .map(downloadFile(_, targetDir, sparkConf, hadoopConf)) .mkString(",") } @@ -113,15 +239,13 @@ private[deploy] object DependencyUtils extends Logging { * @param targetDir A temporary directory for which downloaded files. * @param sparkConf Spark configuration. * @param hadoopConf Hadoop configuration. - * @param secMgr Spark security manager. * @return Path to the local file. 
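// A standalone sketch of the Ivy URI handling described above (illustrative only;
// parseIvyUri is not the DependencyUtils API, and the coordinate below is made up).
// A URI such as ivy://my.group:my-module:1.0.0?transitive=true&exclude=a:b,c:d carries
// the Maven coordinate in the authority and the two supported query parameters.
import java.net.URI

def parseIvyUri(raw: String): (String, Boolean, Seq[String]) = {
  val uri = new URI(raw)
  require(uri.getScheme == "ivy", s"not an ivy URI: $raw")
  val coordinate = uri.getAuthority  // expected to be "group:module:version"
  require(coordinate != null && coordinate.split(":").length == 3,
    s"expected 'org:module:version', found $coordinate")
  val params = Option(uri.getQuery).getOrElse("")
    .split("&").filter(_.nonEmpty)
    .map(_.split("=", 2)).collect { case Array(k, v) => (k, v) }
  val transitive = params.collect { case ("transitive", v) => v }.lastOption.contains("true")
  val exclusions = params.collect { case ("exclude", v) => v }.flatMap(_.split(",")).toSeq
  (coordinate, transitive, exclusions)
}
// parseIvyUri("ivy://my.group:my-module:1.0.0?transitive=true")
//   => ("my.group:my-module:1.0.0", true, Seq())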
*/ def downloadFile( path: String, targetDir: File, sparkConf: SparkConf, - hadoopConf: Configuration, - secMgr: SecurityManager): String = { + hadoopConf: Configuration): String = { require(path != null, "path cannot be null.") val uri = Utils.resolveURI(path) @@ -134,8 +258,7 @@ private[deploy] object DependencyUtils extends Logging { new File(targetDir, file.getName).toURI.toString case _ => val fname = new Path(uri).getName() - val localFile = Utils.doFetchFile(uri.toString(), targetDir, fname, sparkConf, secMgr, - hadoopConf) + val localFile = Utils.doFetchFile(uri.toString(), targetDir, fname, sparkConf, hadoopConf) localFile.toURI().toString() } } diff --git a/core/src/main/scala/org/apache/spark/util/HadoopFSUtils.scala b/core/src/main/scala/org/apache/spark/util/HadoopFSUtils.scala new file mode 100644 index 0000000000000..4af48d5b9125c --- /dev/null +++ b/core/src/main/scala/org/apache/spark/util/HadoopFSUtils.scala @@ -0,0 +1,353 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.util + +import java.io.FileNotFoundException + +import scala.collection.mutable + +import org.apache.hadoop.conf.Configuration +import org.apache.hadoop.fs._ +import org.apache.hadoop.fs.viewfs.ViewFileSystem +import org.apache.hadoop.hdfs.DistributedFileSystem + +import org.apache.spark._ +import org.apache.spark.internal.Logging +import org.apache.spark.metrics.source.HiveCatalogMetrics + +/** + * Utility functions to simplify and speed-up file listing. + */ +private[spark] object HadoopFSUtils extends Logging { + /** + * Lists a collection of paths recursively. Picks the listing strategy adaptively depending + * on the number of paths to list. + * + * This may only be called on the driver. + * + * @param sc Spark context used to run parallel listing. + * @param paths Input paths to list + * @param hadoopConf Hadoop configuration + * @param filter Path filter used to exclude leaf files from result + * @param ignoreMissingFiles Ignore missing files that happen during recursive listing + * (e.g., due to race conditions) + * @param ignoreLocality Whether to fetch data locality info when listing leaf files. If false, + * this will return `FileStatus` without `BlockLocation` info. + * @param parallelismThreshold The threshold to enable parallelism. If the number of input paths + * is smaller than this value, this will fallback to use + * sequential listing. + * @param parallelismMax The maximum parallelism for listing. If the number of input paths is + * larger than this value, parallelism will be throttled to this value + * to avoid generating too many tasks. 
+ * @return for each input path, the set of discovered files for the path + */ + def parallelListLeafFiles( + sc: SparkContext, + paths: Seq[Path], + hadoopConf: Configuration, + filter: PathFilter, + ignoreMissingFiles: Boolean, + ignoreLocality: Boolean, + parallelismThreshold: Int, + parallelismMax: Int): Seq[(Path, Seq[FileStatus])] = { + parallelListLeafFilesInternal(sc, paths, hadoopConf, filter, isRootLevel = true, + ignoreMissingFiles, ignoreLocality, parallelismThreshold, parallelismMax) + } + + private def parallelListLeafFilesInternal( + sc: SparkContext, + paths: Seq[Path], + hadoopConf: Configuration, + filter: PathFilter, + isRootLevel: Boolean, + ignoreMissingFiles: Boolean, + ignoreLocality: Boolean, + parallelismThreshold: Int, + parallelismMax: Int): Seq[(Path, Seq[FileStatus])] = { + + // Short-circuits parallel listing when serial listing is likely to be faster. + if (paths.size <= parallelismThreshold) { + return paths.map { path => + val leafFiles = listLeafFiles( + path, + hadoopConf, + filter, + Some(sc), + ignoreMissingFiles = ignoreMissingFiles, + ignoreLocality = ignoreLocality, + isRootPath = isRootLevel, + parallelismThreshold = parallelismThreshold, + parallelismMax = parallelismMax) + (path, leafFiles) + } + } + + logInfo(s"Listing leaf files and directories in parallel under ${paths.length} paths." + + s" The first several paths are: ${paths.take(10).mkString(", ")}.") + HiveCatalogMetrics.incrementParallelListingJobCount(1) + + val serializableConfiguration = new SerializableConfiguration(hadoopConf) + val serializedPaths = paths.map(_.toString) + + // Set the number of parallelism to prevent following file listing from generating many tasks + // in case of large #defaultParallelism. + val numParallelism = Math.min(paths.size, parallelismMax) + + val previousJobDescription = sc.getLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION) + val statusMap = try { + val description = paths.size match { + case 0 => + "Listing leaf files and directories 0 paths" + case 1 => + s"Listing leaf files and directories for 1 path:
${paths(0)}" + case s => + s"Listing leaf files and directories for $s paths:
${paths(0)}, ..." + } + sc.setJobDescription(description) + sc + .parallelize(serializedPaths, numParallelism) + .mapPartitions { pathStrings => + val hadoopConf = serializableConfiguration.value + pathStrings.map(new Path(_)).toSeq.map { path => + val leafFiles = listLeafFiles( + path = path, + hadoopConf = hadoopConf, + filter = filter, + contextOpt = None, // Can't execute parallel scans on workers + ignoreMissingFiles = ignoreMissingFiles, + ignoreLocality = ignoreLocality, + isRootPath = isRootLevel, + parallelismThreshold = Int.MaxValue, + parallelismMax = 0) + (path, leafFiles) + }.iterator + }.map { case (path, statuses) => + val serializableStatuses = statuses.map { status => + // Turn FileStatus into SerializableFileStatus so we can send it back to the driver + val blockLocations = status match { + case f: LocatedFileStatus => + f.getBlockLocations.map { loc => + SerializableBlockLocation( + loc.getNames, + loc.getHosts, + loc.getOffset, + loc.getLength) + } + + case _ => + Array.empty[SerializableBlockLocation] + } + + SerializableFileStatus( + status.getPath.toString, + status.getLen, + status.isDirectory, + status.getReplication, + status.getBlockSize, + status.getModificationTime, + status.getAccessTime, + blockLocations) + } + (path.toString, serializableStatuses) + }.collect() + } finally { + sc.setJobDescription(previousJobDescription) + } + + // turn SerializableFileStatus back to Status + statusMap.map { case (path, serializableStatuses) => + val statuses = serializableStatuses.map { f => + val blockLocations = f.blockLocations.map { loc => + new BlockLocation(loc.names, loc.hosts, loc.offset, loc.length) + } + new LocatedFileStatus( + new FileStatus( + f.length, f.isDir, f.blockReplication, f.blockSize, f.modificationTime, + new Path(f.path)), + blockLocations) + } + (new Path(path), statuses) + } + } + + // scalastyle:off argcount + /** + * Lists a single filesystem path recursively. If a `SparkContext` object is specified, this + * function may launch Spark jobs to parallelize listing based on `parallelismThreshold`. + * + * If sessionOpt is None, this may be called on executors. + * + * @return all children of path that match the specified filter. + */ + private def listLeafFiles( + path: Path, + hadoopConf: Configuration, + filter: PathFilter, + contextOpt: Option[SparkContext], + ignoreMissingFiles: Boolean, + ignoreLocality: Boolean, + isRootPath: Boolean, + parallelismThreshold: Int, + parallelismMax: Int): Seq[FileStatus] = { + + logTrace(s"Listing $path") + val fs = path.getFileSystem(hadoopConf) + + // Note that statuses only include FileStatus for the files and dirs directly under path, + // and does not include anything else recursively. + val statuses: Array[FileStatus] = try { + fs match { + // DistributedFileSystem overrides listLocatedStatus to make 1 single call to namenode + // to retrieve the file status with the file block location. The reason to still fallback + // to listStatus is because the default implementation would potentially throw a + // FileNotFoundException which is better handled by doing the lookups manually below. + case (_: DistributedFileSystem | _: ViewFileSystem) if !ignoreLocality => + val remoteIter = fs.listLocatedStatus(path) + new Iterator[LocatedFileStatus]() { + def next(): LocatedFileStatus = remoteIter.next + def hasNext(): Boolean = remoteIter.hasNext + }.toArray + case _ => fs.listStatus(path) + } + } catch { + // If we are listing a root path for SQL (e.g. 
a top level directory of a table), we need to + // ignore FileNotFoundExceptions during this root level of the listing because + // + // (a) certain code paths might construct an InMemoryFileIndex with root paths that + // might not exist (i.e. not all callers are guaranteed to have checked + // path existence prior to constructing InMemoryFileIndex) and, + // (b) we need to ignore deleted root paths during REFRESH TABLE, otherwise we break + // existing behavior and break the ability drop SessionCatalog tables when tables' + // root directories have been deleted (which breaks a number of Spark's own tests). + // + // If we are NOT listing a root path then a FileNotFoundException here means that the + // directory was present in a previous level of file listing but is absent in this + // listing, likely indicating a race condition (e.g. concurrent table overwrite or S3 + // list inconsistency). + // + // The trade-off in supporting existing behaviors / use-cases is that we won't be + // able to detect race conditions involving root paths being deleted during + // InMemoryFileIndex construction. However, it's still a net improvement to detect and + // fail-fast on the non-root cases. For more info see the SPARK-27676 review discussion. + case _: FileNotFoundException if isRootPath || ignoreMissingFiles => + logWarning(s"The directory $path was not found. Was it deleted very recently?") + Array.empty[FileStatus] + } + + val allLeafStatuses = { + val (dirs, topLevelFiles) = statuses.partition(_.isDirectory) + val nestedFiles: Seq[FileStatus] = contextOpt match { + case Some(context) if dirs.size > parallelismThreshold => + parallelListLeafFilesInternal( + context, + dirs.map(_.getPath), + hadoopConf = hadoopConf, + filter = filter, + isRootLevel = false, + ignoreMissingFiles = ignoreMissingFiles, + ignoreLocality = ignoreLocality, + parallelismThreshold = parallelismThreshold, + parallelismMax = parallelismMax + ).flatMap(_._2) + case _ => + dirs.flatMap { dir => + listLeafFiles( + path = dir.getPath, + hadoopConf = hadoopConf, + filter = filter, + contextOpt = contextOpt, + ignoreMissingFiles = ignoreMissingFiles, + ignoreLocality = ignoreLocality, + isRootPath = false, + parallelismThreshold = parallelismThreshold, + parallelismMax = parallelismMax) + } + } + val allFiles = topLevelFiles ++ nestedFiles + if (filter != null) allFiles.filter(f => filter.accept(f.getPath)) else allFiles + } + + val missingFiles = mutable.ArrayBuffer.empty[String] + val resolvedLeafStatuses = allLeafStatuses.flatMap { + case f: LocatedFileStatus => + Some(f) + + // NOTE: + // + // - Although S3/S3A/S3N file system can be quite slow for remote file metadata + // operations, calling `getFileBlockLocations` does no harm here since these file system + // implementations don't actually issue RPC for this method. + // + // - Here we are calling `getFileBlockLocations` in a sequential manner, but it should not + // be a big deal since we always use to `parallelListLeafFiles` when the number of + // paths exceeds threshold. + case f if !ignoreLocality => + // The other constructor of LocatedFileStatus will call FileStatus.getPermission(), + // which is very slow on some file system (RawLocalFileSystem, which is launch a + // subprocess and parse the stdout). 
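// A minimal standalone sketch of the listing strategy above (illustrative only; it walks
// java.io.File instead of Hadoop's FileSystem and uses a parallel collection where the
// real code would submit a Spark job): recurse over directories, treat a vanished
// directory as empty, and only fan out in parallel once the number of sub-directories at
// a level exceeds the threshold.
import java.io.File

def listLeafFilesSketch(dir: File, parallelismThreshold: Int): Seq[File] = {
  // listFiles() returns null if the directory disappeared or is unreadable
  val children = Option(dir.listFiles()).map(_.toSeq).getOrElse(Seq.empty)
  val (dirs, files) = children.partition(_.isDirectory)
  val nested =
    if (dirs.size > parallelismThreshold) {
      // stands in for the parallel Spark job; works as-is on Scala 2.12, while 2.13 needs
      // the scala-parallel-collections module and its CollectionConverters import
      dirs.par.flatMap(d => listLeafFilesSketch(d, parallelismThreshold)).seq
    } else {
      dirs.flatMap(d => listLeafFilesSketch(d, parallelismThreshold))
    }
  files ++ nested
}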
+ try { + val locations = fs.getFileBlockLocations(f, 0, f.getLen).map { loc => + // Store BlockLocation objects to consume less memory + if (loc.getClass == classOf[BlockLocation]) { + loc + } else { + new BlockLocation(loc.getNames, loc.getHosts, loc.getOffset, loc.getLength) + } + } + val lfs = new LocatedFileStatus(f.getLen, f.isDirectory, f.getReplication, f.getBlockSize, + f.getModificationTime, 0, null, null, null, null, f.getPath, locations) + if (f.isSymlink) { + lfs.setSymlink(f.getSymlink) + } + Some(lfs) + } catch { + case _: FileNotFoundException if ignoreMissingFiles => + missingFiles += f.getPath.toString + None + } + + case f => Some(f) + } + + if (missingFiles.nonEmpty) { + logWarning( + s"the following files were missing during file scan:\n ${missingFiles.mkString("\n ")}") + } + + resolvedLeafStatuses + } + // scalastyle:on argcount + + /** A serializable variant of HDFS's BlockLocation. This is required by Hadoop 2.7. */ + private case class SerializableBlockLocation( + names: Array[String], + hosts: Array[String], + offset: Long, + length: Long) + + /** A serializable variant of HDFS's FileStatus. This is required by Hadoop 2.7. */ + private case class SerializableFileStatus( + path: String, + length: Long, + isDir: Boolean, + blockReplication: Short, + blockSize: Long, + modificationTime: Long, + accessTime: Long, + blockLocations: Array[SerializableBlockLocation]) +} diff --git a/core/src/main/scala/org/apache/spark/util/JsonProtocol.scala b/core/src/main/scala/org/apache/spark/util/JsonProtocol.scala index 13f7cb453346f..103965e4860a3 100644 --- a/core/src/main/scala/org/apache/spark/util/JsonProtocol.scala +++ b/core/src/main/scala/org/apache/spark/util/JsonProtocol.scala @@ -757,7 +757,7 @@ private[spark] object JsonProtocol { def taskResourceRequestMapFromJson(json: JValue): Map[String, TaskResourceRequest] = { val jsonFields = json.asInstanceOf[JObject].obj - jsonFields.map { case JField(k, v) => + jsonFields.collect { case JField(k, v) => val req = taskResourceRequestFromJson(v) (k, req) }.toMap @@ -765,7 +765,7 @@ private[spark] object JsonProtocol { def executorResourceRequestMapFromJson(json: JValue): Map[String, ExecutorResourceRequest] = { val jsonFields = json.asInstanceOf[JObject].obj - jsonFields.map { case JField(k, v) => + jsonFields.collect { case JField(k, v) => val req = executorResourceRequestFromJson(v) (k, req) }.toMap @@ -1229,7 +1229,7 @@ private[spark] object JsonProtocol { def resourcesMapFromJson(json: JValue): Map[String, ResourceInformation] = { val jsonFields = json.asInstanceOf[JObject].obj - jsonFields.map { case JField(k, v) => + jsonFields.collect { case JField(k, v) => val resourceInfo = ResourceInformation.parseJson(v) (k, resourceInfo) }.toMap @@ -1241,7 +1241,7 @@ private[spark] object JsonProtocol { def mapFromJson(json: JValue): Map[String, String] = { val jsonFields = json.asInstanceOf[JObject].obj - jsonFields.map { case JField(k, JString(v)) => (k, v) }.toMap + jsonFields.collect { case JField(k, JString(v)) => (k, v) }.toMap } def propertiesFromJson(json: JValue): Properties = { diff --git a/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala b/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala index 78206c51c1028..d45dc937910d9 100644 --- a/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala +++ b/core/src/main/scala/org/apache/spark/util/ThreadUtils.scala @@ -23,7 +23,6 @@ import java.util.concurrent.locks.ReentrantLock import scala.concurrent.{Awaitable, ExecutionContext, 
ExecutionContextExecutor, Future} import scala.concurrent.duration.{Duration, FiniteDuration} -import scala.language.higherKinds import scala.util.control.NonFatal import com.google.common.util.concurrent.ThreadFactoryBuilder diff --git a/core/src/main/scala/org/apache/spark/util/UninterruptibleThread.scala b/core/src/main/scala/org/apache/spark/util/UninterruptibleThread.scala index 6a58ec142dd7f..24788d69121b2 100644 --- a/core/src/main/scala/org/apache/spark/util/UninterruptibleThread.scala +++ b/core/src/main/scala/org/apache/spark/util/UninterruptibleThread.scala @@ -31,7 +31,7 @@ private[spark] class UninterruptibleThread( target: Runnable, name: String) extends Thread(target, name) { - def this(name: String) { + def this(name: String) = { this(null, name) } diff --git a/core/src/main/scala/org/apache/spark/util/Utils.scala b/core/src/main/scala/org/apache/spark/util/Utils.scala index b8b044bbad30e..5e68dcd9df7fc 100644 --- a/core/src/main/scala/org/apache/spark/util/Utils.scala +++ b/core/src/main/scala/org/apache/spark/util/Utils.scala @@ -28,7 +28,7 @@ import java.nio.channels.{Channels, FileChannel, WritableByteChannel} import java.nio.charset.StandardCharsets import java.nio.file.Files import java.security.SecureRandom -import java.util.{Arrays, Locale, Properties, Random, UUID} +import java.util.{Locale, Properties, Random, UUID} import java.util.concurrent._ import java.util.concurrent.TimeUnit.NANOSECONDS import java.util.zip.GZIPInputStream @@ -50,9 +50,10 @@ import com.google.common.net.InetAddresses import org.apache.commons.codec.binary.Hex import org.apache.commons.lang3.SystemUtils import org.apache.hadoop.conf.Configuration -import org.apache.hadoop.fs.{FileSystem, FileUtil, Path, Trash} +import org.apache.hadoop.fs.{FileSystem, FileUtil, Path} import org.apache.hadoop.io.compress.{CompressionCodecFactory, SplittableCompressionCodec} import org.apache.hadoop.security.UserGroupInformation +import org.apache.hadoop.util.{RunJar, StringUtils} import org.apache.hadoop.yarn.conf.YarnConfiguration import org.eclipse.jetty.util.MultiException import org.slf4j.Logger @@ -269,29 +270,6 @@ private[spark] object Utils extends Logging { file.setExecutable(true, true) } - /** - * Move data to trash if 'spark.sql.truncate.trash.enabled' is true, else - * delete the data permanently. If move data to trash failed fallback to hard deletion. - */ - def moveToTrashOrDelete( - fs: FileSystem, - partitionPath: Path, - isTrashEnabled: Boolean, - hadoopConf: Configuration): Boolean = { - if (isTrashEnabled) { - logDebug(s"Try to move data ${partitionPath.toString} to trash") - val isSuccess = Trash.moveToAppropriateTrash(fs, partitionPath, hadoopConf) - if (!isSuccess) { - logWarning(s"Failed to move data ${partitionPath.toString} to trash. " + - "Fallback to hard deletion") - return fs.delete(partitionPath, true) - } - isSuccess - } else { - fs.delete(partitionPath, true) - } - } - /** * Create a directory given the abstract pathname * @return true, if the directory is successfully created; otherwise, return false. @@ -399,7 +377,7 @@ private[spark] object Utils extends Logging { * This returns a new InputStream which contains the same data as the original input stream. * It may be entirely on in-memory buffer, or it may be a combination of in-memory data, and then * continue to read from the original stream. The only real use of this is if the original input - * stream will potentially detect corruption while the data is being read (eg. from compression). 
+ * stream will potentially detect corruption while the data is being read (e.g. from compression). * This allows for an eager check of corruption in the first maxSize bytes of data. * * @return An InputStream which includes all data from the original stream (combining buffered @@ -509,15 +487,19 @@ private[spark] object Utils extends Logging { * * Throws SparkException if the target file already exists and has different contents than * the requested file. + * + * If `shouldUntar` is true, it untars the given url if it is a tar.gz or tgz into `targetDir`. + * This is a legacy behavior, and users should better use `spark.archives` configuration or + * `SparkContext.addArchive` */ def fetchFile( url: String, targetDir: File, conf: SparkConf, - securityMgr: SecurityManager, hadoopConf: Configuration, timestamp: Long, - useCache: Boolean): File = { + useCache: Boolean, + shouldUntar: Boolean = true): File = { val fileName = decodeFileNameInURI(new URI(url)) val targetFile = new File(targetDir, fileName) val fetchCacheEnabled = conf.getBoolean("spark.files.useFetchCache", defaultValue = true) @@ -542,7 +524,7 @@ private[spark] object Utils extends Logging { val cachedFile = new File(localDir, cachedFileName) try { if (!cachedFile.exists()) { - doFetchFile(url, localDir, cachedFileName, conf, securityMgr, hadoopConf) + doFetchFile(url, localDir, cachedFileName, conf, hadoopConf) } } finally { lock.release() @@ -555,16 +537,26 @@ private[spark] object Utils extends Logging { conf.getBoolean("spark.files.overwrite", false) ) } else { - doFetchFile(url, targetDir, fileName, conf, securityMgr, hadoopConf) - } - - // Decompress the file if it's a .tar or .tar.gz - if (fileName.endsWith(".tar.gz") || fileName.endsWith(".tgz")) { - logInfo("Untarring " + fileName) - executeAndGetOutput(Seq("tar", "-xzf", fileName), targetDir) - } else if (fileName.endsWith(".tar")) { - logInfo("Untarring " + fileName) - executeAndGetOutput(Seq("tar", "-xf", fileName), targetDir) + doFetchFile(url, targetDir, fileName, conf, hadoopConf) + } + + if (shouldUntar) { + // Decompress the file if it's a .tar or .tar.gz + if (fileName.endsWith(".tar.gz") || fileName.endsWith(".tgz")) { + logWarning( + "Untarring behavior will be deprecated at spark.files and " + + "SparkContext.addFile. Consider using spark.archives or SparkContext.addArchive " + + "instead.") + logInfo("Untarring " + fileName) + executeAndGetOutput(Seq("tar", "-xzf", fileName), targetDir) + } else if (fileName.endsWith(".tar")) { + logWarning( + "Untarring behavior will be deprecated at spark.files and " + + "SparkContext.addFile. Consider using spark.archives or SparkContext.addArchive " + + "instead.") + logInfo("Untarring " + fileName) + executeAndGetOutput(Seq("tar", "-xf", fileName), targetDir) + } } // Make the file executable - That's necessary for scripts FileUtil.chmod(targetFile.getAbsolutePath, "a+x") @@ -578,6 +570,26 @@ private[spark] object Utils extends Logging { targetFile } + /** + * Unpacks an archive file into the specified directory. It expects .jar, .zip, .tar.gz, .tgz + * and .tar files. This behaves same as Hadoop's archive in distributed cache. This method is + * basically copied from `org.apache.hadoop.yarn.util.FSDownload.unpack`. 
+ */ + def unpack(source: File, dest: File): Unit = { + val lowerSrc = StringUtils.toLowerCase(source.getName) + if (lowerSrc.endsWith(".jar")) { + RunJar.unJar(source, dest, RunJar.MATCH_ANY) + } else if (lowerSrc.endsWith(".zip")) { + FileUtil.unZip(source, dest) + } else if ( + lowerSrc.endsWith(".tar.gz") || lowerSrc.endsWith(".tgz") || lowerSrc.endsWith(".tar")) { + FileUtil.unTar(source, dest) + } else { + logWarning(s"Cannot unpack $source, just copying it to $dest.") + copyRecursive(source, dest) + } + } + /** Records the duration of running `body`. */ def timeTakenMs[T](body: => T): (T, Long) = { val startTime = System.nanoTime() @@ -728,7 +740,6 @@ private[spark] object Utils extends Logging { targetDir: File, filename: String, conf: SparkConf, - securityMgr: SecurityManager, hadoopConf: Configuration): File = { val targetFile = new File(targetDir, filename) val uri = new URI(url) @@ -1090,20 +1101,20 @@ private[spark] object Utils extends Logging { } // checks if the hostport contains IPV6 ip and parses the host, port if (hostPort != null && hostPort.split(":").length > 2) { - val indx: Int = hostPort.lastIndexOf("]:") - if (-1 == indx) { + val index: Int = hostPort.lastIndexOf("]:") + if (-1 == index) { return setDefaultPortValue } - val port = hostPort.substring(indx + 2).trim() - val retval = (hostPort.substring(0, indx + 1).trim(), if (port.isEmpty) 0 else port.toInt) + val port = hostPort.substring(index + 2).trim() + val retval = (hostPort.substring(0, index + 1).trim(), if (port.isEmpty) 0 else port.toInt) hostPortParseResults.putIfAbsent(hostPort, retval) } else { - val indx: Int = hostPort.lastIndexOf(':') - if (-1 == indx) { + val index: Int = hostPort.lastIndexOf(':') + if (-1 == index) { return setDefaultPortValue } - val port = hostPort.substring(indx + 1).trim() - val retval = (hostPort.substring(0, indx).trim(), if (port.isEmpty) 0 else port.toInt) + val port = hostPort.substring(index + 1).trim() + val retval = (hostPort.substring(0, index).trim(), if (port.isEmpty) 0 else port.toInt) hostPortParseResults.putIfAbsent(hostPort, retval) } @@ -2541,6 +2552,14 @@ private[spark] object Utils extends Logging { master == "local" || master.startsWith("local[") } + /** + * Push based shuffle can only be enabled when external shuffle service is enabled. + */ + def isPushBasedShuffleEnabled(conf: SparkConf): Boolean = { + conf.get(PUSH_BASED_SHUFFLE_ENABLED) && + (conf.get(IS_TESTING).getOrElse(false) || conf.get(SHUFFLE_SERVICE_ENABLED)) + } + /** * Return whether dynamic allocation is enabled in the given conf. 
*/ @@ -2869,11 +2888,11 @@ private[spark] object Utils extends Logging { if (lastDollarIndex < s.length - 1) { // The last char is not a dollar sign if (lastDollarIndex == -1 || !s.contains("$iw")) { - // The name does not have dollar sign or is not an intepreter + // The name does not have dollar sign or is not an interpreter // generated class, so we should return the full string s } else { - // The class name is intepreter generated, + // The class name is interpreter generated, // return the part after the last dollar sign // This is the same behavior as getClass.getSimpleName s.substring(lastDollarIndex + 1) @@ -2906,14 +2925,14 @@ private[spark] object Utils extends Logging { */ private val fullWidthRegex = ("""[""" + // scalastyle:off nonascii - """\u1100-\u115F""" + - """\u2E80-\uA4CF""" + - """\uAC00-\uD7A3""" + - """\uF900-\uFAFF""" + - """\uFE10-\uFE19""" + - """\uFE30-\uFE6F""" + - """\uFF00-\uFF60""" + - """\uFFE0-\uFFE6""" + + "\u1100-\u115F" + + "\u2E80-\uA4CF" + + "\uAC00-\uD7A3" + + "\uF900-\uFAFF" + + "\uFE10-\uFE19" + + "\uFE30-\uFE6F" + + "\uFF00-\uFF60" + + "\uFFE0-\uFFE6" + // scalastyle:on nonascii """]""").r @@ -2971,6 +2990,27 @@ private[spark] object Utils extends Logging { metadata.append("]") metadata.toString } + + /** + * Convert MEMORY_OFFHEAP_SIZE to MB Unit, return 0 if MEMORY_OFFHEAP_ENABLED is false. + */ + def executorOffHeapMemorySizeAsMb(sparkConf: SparkConf): Int = { + val sizeInMB = Utils.memoryStringToMb(sparkConf.get(MEMORY_OFFHEAP_SIZE).toString) + checkOffHeapEnabled(sparkConf, sizeInMB).toInt + } + + /** + * return 0 if MEMORY_OFFHEAP_ENABLED is false. + */ + def checkOffHeapEnabled(sparkConf: SparkConf, offHeapSize: Long): Long = { + if (sparkConf.get(MEMORY_OFFHEAP_ENABLED)) { + require(offHeapSize > 0, + s"${MEMORY_OFFHEAP_SIZE.key} must be > 0 when ${MEMORY_OFFHEAP_ENABLED.key} == true") + offHeapSize + } else { + 0 + } + } } private[util] object CallerContext extends Logging { diff --git a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala index 7f40b469a95e9..731131b688ca7 100644 --- a/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala +++ b/core/src/main/scala/org/apache/spark/util/collection/ExternalAppendOnlyMap.scala @@ -76,7 +76,7 @@ class ExternalAppendOnlyMap[K, V, C]( mergeValue: (C, V) => C, mergeCombiners: (C, C) => C, serializer: Serializer, - blockManager: BlockManager) { + blockManager: BlockManager) = { this(createCombiner, mergeValue, mergeCombiners, serializer, blockManager, TaskContext.get()) } diff --git a/core/src/main/scala/org/apache/spark/util/io/ChunkedByteBuffer.scala b/core/src/main/scala/org/apache/spark/util/io/ChunkedByteBuffer.scala index 2c3730de08b5b..8635f1a3d702e 100644 --- a/core/src/main/scala/org/apache/spark/util/io/ChunkedByteBuffer.scala +++ b/core/src/main/scala/org/apache/spark/util/io/ChunkedByteBuffer.scala @@ -193,7 +193,7 @@ private[spark] object ChunkedByteBuffer { length: Long): ChunkedByteBuffer = { // We do *not* memory map the file, because we may end up putting this into the memory store, // and spark currently is not expecting memory-mapped buffers in the memory store, it conflicts - // with other parts that manage the lifecyle of buffers and dispose them. See SPARK-25422. + // with other parts that manage the lifecycle of buffers and dispose them. See SPARK-25422. 
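// A standalone sketch of the host/port parsing in the Utils hunk a little above
// (illustrative only, not Spark's Utils): split "host:port", handling bracketed IPv6
// literals such as "[2001:db8::1]:7077"; a missing or empty port falls back to 0.
def splitHostPort(hostPort: String): (String, Int) = {
  if (hostPort.split(":").length > 2) {          // more than one ':' => likely an IPv6 literal
    val index = hostPort.lastIndexOf("]:")
    if (index == -1) (hostPort, 0)
    else {
      val port = hostPort.substring(index + 2).trim
      (hostPort.substring(0, index + 1).trim, if (port.isEmpty) 0 else port.toInt)
    }
  } else {
    val index = hostPort.lastIndexOf(':')
    if (index == -1) (hostPort, 0)
    else {
      val port = hostPort.substring(index + 1).trim
      (hostPort.substring(0, index).trim, if (port.isEmpty) 0 else port.toInt)
    }
  }
}
// splitHostPort("[2001:db8::1]:7077") == ("[2001:db8::1]", 7077)
// splitHostPort("node-1:7077")        == ("node-1", 7077)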
val is = new FileInputStream(file) ByteStreams.skipFully(is, offset) val in = new LimitedInputStream(is, length) diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java index ee8e38c24b47f..df1d306e628a9 100644 --- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java +++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java @@ -68,10 +68,10 @@ public class UnsafeShuffleWriterSuite { static final int DEFAULT_INITIAL_SORT_BUFFER_SIZE = 4096; - static final int NUM_PARTITITONS = 4; + static final int NUM_PARTITIONS = 4; TestMemoryManager memoryManager; TaskMemoryManager taskMemoryManager; - final HashPartitioner hashPartitioner = new HashPartitioner(NUM_PARTITITONS); + final HashPartitioner hashPartitioner = new HashPartitioner(NUM_PARTITIONS); File mergedOutputFile; File tempDir; long[] partitionSizesInMergedFile; @@ -194,7 +194,7 @@ private void assertSpillFilesWereCleanedUp() { private List> readRecordsFromFile() throws IOException { final ArrayList> recordsList = new ArrayList<>(); long startOffset = 0; - for (int i = 0; i < NUM_PARTITITONS; i++) { + for (int i = 0; i < NUM_PARTITIONS; i++) { final long partitionSize = partitionSizesInMergedFile[i]; if (partitionSize > 0) { FileInputStream fin = new FileInputStream(mergedOutputFile); @@ -253,7 +253,7 @@ public void writeEmptyIterator() throws Exception { assertTrue(mapStatus.isDefined()); assertTrue(mergedOutputFile.exists()); assertEquals(0, spillFilesCreated.size()); - assertArrayEquals(new long[NUM_PARTITITONS], partitionSizesInMergedFile); + assertArrayEquals(new long[NUM_PARTITIONS], partitionSizesInMergedFile); assertEquals(0, taskMetrics.shuffleWriteMetrics().recordsWritten()); assertEquals(0, taskMetrics.shuffleWriteMetrics().bytesWritten()); assertEquals(0, taskMetrics.diskBytesSpilled()); @@ -264,7 +264,7 @@ public void writeEmptyIterator() throws Exception { public void writeWithoutSpilling() throws Exception { // In this example, each partition should have exactly one record: final ArrayList> dataToWrite = new ArrayList<>(); - for (int i = 0; i < NUM_PARTITITONS; i++) { + for (int i = 0; i < NUM_PARTITIONS; i++) { dataToWrite.add(new Tuple2<>(i, i)); } final UnsafeShuffleWriter writer = createWriter(true); diff --git a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java index f4e952f465e54..f35176a69d94b 100644 --- a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java +++ b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java @@ -576,6 +576,8 @@ public void spillInIterator() throws IOException { iter2.next(); } assertFalse(iter2.hasNext()); + // calls hasNext twice deliberately, make sure it's idempotent + assertFalse(iter2.hasNext()); } finally { map.free(); for (File spillFile : spillFilesCreated) { diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java index 43977717f6c97..dc2b4814c8284 100644 --- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java +++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java @@ -23,7 +23,6 @@ import java.util.LinkedList; 
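// A small Scala analogue of the assertion added in AbstractBytesToBytesMapSuite above
// (illustrative only; drainAndCheckIdempotent is not a Spark helper): hasNext on an
// exhausted iterator must be idempotent, i.e. repeated calls keep returning false and
// neither throw nor advance any internal state.
def drainAndCheckIdempotent[A](it: Iterator[A]): Unit = {
  while (it.hasNext) it.next()
  assert(!it.hasNext)
  assert(!it.hasNext) // called twice deliberately, mirroring the test above
}
// drainAndCheckIdempotent(Iterator(1, 2, 3))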
import java.util.UUID; -import org.hamcrest.Matchers; import scala.Tuple2$; import org.junit.After; @@ -38,7 +37,6 @@ import org.apache.spark.executor.TaskMetrics; import org.apache.spark.internal.config.package$; import org.apache.spark.memory.TestMemoryManager; -import org.apache.spark.memory.SparkOutOfMemoryError; import org.apache.spark.memory.TaskMemoryManager; import org.apache.spark.serializer.JavaSerializer; import org.apache.spark.serializer.SerializerInstance; @@ -359,6 +357,69 @@ public void forcedSpillingWithReadIterator() throws Exception { assertSpillFilesWereCleanedUp(); } + @Test + public void forcedSpillingNullsWithReadIterator() throws Exception { + final UnsafeExternalSorter sorter = newSorter(); + long[] record = new long[100]; + final int recordSize = record.length * 8; + final int n = (int) pageSizeBytes / recordSize * 3; + for (int i = 0; i < n; i++) { + boolean isNull = i % 2 == 0; + sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0, isNull); + } + assertTrue(sorter.getNumberOfAllocatedPages() >= 2); + + UnsafeExternalSorter.SpillableIterator iter = + (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator(); + final int numRecordsToReadBeforeSpilling = n / 3; + for (int i = 0; i < numRecordsToReadBeforeSpilling; i++) { + assertTrue(iter.hasNext()); + iter.loadNext(); + } + + assertTrue(iter.spill() > 0); + assertEquals(0, iter.spill()); + + for (int i = numRecordsToReadBeforeSpilling; i < n; i++) { + assertTrue(iter.hasNext()); + iter.loadNext(); + } + assertFalse(iter.hasNext()); + + sorter.cleanupResources(); + assertSpillFilesWereCleanedUp(); + } + + @Test + public void forcedSpillingWithFullyReadIterator() throws Exception { + final UnsafeExternalSorter sorter = newSorter(); + long[] record = new long[100]; + final int recordSize = record.length * 8; + final int n = (int) pageSizeBytes / recordSize * 3; + for (int i = 0; i < n; i++) { + record[0] = i; + sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0, false); + } + assertTrue(sorter.getNumberOfAllocatedPages() >= 2); + + UnsafeExternalSorter.SpillableIterator iter = + (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator(); + for (int i = 0; i < n; i++) { + assertTrue(iter.hasNext()); + iter.loadNext(); + assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset())); + } + assertFalse(iter.hasNext()); + + assertTrue(iter.spill() > 0); + assertEquals(0, iter.spill()); + assertEquals(n - 1, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset())); + assertFalse(iter.hasNext()); + + sorter.cleanupResources(); + assertSpillFilesWereCleanedUp(); + } + @Test public void forcedSpillingWithNotReadIterator() throws Exception { final UnsafeExternalSorter sorter = newSorter(); @@ -518,40 +579,28 @@ public void testGetIterator() throws Exception { } @Test - public void testOOMDuringSpill() throws Exception { + public void testNoOOMDuringSpill() throws Exception { final UnsafeExternalSorter sorter = newSorter(); - // we assume that given default configuration, - // the size of the data we insert to the sorter (ints) - // and assuming we shouldn't spill before pointers array is exhausted - // (memory manager is not configured to throw at this point) - // - so this loop runs a reasonable number of iterations (<2000). - // test indeed completed within <30ms (on a quad i7 laptop). 
- for (int i = 0; sorter.hasSpaceForAnotherRecord(); ++i) { + for (int i = 0; i < 100; i++) { insertNumber(sorter, i); } - // we expect the next insert to attempt growing the pointerssArray first - // allocation is expected to fail, then a spill is triggered which - // attempts another allocation which also fails and we expect to see this - // OOM here. the original code messed with a released array within the - // spill code and ended up with a failed assertion. we also expect the - // location of the OOM to be - // org.apache.spark.util.collection.unsafe.sort.UnsafeInMemorySorter.reset - memoryManager.markconsequentOOM(2); - try { - insertNumber(sorter, 1024); - fail("expected OutOfMmoryError but it seems operation surprisingly succeeded"); - } - // we expect an SparkOutOfMemoryError here, anything else (i.e the original NPE is a failure) - catch (SparkOutOfMemoryError oom){ - String oomStackTrace = Utils.exceptionString(oom); - assertThat("expected SparkOutOfMemoryError in " + - "org.apache.spark.util.collection.unsafe.sort.UnsafeInMemorySorter.reset", - oomStackTrace, - Matchers.containsString( - "org.apache.spark.util.collection.unsafe.sort.UnsafeInMemorySorter.reset")); + + // Check that spilling still succeeds when the task is starved for memory. + memoryManager.markconsequentOOM(Integer.MAX_VALUE); + sorter.spill(); + memoryManager.resetConsequentOOM(); + + // Ensure that records can be appended after spilling, i.e. check that the sorter will allocate + // the new pointer array that it could not allocate while spilling. + for (int i = 0; i < 100; ++i) { + insertNumber(sorter, i); } + + sorter.cleanupResources(); + assertSpillFilesWereCleanedUp(); } + private void verifyIntIterator(UnsafeSorterIterator iter, int start, int end) throws IOException { for (int i = start; i < end; i++) { diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java index 2b8a0602730e1..9d4909ddce792 100644 --- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java +++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeInMemorySorterSuite.java @@ -20,6 +20,7 @@ import java.nio.charset.StandardCharsets; import java.util.Arrays; +import org.apache.spark.unsafe.array.LongArray; import org.junit.Assert; import org.junit.Test; @@ -27,7 +28,6 @@ import org.apache.spark.SparkConf; import org.apache.spark.memory.TestMemoryConsumer; import org.apache.spark.memory.TestMemoryManager; -import org.apache.spark.memory.SparkOutOfMemoryError; import org.apache.spark.memory.TaskMemoryManager; import org.apache.spark.unsafe.Platform; import org.apache.spark.unsafe.memory.MemoryBlock; @@ -37,7 +37,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.isIn; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; public class UnsafeInMemorySorterSuite { @@ -147,7 +146,7 @@ public int compare( } @Test - public void freeAfterOOM() { + public void testNoOOMDuringReset() { final SparkConf sparkConf = new SparkConf(); sparkConf.set(package$.MODULE$.MEMORY_OFFHEAP_ENABLED(), false); @@ -156,12 +155,7 @@ public void freeAfterOOM() { final TaskMemoryManager memoryManager = new TaskMemoryManager( testMemoryManager, 0); final TestMemoryConsumer consumer = new TestMemoryConsumer(memoryManager); - final MemoryBlock dataPage 
= memoryManager.allocatePage(2048, consumer); - final Object baseObject = dataPage.getBaseObject(); - // Write the records into the data page: - long position = dataPage.getBaseOffset(); - final HashPartitioner hashPartitioner = new HashPartitioner(4); // Use integer comparison for comparing prefixes (which are partition ids, in this case) final PrefixComparator prefixComparator = PrefixComparators.LONG; final RecordComparator recordComparator = new RecordComparator() { @@ -179,18 +173,24 @@ public int compare( UnsafeInMemorySorter sorter = new UnsafeInMemorySorter(consumer, memoryManager, recordComparator, prefixComparator, 100, shouldUseRadixSort()); - testMemoryManager.markExecutionAsOutOfMemoryOnce(); - try { - sorter.reset(); - fail("expected SparkOutOfMemoryError but it seems operation surprisingly succeeded"); - } catch (SparkOutOfMemoryError oom) { - // as expected - } - // [SPARK-21907] this failed on NPE at - // org.apache.spark.memory.MemoryConsumer.freeArray(MemoryConsumer.java:108) - sorter.free(); - // simulate a 'back to back' free. - sorter.free(); + // Ensure that the sorter does not OOM while freeing its memory. + testMemoryManager.markconsequentOOM(Integer.MAX_VALUE); + sorter.freeMemory(); + testMemoryManager.resetConsequentOOM(); + Assert.assertFalse(sorter.hasSpaceForAnotherRecord()); + + // Get the sorter in an usable state again by allocating a new pointer array. + LongArray array = consumer.allocateArray(1000); + sorter.expandPointerArray(array); + + // Ensure that it is safe to call freeMemory() multiple times. + testMemoryManager.markconsequentOOM(Integer.MAX_VALUE); + sorter.freeMemory(); + sorter.freeMemory(); + testMemoryManager.resetConsequentOOM(); + Assert.assertFalse(sorter.hasSpaceForAnotherRecord()); + + assertEquals(0L, memoryManager.cleanUpAllAllocatedMemory()); } } diff --git a/core/src/test/java/test/org/apache/spark/JavaAPISuite.java b/core/src/test/java/test/org/apache/spark/JavaAPISuite.java index dbaca71c5fdc3..e73ac0e9fb7a6 100644 --- a/core/src/test/java/test/org/apache/spark/JavaAPISuite.java +++ b/core/src/test/java/test/org/apache/spark/JavaAPISuite.java @@ -1518,7 +1518,7 @@ public void testAsyncActionErrorWrapping() throws Exception { JavaFutureAction<Long> future = rdd.map(new BuggyMapFunction<>()).countAsync(); try { future.get(2, TimeUnit.SECONDS); - fail("Expected future.get() for failed job to throw ExcecutionException"); + fail("Expected future.get() for failed job to throw ExecutionException"); } catch (ExecutionException ee) { assertTrue(Throwables.getStackTraceAsString(ee).contains("Custom exception!")); } diff --git a/core/src/test/resources/HistoryServerExpectations/complete_stage_list_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/complete_stage_list_json_expectation.json index a452488294547..f04543e037c48 100644 --- a/core/src/test/resources/HistoryServerExpectations/complete_stage_list_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/complete_stage_list_json_expectation.json @@ -42,7 +42,29 @@ "rddIds" : [ 6, 5 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, +
"ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } }, { "status" : "COMPLETE", "stageId" : 1, @@ -87,7 +109,29 @@ "rddIds" : [ 1, 0 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } }, { "status" : "COMPLETE", "stageId" : 0, @@ -132,5 +176,27 @@ "rddIds" : [ 0 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } ] diff --git a/core/src/test/resources/HistoryServerExpectations/blacklisting_for_stage_expectation.json b/core/src/test/resources/HistoryServerExpectations/excludeOnFailure_for_stage_expectation.json similarity index 91% rename from core/src/test/resources/HistoryServerExpectations/blacklisting_for_stage_expectation.json rename to core/src/test/resources/HistoryServerExpectations/excludeOnFailure_for_stage_expectation.json index 0d197eab0e25d..dcad8a6895ed8 100644 --- a/core/src/test/resources/HistoryServerExpectations/blacklisting_for_stage_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/excludeOnFailure_for_stage_expectation.json @@ -697,7 +697,30 @@ "shuffleWriteRecords" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : true + "isBlacklistedForStage" : true, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : true }, "1" : { "taskTime" : 708, @@ -714,9 +737,54 @@ "shuffleWriteRecords" : 10, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : false + "isBlacklistedForStage" : false, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + 
"JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : false } }, "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } diff --git a/core/src/test/resources/HistoryServerExpectations/blacklisting_node_for_stage_expectation.json b/core/src/test/resources/HistoryServerExpectations/excludeOnFailure_node_for_stage_expectation.json similarity index 87% rename from core/src/test/resources/HistoryServerExpectations/blacklisting_node_for_stage_expectation.json rename to core/src/test/resources/HistoryServerExpectations/excludeOnFailure_node_for_stage_expectation.json index 24d73faa45021..2ab1546bd4a86 100644 --- a/core/src/test/resources/HistoryServerExpectations/blacklisting_node_for_stage_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/excludeOnFailure_node_for_stage_expectation.json @@ -805,7 +805,30 @@ "shuffleWriteRecords" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : true + "isBlacklistedForStage" : true, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : true }, "5" : { "taskTime" : 1579, @@ -822,7 +845,30 @@ "shuffleWriteRecords" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : true + "isBlacklistedForStage" : true, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + 
"isExcludedForStage" : true }, "1" : { "taskTime" : 2411, @@ -839,7 +885,30 @@ "shuffleWriteRecords" : 12, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : false + "isBlacklistedForStage" : false, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : false }, "2" : { "taskTime" : 2446, @@ -856,7 +925,30 @@ "shuffleWriteRecords" : 15, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : false + "isBlacklistedForStage" : false, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : false }, "3" : { "taskTime" : 1774, @@ -873,9 +965,54 @@ "shuffleWriteRecords" : 3, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : true + "isBlacklistedForStage" : true, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : true } }, "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } diff --git a/core/src/test/resources/HistoryServerExpectations/executor_list_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/executor_list_json_expectation.json index 67425676a62d6..be125075874a2 100644 --- a/core/src/test/resources/HistoryServerExpectations/executor_list_json_expectation.json +++ 
b/core/src/test/resources/HistoryServerExpectations/executor_list_json_expectation.json @@ -21,7 +21,31 @@ "addTime" : "2015-02-03T16:43:00.906GMT", "executorLogs" : { }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] } ] diff --git a/core/src/test/resources/HistoryServerExpectations/executor_list_with_executor_metrics_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/executor_list_with_executor_metrics_json_expectation.json index d052a27385f66..bf3e93f3d3783 100644 --- a/core/src/test/resources/HistoryServerExpectations/executor_list_with_executor_metrics_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/executor_list_with_executor_metrics_json_expectation.json @@ -51,7 +51,9 @@ }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "3", "hostPort" : "test-3.vpc.company.com:37641", @@ -118,7 +120,9 @@ "CONTAINER_ID" : "container_1553914137147_0018_01_000004" }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "2", "hostPort" : "test-4.vpc.company.com:33179", @@ -185,7 +189,9 @@ "CONTAINER_ID" : "container_1553914137147_0018_01_000003" }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "1", "hostPort" : "test-2.vpc.company.com:43764", @@ -252,5 +258,7 @@ "CONTAINER_ID" : "container_1553914137147_0018_01_000002" }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] } ] diff --git a/core/src/test/resources/HistoryServerExpectations/executor_memory_usage_expectation.json b/core/src/test/resources/HistoryServerExpectations/executor_memory_usage_expectation.json index 91574ca8266b2..0a3eb81140cdb 100644 --- a/core/src/test/resources/HistoryServerExpectations/executor_memory_usage_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/executor_memory_usage_expectation.json @@ -16,7 +16,7 @@ "totalInputBytes" : 0, "totalShuffleRead" : 0, "totalShuffleWrite" : 0, - "isBlacklisted" : true, + "isBlacklisted" : false, "maxMemory" : 908381388, "addTime" : "2016-11-16T22:33:31.477GMT", "executorLogs" : { }, @@ -29,7 +29,9 @@ "blacklistedInStages" : [ ], "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "3", "hostPort" : "172.22.0.167:51485", @@ -62,9 +64,33 @@ "totalOffHeapStorageMemory" : 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + 
"OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] } ,{ "id" : "2", "hostPort" : "172.22.0.167:51487", @@ -97,9 +123,33 @@ "totalOffHeapStorageMemory" : 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] }, { "id" : "1", "hostPort" : "172.22.0.167:51490", @@ -132,9 +182,33 @@ "totalOffHeapStorageMemory": 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] }, { "id" : "0", "hostPort" : "172.22.0.167:51491", @@ -167,7 +241,31 @@ "totalOffHeapStorageMemory" : 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] } ] diff --git a/core/src/test/resources/HistoryServerExpectations/executor_node_blacklisting_expectation.json b/core/src/test/resources/HistoryServerExpectations/executor_node_excludeOnFailure_expectation.json similarity index 61% rename from 
core/src/test/resources/HistoryServerExpectations/executor_node_blacklisting_expectation.json rename to core/src/test/resources/HistoryServerExpectations/executor_node_excludeOnFailure_expectation.json index f14b9a5085a42..8869fb4e296e6 100644 --- a/core/src/test/resources/HistoryServerExpectations/executor_node_blacklisting_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/executor_node_excludeOnFailure_expectation.json @@ -16,7 +16,7 @@ "totalInputBytes" : 0, "totalShuffleRead" : 0, "totalShuffleWrite" : 0, - "isBlacklisted" : true, + "isBlacklisted" : false, "maxMemory" : 908381388, "addTime" : "2016-11-16T22:33:31.477GMT", "executorLogs" : { }, @@ -29,7 +29,9 @@ "blacklistedInStages" : [ ], "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "3", "hostPort" : "172.22.0.167:51485", @@ -62,9 +64,33 @@ "totalOffHeapStorageMemory" : 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] }, { "id" : "2", "hostPort" : "172.22.0.167:51487", @@ -97,9 +123,33 @@ "totalOffHeapStorageMemory" : 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] }, { "id" : "1", "hostPort" : "172.22.0.167:51490", @@ -132,9 +182,33 @@ "totalOffHeapStorageMemory": 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] }, { "id" : "0", "hostPort" 
: "172.22.0.167:51491", @@ -167,7 +241,31 @@ "totalOffHeapStorageMemory": 524288000 }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : true, + "excludedInStages" : [ ] } ] diff --git a/core/src/test/resources/HistoryServerExpectations/executor_node_blacklisting_unblacklisting_expectation.json b/core/src/test/resources/HistoryServerExpectations/executor_node_excludeOnFailure_unexcluding_expectation.json similarity index 56% rename from core/src/test/resources/HistoryServerExpectations/executor_node_blacklisting_unblacklisting_expectation.json rename to core/src/test/resources/HistoryServerExpectations/executor_node_excludeOnFailure_unexcluding_expectation.json index 3645387317ca1..21cc9d0812990 100644 --- a/core/src/test/resources/HistoryServerExpectations/executor_node_blacklisting_unblacklisting_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/executor_node_excludeOnFailure_unexcluding_expectation.json @@ -23,7 +23,9 @@ "blacklistedInStages" : [ ], "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "3", "hostPort" : "172.22.0.111:64543", @@ -50,9 +52,33 @@ "stderr" : "http://172.22.0.111:64521/logPage/?appId=app-20161115172038-0000&executorId=3&logType=stderr" }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "2", "hostPort" : "172.22.0.111:64539", @@ -79,9 +105,33 @@ "stderr" : "http://172.22.0.111:64519/logPage/?appId=app-20161115172038-0000&executorId=2&logType=stderr" }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 
0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "1", "hostPort" : "172.22.0.111:64541", @@ -108,9 +158,33 @@ "stderr" : "http://172.22.0.111:64518/logPage/?appId=app-20161115172038-0000&executorId=1&logType=stderr" }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "0", "hostPort" : "172.22.0.111:64540", @@ -137,7 +211,31 @@ "stderr" : "http://172.22.0.111:64517/logPage/?appId=app-20161115172038-0000&executorId=0&logType=stderr" }, "blacklistedInStages" : [ ], + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] } ] diff --git a/core/src/test/resources/HistoryServerExpectations/executor_resource_information_expectation.json b/core/src/test/resources/HistoryServerExpectations/executor_resource_information_expectation.json index 165389cf25027..53ae9a0c7909e 100644 --- a/core/src/test/resources/HistoryServerExpectations/executor_resource_information_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/executor_resource_information_expectation.json @@ -29,7 +29,9 @@ "blacklistedInStages" : [ ], "attributes" : { }, "resources" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "2", "hostPort" : "tomg-test:46005", @@ -79,7 +81,9 @@ "addresses" : [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ] } }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] }, { "id" : "1", "hostPort" : "tomg-test:44873", @@ -129,5 +133,7 @@ "addresses" : [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ] } }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "isExcluded" : false, + "excludedInStages" : [ ] } ] diff --git a/core/src/test/resources/HistoryServerExpectations/failed_stage_list_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/failed_stage_list_json_expectation.json index c38741646c64b..5573cf98db26a 100644 --- 
a/core/src/test/resources/HistoryServerExpectations/failed_stage_list_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/failed_stage_list_json_expectation.json @@ -43,5 +43,27 @@ "rddIds" : [ 3, 2 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } ] diff --git a/core/src/test/resources/HistoryServerExpectations/one_stage_attempt_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/one_stage_attempt_json_expectation.json index 3db7d551b6130..9edb518132e87 100644 --- a/core/src/test/resources/HistoryServerExpectations/one_stage_attempt_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/one_stage_attempt_json_expectation.json @@ -459,9 +459,54 @@ "shuffleWriteRecords" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : false + "isBlacklistedForStage" : false, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : false } }, "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } diff --git a/core/src/test/resources/HistoryServerExpectations/one_stage_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/one_stage_json_expectation.json index 8ef3769c1ca6b..9e661bdf8a034 100644 --- a/core/src/test/resources/HistoryServerExpectations/one_stage_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/one_stage_json_expectation.json @@ -459,9 +459,54 @@ "shuffleWriteRecords" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : false + "isBlacklistedForStage" : false, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" 
: 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : false } }, "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } ] diff --git a/core/src/test/resources/HistoryServerExpectations/stage_list_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/stage_list_json_expectation.json index a31c907221388..d109c73b46133 100644 --- a/core/src/test/resources/HistoryServerExpectations/stage_list_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/stage_list_json_expectation.json @@ -42,7 +42,29 @@ "rddIds" : [ 6, 5 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } }, { "status" : "FAILED", "stageId" : 2, @@ -88,7 +110,29 @@ "rddIds" : [ 3, 2 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } }, { "status" : "COMPLETE", "stageId" : 1, @@ -133,7 +177,29 @@ "rddIds" : [ 1, 0 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, 
+ "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } }, { "status" : "COMPLETE", "stageId" : 0, @@ -178,5 +244,27 @@ "rddIds" : [ 0 ], "accumulatorUpdates" : [ ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } ] diff --git a/core/src/test/resources/HistoryServerExpectations/stage_list_with_accumulable_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/stage_list_with_accumulable_json_expectation.json index 08089d4f3f65b..7901c4f93367b 100644 --- a/core/src/test/resources/HistoryServerExpectations/stage_list_with_accumulable_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/stage_list_with_accumulable_json_expectation.json @@ -46,5 +46,27 @@ "value" : "5050" } ], "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } ] diff --git a/core/src/test/resources/HistoryServerExpectations/stage_with_accumulable_json_expectation.json b/core/src/test/resources/HistoryServerExpectations/stage_with_accumulable_json_expectation.json index 3b5476ae8b160..a5958e0a093f1 100644 --- a/core/src/test/resources/HistoryServerExpectations/stage_with_accumulable_json_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/stage_with_accumulable_json_expectation.json @@ -503,9 +503,54 @@ "shuffleWriteRecords" : 0, "memoryBytesSpilled" : 0, "diskBytesSpilled" : 0, - "isBlacklistedForStage" : false + "isBlacklistedForStage" : false, + "peakMemoryMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 
0, + "MajorGCTime" : 0 + }, + "isExcludedForStage" : false } }, "killedTasksSummary" : { }, - "resourceProfileId" : 0 + "resourceProfileId" : 0, + "peakExecutorMetrics" : { + "JVMHeapMemory" : 0, + "JVMOffHeapMemory" : 0, + "OnHeapExecutionMemory" : 0, + "OffHeapExecutionMemory" : 0, + "OnHeapStorageMemory" : 0, + "OffHeapStorageMemory" : 0, + "OnHeapUnifiedMemory" : 0, + "OffHeapUnifiedMemory" : 0, + "DirectPoolMemory" : 0, + "MappedPoolMemory" : 0, + "ProcessTreeJVMVMemory" : 0, + "ProcessTreeJVMRSSMemory" : 0, + "ProcessTreePythonVMemory" : 0, + "ProcessTreePythonRSSMemory" : 0, + "ProcessTreeOtherVMemory" : 0, + "ProcessTreeOtherRSSMemory" : 0, + "MinorGCCount" : 0, + "MinorGCTime" : 0, + "MajorGCCount" : 0, + "MajorGCTime" : 0 + } } diff --git a/core/src/test/resources/HistoryServerExpectations/stage_with_peak_metrics_expectation.json b/core/src/test/resources/HistoryServerExpectations/stage_with_peak_metrics_expectation.json index 373510d23058e..20a958073245a 100644 --- a/core/src/test/resources/HistoryServerExpectations/stage_with_peak_metrics_expectation.json +++ b/core/src/test/resources/HistoryServerExpectations/stage_with_peak_metrics_expectation.json @@ -929,7 +929,8 @@ "MinorGCTime" : 0, "MajorGCCount" : 0, "MajorGCTime" : 0 - } + }, + "isExcludedForStage" : false }, "driver" : { "taskTime" : 0, @@ -968,7 +969,8 @@ "MinorGCTime" : 115, "MajorGCCount" : 4, "MajorGCTime" : 339 - } + }, + "isExcludedForStage" : false } }, "killedTasksSummary" : { }, diff --git a/core/src/test/scala/org/apache/spark/CheckpointSuite.scala b/core/src/test/scala/org/apache/spark/CheckpointSuite.scala index 21090e98ea285..e42df0821589b 100644 --- a/core/src/test/scala/org/apache/spark/CheckpointSuite.scala +++ b/core/src/test/scala/org/apache/spark/CheckpointSuite.scala @@ -635,12 +635,12 @@ class CheckpointStorageSuite extends SparkFunSuite with LocalSparkContext { // Verify that RDD is checkpointed assert(rdd.firstParent.isInstanceOf[ReliableCheckpointRDD[_]]) val checkpointedRDD = rdd.firstParent.asInstanceOf[ReliableCheckpointRDD[_]] - val partiton = checkpointedRDD.partitions(0) - assert(!checkpointedRDD.cachedPreferredLocations.asMap.containsKey(partiton)) + val partition = checkpointedRDD.partitions(0) + assert(!checkpointedRDD.cachedPreferredLocations.asMap.containsKey(partition)) - val preferredLoc = checkpointedRDD.preferredLocations(partiton) - assert(checkpointedRDD.cachedPreferredLocations.asMap.containsKey(partiton)) - assert(preferredLoc == checkpointedRDD.cachedPreferredLocations.get(partiton)) + val preferredLoc = checkpointedRDD.preferredLocations(partition) + assert(checkpointedRDD.cachedPreferredLocations.asMap.containsKey(partition)) + assert(preferredLoc == checkpointedRDD.cachedPreferredLocations.get(partition)) } } @@ -653,7 +653,7 @@ class CheckpointStorageSuite extends SparkFunSuite with LocalSparkContext { val rdd = sc.makeRDD(1 to 200, numSlices = 4).repartition(1).mapPartitions { iter => iter.map { i => if (i > 100 && TaskContext.get().stageAttemptNumber() == 0) { - // throw new SparkException("Make first attemp failed.") + // throw new SparkException("Make first attempt failed.") // Throw FetchFailedException to explicitly trigger stage resubmission. // A normal exception will only trigger task resubmission in the same stage. 
throw new FetchFailedException(null, 0, 0L, 0, 0, "Fake") diff --git a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala index 81530a8fda84d..5434e82c95b1b 100644 --- a/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala +++ b/core/src/test/scala/org/apache/spark/ContextCleanerSuite.scala @@ -368,7 +368,7 @@ class CleanerTester( val toBeCleanedRDDIds = new HashSet[Int] ++= rddIds val toBeCleanedShuffleIds = new HashSet[Int] ++= shuffleIds - val toBeCleanedBroadcstIds = new HashSet[Long] ++= broadcastIds + val toBeCleanedBroadcastIds = new HashSet[Long] ++= broadcastIds val toBeCheckpointIds = new HashSet[Long] ++= checkpointIds val isDistributed = !sc.isLocal @@ -384,7 +384,7 @@ class CleanerTester( } def broadcastCleaned(broadcastId: Long): Unit = { - toBeCleanedBroadcstIds.synchronized { toBeCleanedBroadcstIds -= broadcastId } + toBeCleanedBroadcastIds.synchronized { toBeCleanedBroadcastIds -= broadcastId } logInfo("Broadcast " + broadcastId + " cleaned") } @@ -508,8 +508,8 @@ class CleanerTester( val s2 = toBeCleanedShuffleIds.synchronized { toBeCleanedShuffleIds.toSeq.sorted.mkString("[", ", ", "]") } - val s3 = toBeCleanedBroadcstIds.synchronized { - toBeCleanedBroadcstIds.toSeq.sorted.mkString("[", ", ", "]") + val s3 = toBeCleanedBroadcastIds.synchronized { + toBeCleanedBroadcastIds.toSeq.sorted.mkString("[", ", ", "]") } s""" |\tRDDs = $s1 @@ -521,7 +521,7 @@ class CleanerTester( private def isAllCleanedUp = toBeCleanedRDDIds.synchronized { toBeCleanedRDDIds.isEmpty } && toBeCleanedShuffleIds.synchronized { toBeCleanedShuffleIds.isEmpty } && - toBeCleanedBroadcstIds.synchronized { toBeCleanedBroadcstIds.isEmpty } && + toBeCleanedBroadcastIds.synchronized { toBeCleanedBroadcastIds.isEmpty } && toBeCheckpointIds.synchronized { toBeCheckpointIds.isEmpty } private def getRDDBlocks(rddId: Int): Seq[BlockId] = { diff --git a/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala b/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala index 3f8cbf59bf527..5ae596b03d5fe 100644 --- a/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala @@ -28,7 +28,7 @@ import org.scalatest.PrivateMethodTester import org.apache.spark.executor.ExecutorMetrics import org.apache.spark.internal.config import org.apache.spark.internal.config.DECOMMISSION_ENABLED -import org.apache.spark.internal.config.Tests.TEST_SCHEDULE_INTERVAL +import org.apache.spark.internal.config.Tests.TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED import org.apache.spark.metrics.MetricsSystem import org.apache.spark.resource._ import org.apache.spark.resource.ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID @@ -268,7 +268,7 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite { test("add executors multiple profiles initial num same as needed") { // test when the initial number of executors equals the number needed for the first - // stage using a non default profile to make sure we request the intitial number + // stage using a non default profile to make sure we request the initial number // properly. Here initial is 2, each executor in ResourceProfile 1 can have 2 tasks // per executor, and start a stage with 4 tasks, which would need 2 executors. 
val clock = new ManualClock(8888L) @@ -524,7 +524,7 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite { assert(numExecutorsTarget(manager, defaultProfile.id) === 1) assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) == 1) - // Stage 0 becomes unschedulable due to blacklisting + // Stage 0 becomes unschedulable due to excludeOnFailure post(SparkListenerUnschedulableTaskSetAdded(0, 0)) clock.advance(1000) manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime()) @@ -580,7 +580,7 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite { post(SparkListenerTaskEnd(0, 0, null, Success, t2Info, new ExecutorMetrics, null)) post(SparkListenerStageCompleted(createStageInfo(0, 2))) - // Stage 1 and 2 becomes unschedulable now due to blacklisting + // Stage 1 and 2 becomes unschedulable now due to excludeOnFailure post(SparkListenerUnschedulableTaskSetAdded(1, 0)) post(SparkListenerUnschedulableTaskSetAdded(2, 0)) @@ -637,7 +637,7 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite { (0 to 3).foreach { i => assert(removeExecutorDefaultProfile(manager, i.toString)) } (0 to 3).foreach { i => onExecutorRemoved(manager, i.toString) } - // Now due to blacklisting, the task becomes unschedulable + // Now due to executor being excluded, the task becomes unschedulable post(SparkListenerUnschedulableTaskSetAdded(0, 0)) clock.advance(1000) manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime()) @@ -1588,7 +1588,7 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite { test("SPARK-23365 Don't update target num executors when killing idle executors") { val clock = new ManualClock() val manager = createManager( - createConf(1, 2, 1).set(config.DYN_ALLOCATION_TESTING, false), + createConf(1, 2, 1), clock = clock) when(client.requestTotalExecutors(any(), any(), any())).thenReturn(true) @@ -1616,19 +1616,17 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite { clock.advance(1000) manager invokePrivate _updateAndSyncNumExecutorsTarget(clock.nanoTime()) assert(numExecutorsTargetForDefaultProfileId(manager) === 1) - verify(client, never).killExecutors(any(), any(), any(), any()) + assert(manager.executorMonitor.executorsPendingToRemove().isEmpty) // now we cross the idle timeout for executor-1, so we kill it. the really important // thing here is that we do *not* ask the executor allocation client to adjust the target // number of executors down - when(client.killExecutors(Seq("executor-1"), false, false, false)) - .thenReturn(Seq("executor-1")) clock.advance(3000) schedule(manager) assert(maxNumExecutorsNeededPerResourceProfile(manager, defaultProfile) === 1) assert(numExecutorsTargetForDefaultProfileId(manager) === 1) // here's the important verify -- we did kill the executors, but did not adjust the target count - verify(client).killExecutors(Seq("executor-1"), false, false, false) + assert(manager.executorMonitor.executorsPendingToRemove() === Set("executor-1")) } test("SPARK-26758 check executor target number after idle time out ") { @@ -1665,9 +1663,10 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite { .set(config.DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT.key, s"${executorIdleTimeout.toString}s") .set(config.SHUFFLE_SERVICE_ENABLED, true) .set(config.DYN_ALLOCATION_TESTING, true) - // SPARK-22864: effectively disable the allocation schedule by setting the period to a - // really long value. 
- .set(TEST_SCHEDULE_INTERVAL, 30000L) + // SPARK-22864/SPARK-32287: effectively disable the allocation schedule for the tests so that + // we won't result in the race condition between thread "spark-dynamic-executor-allocation" + // and thread "pool-1-thread-1-ScalaTest-running". + .set(TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED, false) .set(DECOMMISSION_ENABLED, decommissioningEnabled) sparkConf } diff --git a/core/src/test/scala/org/apache/spark/FileSuite.scala b/core/src/test/scala/org/apache/spark/FileSuite.scala index e9ee6b5dfb665..f953bf4043f33 100644 --- a/core/src/test/scala/org/apache/spark/FileSuite.scala +++ b/core/src/test/scala/org/apache/spark/FileSuite.scala @@ -170,7 +170,7 @@ class FileSuite extends SparkFunSuite with LocalSparkContext { val nums = sc.makeRDD(1 to 3).map(x => (x, "a" * x)) // (1,a), (2,aa), (3,aaa) nums.saveAsSequenceFile(outputDir) // Similar to the tests above, we read a SequenceFile, but this time we pass type params - // that are convertable to Writable instead of calling sequenceFile[IntWritable, Text] + // that are convertible to Writable instead of calling sequenceFile[IntWritable, Text] val output1 = sc.sequenceFile[Int, String](outputDir) assert(output1.collect().toList === List((1, "a"), (2, "aa"), (3, "aaa"))) // Also try having one type be a subclass of Writable and one not diff --git a/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala b/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala index a2e70b23a3e5d..c9d43f517afba 100644 --- a/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala +++ b/core/src/test/scala/org/apache/spark/HeartbeatReceiverSuite.scala @@ -76,7 +76,7 @@ class HeartbeatReceiverSuite sc = spy(new SparkContext(conf)) scheduler = mock(classOf[TaskSchedulerImpl]) when(sc.taskScheduler).thenReturn(scheduler) - when(scheduler.nodeBlacklist).thenReturn(Predef.Set[String]()) + when(scheduler.excludedNodes).thenReturn(Predef.Set[String]()) when(scheduler.sc).thenReturn(sc) heartbeatReceiverClock = new ManualClock heartbeatReceiver = new HeartbeatReceiver(sc, heartbeatReceiverClock) diff --git a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala index ebdf2f59a2770..770ffeef4106f 100644 --- a/core/src/test/scala/org/apache/spark/SparkContextSuite.scala +++ b/core/src/test/scala/org/apache/spark/SparkContextSuite.scala @@ -160,6 +160,85 @@ class SparkContextSuite extends SparkFunSuite with LocalSparkContext with Eventu } } + test("SPARK-33530: basic case for addArchive and listArchives") { + withTempDir { dir => + val file1 = File.createTempFile("someprefix1", "somesuffix1", dir) + val file2 = File.createTempFile("someprefix2", "somesuffix2", dir) + val file3 = File.createTempFile("someprefix3", "somesuffix3", dir) + val file4 = File.createTempFile("someprefix4", "somesuffix4", dir) + + val jarFile = new File(dir, "test!@$jar.jar") + val zipFile = new File(dir, "test-zip.zip") + val relativePath1 = + s"${zipFile.getParent}/../${zipFile.getParentFile.getName}/${zipFile.getName}" + val relativePath2 = + s"${jarFile.getParent}/../${jarFile.getParentFile.getName}/${jarFile.getName}#zoo" + + try { + Files.write("somewords1", file1, StandardCharsets.UTF_8) + Files.write("somewords22", file2, StandardCharsets.UTF_8) + Files.write("somewords333", file3, StandardCharsets.UTF_8) + Files.write("somewords4444", file4, StandardCharsets.UTF_8) + val length1 = file1.length() + val length2 = file2.length() + val length3 = 
file1.length() + val length4 = file2.length() + + createJar(Seq(file1, file2), jarFile) + createJar(Seq(file3, file4), zipFile) + + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local")) + sc.addArchive(jarFile.getAbsolutePath) + sc.addArchive(relativePath1) + sc.addArchive(s"${jarFile.getAbsolutePath}#foo") + sc.addArchive(s"${zipFile.getAbsolutePath}#bar") + sc.addArchive(relativePath2) + + sc.parallelize(Array(1), 1).map { x => + val gotten1 = new File(SparkFiles.get(jarFile.getName)) + val gotten2 = new File(SparkFiles.get(zipFile.getName)) + val gotten3 = new File(SparkFiles.get("foo")) + val gotten4 = new File(SparkFiles.get("bar")) + val gotten5 = new File(SparkFiles.get("zoo")) + + Seq(gotten1, gotten2, gotten3, gotten4, gotten5).foreach { gotten => + if (!gotten.exists()) { + throw new SparkException(s"The archive doesn't exist: ${gotten.getAbsolutePath}") + } + if (!gotten.isDirectory) { + throw new SparkException(s"The archive was not unpacked: ${gotten.getAbsolutePath}") + } + } + + // Jars + Seq(gotten1, gotten3, gotten5).foreach { gotten => + val actualLength1 = new File(gotten, file1.getName).length() + val actualLength2 = new File(gotten, file2.getName).length() + if (actualLength1 != length1 || actualLength2 != length2) { + s"Unpacked files have different lengths $actualLength1 and $actualLength2. at " + + s"${gotten.getAbsolutePath}. They should be $length1 and $length2." + } + } + + // Zip + Seq(gotten2, gotten4).foreach { gotten => + val actualLength3 = new File(gotten, file1.getName).length() + val actualLength4 = new File(gotten, file2.getName).length() + if (actualLength3 != length3 || actualLength4 != length4) { + s"Unpacked files have different lengths $actualLength3 and $actualLength4. at " + + s"${gotten.getAbsolutePath}. They should be $length3 and $length4." 
+ } + } + x + }.count() + assert(sc.listArchives().count(_.endsWith("test!@$jar.jar")) == 1) + assert(sc.listArchives().count(_.contains("test-zip.zip")) == 2) + } finally { + sc.stop() + } + } + } + test("add and list jar files") { val jarPath = Thread.currentThread().getContextClassLoader.getResource("TestUDTF.jar") try { @@ -955,6 +1034,122 @@ class SparkContextSuite extends SparkFunSuite with LocalSparkContext with Eventu .set(EXECUTOR_ALLOW_SPARK_CONTEXT, true)).stop() } } + + test("SPARK-33084: Add jar support Ivy URI -- default transitive = false") { + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(!sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true") + assert(sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + } + + test("SPARK-33084: Add jar support Ivy URI -- invalid transitive use default false") { + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=foo") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(!sc.listJars().exists(_.contains("org.slf4j_slf4j-api-1.7.10.jar"))) + assert(!sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + } + + test("SPARK-33084: Add jar support Ivy URI -- transitive=true will download dependency jars") { + val logAppender = new LogAppender("transitive=true will download dependency jars") + withLogAppender(logAppender) { + sc = new SparkContext( + new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true") + val dependencyJars = Array( + "org.apache.hive_hive-storage-api-2.7.0.jar", + "org.slf4j_slf4j-api-1.7.10.jar", + "commons-lang_commons-lang-2.6.jar") + + dependencyJars.foreach(jar => assert(sc.listJars().exists(_.contains(jar)))) + + assert(logAppender.loggingEvents.count(_.getRenderedMessage.contains( + "Added dependency jars of Ivy URI" + + " ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true")) == 1) + + // test dependency jars exist + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true") + assert(logAppender.loggingEvents.count(_.getRenderedMessage.contains( + "The dependency jars of Ivy URI" + + " ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true")) == 1) + val existMsg = logAppender.loggingEvents.filter(_.getRenderedMessage.contains( + "The dependency jars of Ivy URI" + + " ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true")) + .head.getRenderedMessage + dependencyJars.foreach(jar => assert(existMsg.contains(jar))) + } + } + + test("SPARK-33084: Add jar support Ivy URI -- test exclude param when transitive=true") { + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0" + + "?exclude=commons-lang:commons-lang&transitive=true") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(sc.listJars().exists(_.contains("org.slf4j_slf4j-api-1.7.10.jar"))) + assert(!sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + } + + test("SPARK-33084: Add 
jar support Ivy URI -- test different version") { + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0") + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.6.0") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.6.0.jar"))) + } + + test("SPARK-33084: Add jar support Ivy URI -- test invalid param") { + val logAppender = new LogAppender("test log when have invalid parameter") + withLogAppender(logAppender) { + sc = new SparkContext( + new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?" + + "invalidParam1=foo&invalidParam2=boo") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(logAppender.loggingEvents.exists(_.getRenderedMessage.contains( + "Invalid parameters `invalidParam1,invalidParam2` found in Ivy URI query" + + " `invalidParam1=foo&invalidParam2=boo`."))) + } + } + + test("SPARK-33084: Add jar support Ivy URI -- test multiple transitive params") { + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + // transitive=invalidValue will win and treated as false + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?" + + "transitive=true&transitive=invalidValue") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(!sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + + // transitive=true will win + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?" + + "transitive=false&transitive=invalidValue&transitive=true") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + } + + test("SPARK-33084: Add jar support Ivy URI -- test param key case sensitive") { + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?TRANSITIVE=true") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(!sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + } + + test("SPARK-33084: Add jar support Ivy URI -- test transitive value case sensitive") { + sc = new SparkContext(new SparkConf().setAppName("test").setMaster("local-cluster[3, 1, 1024]")) + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=TRUE") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(!sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + + sc.addJar("ivy://org.apache.hive:hive-storage-api:2.7.0?transitive=true") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-storage-api-2.7.0.jar"))) + assert(sc.listJars().exists(_.contains("commons-lang_commons-lang-2.6.jar"))) + } } object SparkContextSuite { diff --git a/core/src/test/scala/org/apache/spark/StatusTrackerSuite.scala b/core/src/test/scala/org/apache/spark/StatusTrackerSuite.scala index fae6c4af1240c..e6d3377120e56 
100644 --- a/core/src/test/scala/org/apache/spark/StatusTrackerSuite.scala +++ b/core/src/test/scala/org/apache/spark/StatusTrackerSuite.scala @@ -18,7 +18,6 @@ package org.apache.spark import scala.concurrent.duration._ -import scala.language.implicitConversions import org.scalatest.concurrent.Eventually._ import org.scalatest.matchers.must.Matchers diff --git a/core/src/test/scala/org/apache/spark/TempLocalSparkContext.scala b/core/src/test/scala/org/apache/spark/TempLocalSparkContext.scala new file mode 100644 index 0000000000000..6d5fcd1edfb03 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/TempLocalSparkContext.scala @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark + +import _root_.io.netty.util.internal.logging.{InternalLoggerFactory, Slf4JLoggerFactory} +import org.scalatest.BeforeAndAfterAll +import org.scalatest.BeforeAndAfterEach +import org.scalatest.Suite + +import org.apache.spark.internal.Logging +import org.apache.spark.resource.ResourceProfile + +/** + * Manages a local `sc` `SparkContext` variable, correctly stopping it after each test. + * + * Note: this class is a copy of [[LocalSparkContext]]. Why copy it? Reduce conflict. Because + * many test suites use [[LocalSparkContext]] and overwrite some variable or function (e.g. + * sc of LocalSparkContext), there occurs conflict when we refactor the `sc` as a new function. + * After migrating all test suites that use [[LocalSparkContext]] to use + * [[TempLocalSparkContext]], we will delete the original [[LocalSparkContext]] and rename + * [[TempLocalSparkContext]] to [[LocalSparkContext]]. + */ +trait TempLocalSparkContext extends BeforeAndAfterEach + with BeforeAndAfterAll with Logging { self: Suite => + + private var _conf: SparkConf = defaultSparkConf + + @transient private var _sc: SparkContext = _ + + def conf: SparkConf = _conf + + /** + * Currently, we are focusing on the reconstruction of LocalSparkContext, so this method + * was created temporarily. When the migration work is completed, this method will be + * renamed to `sc` and the variable `sc` will be deleted. 
+ */ + def sc: SparkContext = { + if (_sc == null) { + _sc = new SparkContext(_conf) + } + _sc + } + + override def beforeAll(): Unit = { + super.beforeAll() + InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE) + } + + override def afterEach(): Unit = { + try { + resetSparkContext() + } finally { + super.afterEach() + } + } + + def resetSparkContext(): Unit = { + TempLocalSparkContext.stop(_sc) + ResourceProfile.clearDefaultProfile() + _sc = null + _conf = defaultSparkConf + } + + private def defaultSparkConf: SparkConf = new SparkConf() + .setMaster("local[2]").setAppName(s"${this.getClass.getSimpleName}") +} + +object TempLocalSparkContext { + def stop(sc: SparkContext): Unit = { + if (sc != null) { + sc.stop() + } + // To avoid RPC rebinding to the same port, since it doesn't unbind immediately on shutdown + System.clearProperty("spark.driver.port") + } + + /** Runs `f` by passing in `sc` and ensures that `sc` is stopped. */ + def withSpark[T](sc: SparkContext)(f: SparkContext => T): T = { + try { + f(sc) + } finally { + stop(sc) + } + } +} diff --git a/core/src/test/scala/org/apache/spark/benchmark/Benchmark.scala b/core/src/test/scala/org/apache/spark/benchmark/Benchmark.scala index 72c05a92848ff..5511852ca176e 100644 --- a/core/src/test/scala/org/apache/spark/benchmark/Benchmark.scala +++ b/core/src/test/scala/org/apache/spark/benchmark/Benchmark.scala @@ -26,7 +26,6 @@ import scala.util.Try import org.apache.commons.io.output.TeeOutputStream import org.apache.commons.lang3.SystemUtils -import org.scalatest.Assertions._ import org.apache.spark.util.Utils diff --git a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala index e97b9d5d6bea6..eff4fd20d7fca 100644 --- a/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala +++ b/core/src/test/scala/org/apache/spark/benchmark/BenchmarkBase.scala @@ -21,7 +21,7 @@ import java.io.{File, FileOutputStream, OutputStream} /** * A base class for generate benchmark results to a file. - * For JDK9+, JDK major version number is added to the file names to distingush the results. + * For JDK9+, JDK major version number is added to the file names to distinguish the results. 
*/ abstract class BenchmarkBase { var output: Option[OutputStream] = None diff --git a/core/src/test/scala/org/apache/spark/deploy/DecommissionWorkerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/DecommissionWorkerSuite.scala index 9c5e460854053..abe5b7a71ca63 100644 --- a/core/src/test/scala/org/apache/spark/deploy/DecommissionWorkerSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/DecommissionWorkerSuite.scala @@ -28,7 +28,7 @@ import org.scalatest.BeforeAndAfterEach import org.scalatest.concurrent.Eventually._ import org.apache.spark._ -import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState, WorkerDecommission} +import org.apache.spark.deploy.DeployMessages.{DecommissionWorkers, MasterStateResponse, RequestMasterState} import org.apache.spark.deploy.master.{ApplicationInfo, Master, WorkerInfo} import org.apache.spark.deploy.worker.Worker import org.apache.spark.internal.{config, Logging} @@ -414,7 +414,7 @@ class DecommissionWorkerSuite def decommissionWorkerOnMaster(workerInfo: WorkerInfo, reason: String): Unit = { logInfo(s"Trying to decommission worker ${workerInfo.id} for reason `$reason`") - master.self.send(WorkerDecommission(workerInfo.id, workerInfo.endpoint)) + master.self.send(DecommissionWorkers(Seq(workerInfo.id))) } def killWorkerAfterTimeout(workerInfo: WorkerInfo, secondsToWait: Int): Unit = { diff --git a/core/src/test/scala/org/apache/spark/deploy/ExternalShuffleServiceMetricsSuite.scala b/core/src/test/scala/org/apache/spark/deploy/ExternalShuffleServiceMetricsSuite.scala index d681c13337e0d..ea4d252f0dbae 100644 --- a/core/src/test/scala/org/apache/spark/deploy/ExternalShuffleServiceMetricsSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/ExternalShuffleServiceMetricsSuite.scala @@ -61,7 +61,8 @@ class ExternalShuffleServiceMetricsSuite extends SparkFunSuite { "registeredExecutorsSize", "registerExecutorRequestLatencyMillis", "shuffle-server.usedDirectMemory", - "shuffle-server.usedHeapMemory") + "shuffle-server.usedHeapMemory", + "finalizeShuffleMergeLatencyMillis") ) } } diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala index b5b3751439750..edcebf5fc60dd 100644 --- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitSuite.scala @@ -47,7 +47,7 @@ import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ import org.apache.spark.internal.config.UI._ import org.apache.spark.launcher.SparkLauncher -import org.apache.spark.util.{CommandLineUtils, ResetSystemProperties, Utils} +import org.apache.spark.util.{CommandLineUtils, DependencyUtils, ResetSystemProperties, Utils} trait TestPrematureExit { suite: SparkFunSuite => @@ -335,6 +335,43 @@ class SparkSubmitSuite sys.props("SPARK_SUBMIT") should be ("true") } + test("SPARK-33530: handles standalone mode with archives") { + val clArgs = Seq( + "--master", "spark://localhost:1234", + "--executor-memory", "5g", + "--executor-cores", "5", + "--class", "org.SomeClass", + "--jars", "one.jar,two.jar,three.jar", + "--driver-memory", "4g", + "--files", "file1.txt,file2.txt", + "--archives", "archive1.zip,archive2.jar", + "--num-executors", "6", + "--name", "beauty", + "--conf", "spark.ui.enabled=false", + "thejar.jar", + "arg1", "arg2") + val appArgs = new SparkSubmitArguments(clArgs) + val (childArgs, classpath, conf, mainClass) = 
submit.prepareSubmitEnvironment(appArgs) + val childArgsStr = childArgs.mkString(" ") + childArgsStr should include ("arg1 arg2") + mainClass should be ("org.SomeClass") + + classpath(0) should endWith ("thejar.jar") + classpath(1) should endWith ("one.jar") + classpath(2) should endWith ("two.jar") + classpath(3) should endWith ("three.jar") + + conf.get("spark.executor.memory") should be ("5g") + conf.get("spark.driver.memory") should be ("4g") + conf.get("spark.executor.cores") should be ("5") + conf.get("spark.jars") should include regex (".*one.jar,.*two.jar,.*three.jar") + conf.get("spark.files") should include regex (".*file1.txt,.*file2.txt") + conf.get("spark.archives") should include regex (".*archive1.zip,.*archive2.jar") + conf.get("spark.app.name") should be ("beauty") + conf.get(UI_ENABLED) should be (false) + sys.props("SPARK_SUBMIT") should be ("true") + } + test("handles standalone cluster mode") { testStandaloneCluster(useRest = true) } @@ -1084,8 +1121,7 @@ class SparkSubmitSuite val sparkConf = new SparkConf(false) intercept[IOException] { DependencyUtils.downloadFile( - "abc:/my/file", Utils.createTempDir(), sparkConf, new Configuration(), - new SecurityManager(sparkConf)) + "abc:/my/file", Utils.createTempDir(), sparkConf, new Configuration()) } } @@ -1095,19 +1131,17 @@ class SparkSubmitSuite val tmpDir = Utils.createTempDir() updateConfWithFakeS3Fs(hadoopConf) intercept[FileNotFoundException] { - DependencyUtils.downloadFile("s3a:/no/such/file", tmpDir, sparkConf, hadoopConf, - new SecurityManager(sparkConf)) + DependencyUtils.downloadFile("s3a:/no/such/file", tmpDir, sparkConf, hadoopConf) } } test("downloadFile does not download local file") { val sparkConf = new SparkConf(false) - val secMgr = new SecurityManager(sparkConf) // empty path is considered as local file. 
val tmpDir = Files.createTempDirectory("tmp").toFile - assert(DependencyUtils.downloadFile("", tmpDir, sparkConf, new Configuration(), secMgr) === "") - assert(DependencyUtils.downloadFile("/local/file", tmpDir, sparkConf, new Configuration(), - secMgr) === "/local/file") + assert(DependencyUtils.downloadFile("", tmpDir, sparkConf, new Configuration()) === "") + assert(DependencyUtils.downloadFile( + "/local/file", tmpDir, sparkConf, new Configuration()) === "/local/file") } test("download one file to local") { @@ -1120,8 +1154,7 @@ class SparkSubmitSuite val tmpDir = Files.createTempDirectory("tmp").toFile updateConfWithFakeS3Fs(hadoopConf) val sourcePath = s"s3a://${jarFile.toURI.getPath}" - val outputPath = DependencyUtils.downloadFile(sourcePath, tmpDir, sparkConf, hadoopConf, - new SecurityManager(sparkConf)) + val outputPath = DependencyUtils.downloadFile(sourcePath, tmpDir, sparkConf, hadoopConf) checkDownloadedFile(sourcePath, outputPath) deleteTempOutputFile(outputPath) } @@ -1137,8 +1170,7 @@ class SparkSubmitSuite updateConfWithFakeS3Fs(hadoopConf) val sourcePaths = Seq("/local/file", s"s3a://${jarFile.toURI.getPath}") val outputPaths = DependencyUtils - .downloadFileList(sourcePaths.mkString(","), tmpDir, sparkConf, hadoopConf, - new SecurityManager(sparkConf)) + .downloadFileList(sourcePaths.mkString(","), tmpDir, sparkConf, hadoopConf) .split(",") assert(outputPaths.length === sourcePaths.length) @@ -1152,7 +1184,6 @@ class SparkSubmitSuite val fs = File.separator val sparkConf = new SparkConf(false) val hadoopConf = new Configuration() - val secMgr = new SecurityManager(sparkConf) val appJarName = "myApp.jar" val jar1Name = "myJar1.jar" @@ -1160,8 +1191,7 @@ class SparkSubmitSuite val userJar = s"file:/path${fs}to${fs}app${fs}jar$fs$appJarName" val jars = s"file:/$jar1Name,file:/$appJarName,file:/$jar2Name" - val resolvedJars = DependencyUtils - .resolveAndDownloadJars(jars, userJar, sparkConf, hadoopConf, secMgr) + val resolvedJars = DependencyUtils.resolveAndDownloadJars(jars, userJar, sparkConf, hadoopConf) assert(!resolvedJars.contains(appJarName)) assert(resolvedJars.contains(jar1Name) && resolvedJars.contains(jar2Name)) diff --git a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala index 2a37f75d86a41..7819b3aef4d6c 100644 --- a/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/SparkSubmitUtilsSuite.scala @@ -123,20 +123,17 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { test("ivy path works correctly") { val md = SparkSubmitUtils.getModuleDescriptor val artifacts = for (i <- 0 until 3) yield new MDArtifact(md, s"jar-$i", "jar", "jar") - var jPaths = SparkSubmitUtils.resolveDependencyPaths(artifacts.toArray, new File(tempIvyPath)) - for (i <- 0 until 3) { - val index = jPaths.indexOf(tempIvyPath) - assert(index >= 0) - jPaths = jPaths.substring(index + tempIvyPath.length) - } + val jPaths = SparkSubmitUtils.resolveDependencyPaths(artifacts.toArray, new File(tempIvyPath)) + assert(jPaths.count(_.startsWith(tempIvyPath)) >= 3) val main = MavenCoordinate("my.awesome.lib", "mylib", "0.1") IvyTestUtils.withRepository(main, None, None) { repo => // end to end val jarPath = SparkSubmitUtils.resolveMavenCoordinates( main.toString, SparkSubmitUtils.buildIvySettings(Option(repo), Some(tempIvyPath)), + transitive = true, isTest = true) - assert(jarPath.indexOf(tempIvyPath) >= 0, 
"should use non-default ivy path") + assert(jarPath.forall(_.indexOf(tempIvyPath) >= 0), "should use non-default ivy path") } } @@ -148,9 +145,10 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { val jarPath = SparkSubmitUtils.resolveMavenCoordinates( main.toString, SparkSubmitUtils.buildIvySettings(None, Some(tempIvyPath)), + transitive = true, isTest = true) - assert(jarPath.indexOf("mylib") >= 0, "should find artifact") - assert(jarPath.indexOf("mydep") >= 0, "should find dependency") + assert(jarPath.exists(_.indexOf("mylib") >= 0), "should find artifact") + assert(jarPath.exists(_.indexOf("mydep") >= 0), "should find dependency") } // Local Ivy Repository val settings = new IvySettings @@ -159,9 +157,10 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { val jarPath = SparkSubmitUtils.resolveMavenCoordinates( main.toString, SparkSubmitUtils.buildIvySettings(None, Some(tempIvyPath)), + transitive = true, isTest = true) - assert(jarPath.indexOf("mylib") >= 0, "should find artifact") - assert(jarPath.indexOf("mydep") >= 0, "should find dependency") + assert(jarPath.exists(_.indexOf("mylib") >= 0), "should find artifact") + assert(jarPath.exists(_.indexOf("mydep") >= 0), "should find dependency") } // Local ivy repository with modified home val dummyIvyLocal = new File(tempIvyPath, "local" + File.separator) @@ -171,10 +170,11 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { val jarPath = SparkSubmitUtils.resolveMavenCoordinates( main.toString, SparkSubmitUtils.buildIvySettings(None, Some(tempIvyPath)), + transitive = true, isTest = true) - assert(jarPath.indexOf("mylib") >= 0, "should find artifact") - assert(jarPath.indexOf(tempIvyPath) >= 0, "should be in new ivy path") - assert(jarPath.indexOf("mydep") >= 0, "should find dependency") + assert(jarPath.exists(_.indexOf("mylib") >= 0), "should find artifact") + assert(jarPath.forall(_.indexOf(tempIvyPath) >= 0), "should be in new ivy path") + assert(jarPath.exists(_.indexOf("mydep") >= 0), "should find dependency") } } @@ -183,6 +183,7 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { SparkSubmitUtils.resolveMavenCoordinates( "a:b:c", SparkSubmitUtils.buildIvySettings(None, Some(tempIvyPath)), + transitive = true, isTest = true) } } @@ -195,15 +196,17 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { val path = SparkSubmitUtils.resolveMavenCoordinates( coordinates, SparkSubmitUtils.buildIvySettings(None, Some(tempIvyPath)), + transitive = true, isTest = true) - assert(path === "", "should return empty path") + assert(path.isEmpty, "should return empty path") val main = MavenCoordinate("org.apache.spark", "spark-streaming-kafka-assembly_2.12", "1.2.0") IvyTestUtils.withRepository(main, None, None) { repo => val files = SparkSubmitUtils.resolveMavenCoordinates( coordinates + "," + main.toString, SparkSubmitUtils.buildIvySettings(Some(repo), Some(tempIvyPath)), + transitive = true, isTest = true) - assert(files.indexOf(main.artifactId) >= 0, "Did not return artifact") + assert(files.forall(_.indexOf(main.artifactId) >= 0), "Did not return artifact") } } @@ -214,10 +217,11 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { val files = SparkSubmitUtils.resolveMavenCoordinates( main.toString, SparkSubmitUtils.buildIvySettings(Some(repo), Some(tempIvyPath)), - Seq("my.great.dep:mydep"), + exclusions = Seq("my.great.dep:mydep"), + transitive = true, isTest = true) - 
assert(files.indexOf(main.artifactId) >= 0, "Did not return artifact") - assert(files.indexOf("my.great.dep") < 0, "Returned excluded artifact") + assert(files.forall(_.indexOf(main.artifactId) >= 0), "Did not return artifact") + assert(files.forall(_.indexOf("my.great.dep") < 0), "Returned excluded artifact") } } @@ -250,10 +254,11 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { testUtilSettings.setDefaultIvyUserDir(new File(tempIvyPath)) IvyTestUtils.withRepository(main, Some(dep), Some(dummyIvyLocal), useIvyLayout = true, ivySettings = testUtilSettings) { repo => - val jarPath = SparkSubmitUtils.resolveMavenCoordinates(main.toString, settings, isTest = true) - assert(jarPath.indexOf("mylib") >= 0, "should find artifact") - assert(jarPath.indexOf(tempIvyPath) >= 0, "should be in new ivy path") - assert(jarPath.indexOf("mydep") >= 0, "should find dependency") + val jarPath = SparkSubmitUtils.resolveMavenCoordinates(main.toString, settings, + transitive = true, isTest = true) + assert(jarPath.exists(_.indexOf("mylib") >= 0), "should find artifact") + assert(jarPath.forall(_.indexOf(tempIvyPath) >= 0), "should be in new ivy path") + assert(jarPath.exists(_.indexOf("mydep") >= 0), "should find dependency") } } @@ -265,6 +270,7 @@ class SparkSubmitUtilsSuite extends SparkFunSuite with BeforeAndAfterAll { val jarPath = SparkSubmitUtils.resolveMavenCoordinates( main.toString, ivySettings, + transitive = true, isTest = true) val r = """.*org.apache.spark-spark-submit-parent-.*""".r assert(!ivySettings.getDefaultCache.listFiles.map(_.getName) diff --git a/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala b/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala index c7c3ad27675fa..e47181719a9db 100644 --- a/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/StandaloneDynamicAllocationSuite.scala @@ -21,7 +21,7 @@ import scala.collection.mutable import scala.concurrent.duration._ import org.mockito.ArgumentMatchers.any -import org.mockito.Mockito.{mock, verify, when} +import org.mockito.Mockito.{mock, when} import org.scalatest.{BeforeAndAfterAll, PrivateMethodTester} import org.scalatest.concurrent.Eventually._ @@ -497,19 +497,19 @@ class StandaloneDynamicAllocationSuite } } - test("executor registration on a blacklisted host must fail") { + test("executor registration on a excluded host must fail") { // The context isn't really used by the test, but it helps with creating a test scheduler, // since CoarseGrainedSchedulerBackend makes a lot of calls to the context instance. 
- sc = new SparkContext(appConf.set(config.BLACKLIST_ENABLED.key, "true")) + sc = new SparkContext(appConf.set(config.EXCLUDE_ON_FAILURE_ENABLED.key, "true")) val endpointRef = mock(classOf[RpcEndpointRef]) val mockAddress = mock(classOf[RpcAddress]) when(endpointRef.address).thenReturn(mockAddress) - val message = RegisterExecutor("one", endpointRef, "blacklisted-host", 10, Map.empty, + val message = RegisterExecutor("one", endpointRef, "excluded-host", 10, Map.empty, Map.empty, Map.empty, ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID) val taskScheduler = mock(classOf[TaskSchedulerImpl]) - when(taskScheduler.nodeBlacklist()).thenReturn(Set("blacklisted-host")) + when(taskScheduler.excludedNodes()).thenReturn(Set("excluded-host")) when(taskScheduler.resourceOffers(any(), any[Boolean])).thenReturn(Nil) when(taskScheduler.sc).thenReturn(sc) diff --git a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala index fe88822bb46b5..93c0aa000e207 100644 --- a/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/client/AppClientSuite.scala @@ -27,7 +27,7 @@ import org.scalatest.concurrent.{Eventually, ScalaFutures} import org.apache.spark._ import org.apache.spark.deploy.{ApplicationDescription, Command} -import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState} +import org.apache.spark.deploy.DeployMessages.{MasterStateResponse, RequestMasterState, WorkerDecommissioning} import org.apache.spark.deploy.master.{ApplicationInfo, Master} import org.apache.spark.deploy.worker.Worker import org.apache.spark.internal.{config, Logging} @@ -122,7 +122,11 @@ class AppClientSuite // Send a decommission self to all the workers // Note: normally the worker would send this on their own. - workers.foreach(worker => worker.decommissionSelf()) + workers.foreach { worker => + worker.decommissionSelf() + // send the notice to Master to tell the decommission of Workers + master.self.send(WorkerDecommissioning(worker.workerId, worker.self)) + } // Decommissioning is async. 
eventually(timeout(1.seconds), interval(10.millis)) { diff --git a/core/src/test/scala/org/apache/spark/deploy/history/BasicEventFilterSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/BasicEventFilterSuite.scala index 2da40dccba53e..5d40a0610eb6c 100644 --- a/core/src/test/scala/org/apache/spark/deploy/history/BasicEventFilterSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/history/BasicEventFilterSuite.scala @@ -135,6 +135,8 @@ class BasicEventFilterSuite extends SparkFunSuite { SparkListenerStageExecutorMetrics(1.toString, 0, 0, new ExecutorMetrics))) assert(Some(false) === acceptFn(SparkListenerExecutorBlacklisted(0, 1.toString, 1))) assert(Some(false) === acceptFn(SparkListenerExecutorUnblacklisted(0, 1.toString))) + assert(Some(false) === acceptFn(SparkListenerExecutorExcluded(0, 1.toString, 1))) + assert(Some(false) === acceptFn(SparkListenerExecutorUnexcluded(0, 1.toString))) assert(Some(false) === acceptFn(createExecutorRemovedEvent(1))) val bmId = BlockManagerId(1.toString, "host1", 1) assert(Some(false) === acceptFn(SparkListenerBlockManagerAdded(0, bmId, 1))) @@ -148,6 +150,10 @@ class BasicEventFilterSuite extends SparkFunSuite { SparkListenerStageExecutorMetrics(2.toString, 0, 0, new ExecutorMetrics))) assert(Some(true) === acceptFn(SparkListenerExecutorBlacklisted(0, 2.toString, 1))) assert(Some(true) === acceptFn(SparkListenerExecutorUnblacklisted(0, 2.toString))) + assert(None === acceptFn(SparkListenerNodeBlacklisted(0, "host1", 1))) + assert(None === acceptFn(SparkListenerNodeUnblacklisted(0, "host1"))) + assert(Some(true) === acceptFn(SparkListenerExecutorExcluded(0, 2.toString, 1))) + assert(Some(true) === acceptFn(SparkListenerExecutorUnexcluded(0, 2.toString))) assert(Some(true) === acceptFn(createExecutorRemovedEvent(2))) val bmId2 = BlockManagerId(2.toString, "host1", 1) assert(Some(true) === acceptFn(SparkListenerBlockManagerAdded(0, bmId2, 1))) @@ -164,8 +170,8 @@ class BasicEventFilterSuite extends SparkFunSuite { assert(None === acceptFn(SparkListenerEnvironmentUpdate(Map.empty))) assert(None === acceptFn(SparkListenerApplicationStart("1", Some("1"), 0, "user", None))) assert(None === acceptFn(SparkListenerApplicationEnd(1))) - assert(None === acceptFn(SparkListenerNodeBlacklisted(0, "host1", 1))) - assert(None === acceptFn(SparkListenerNodeUnblacklisted(0, "host1"))) + assert(None === acceptFn(SparkListenerNodeExcluded(0, "host1", 1))) + assert(None === acceptFn(SparkListenerNodeUnexcluded(0, "host1"))) assert(None === acceptFn(SparkListenerLogStart("testVersion"))) } diff --git a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileCompactorSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileCompactorSuite.scala index 2a914023ec821..7d07af4d7246b 100644 --- a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileCompactorSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileCompactorSuite.scala @@ -23,10 +23,9 @@ import scala.io.{Codec, Source} import org.apache.hadoop.fs.{FileStatus, FileSystem, Path} import org.json4s.jackson.JsonMethods.parse -import org.apache.spark.{SparkConf, SparkFunSuite, Success} +import org.apache.spark.{SparkConf, SparkFunSuite} import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.deploy.history.EventLogTestHelper.writeEventsToRollingWriter -import org.apache.spark.executor.ExecutorMetrics import org.apache.spark.scheduler._ import org.apache.spark.scheduler.cluster.ExecutorInfo import 
org.apache.spark.status.ListenerEventsTestHelper._ @@ -219,10 +218,10 @@ class EventLogFileCompactorSuite extends SparkFunSuite { override def acceptFn(): PartialFunction[SparkListenerEvent, Boolean] = { case _: SparkListenerApplicationEnd => true case _: SparkListenerEnvironmentUpdate => true - case _: SparkListenerNodeBlacklisted => true + case _: SparkListenerNodeExcluded => true case _: SparkListenerBlockManagerAdded => false case _: SparkListenerApplicationStart => false - case _: SparkListenerNodeUnblacklisted => false + case _: SparkListenerNodeUnexcluded => false } override def statistics(): Option[EventFilter.FilterStatistics] = None @@ -254,11 +253,11 @@ class EventLogFileCompactorSuite extends SparkFunSuite { // filterApplicationStart: Some(false) & Some(false) => filter out writeEventToWriter(writer, SparkListenerApplicationStart("app", None, 0, "user", None)) - // filterNodeBlacklisted: None & Some(true) => filter in - expectedLines += writeEventToWriter(writer, SparkListenerNodeBlacklisted(0, "host1", 1)) + // filterNodeExcluded: None & Some(true) => filter in + expectedLines += writeEventToWriter(writer, SparkListenerNodeExcluded(0, "host1", 1)) - // filterNodeUnblacklisted: None & Some(false) => filter out - writeEventToWriter(writer, SparkListenerNodeUnblacklisted(0, "host1")) + // filterNodeUnexcluded: None & Some(false) => filter out + writeEventToWriter(writer, SparkListenerNodeUnexcluded(0, "host1")) // other events: None & None => filter in expectedLines += writeEventToWriter(writer, SparkListenerUnpersistRDD(0)) diff --git a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala index 060b878fb8ef2..e9b739ce7a4c6 100644 --- a/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/history/EventLogFileWritersSuite.scala @@ -213,7 +213,7 @@ class SingleEventLogFileWriterSuite extends EventLogFileWritersSuite { compressionCodecShortName) val finalLogPath = new Path(logPath) - assert(fileSystem.exists(finalLogPath) && fileSystem.isFile(finalLogPath)) + assert(fileSystem.exists(finalLogPath) && fileSystem.getFileStatus(finalLogPath).isFile) assert(expectedLines === readLinesFromEventLogFile(finalLogPath, fileSystem)) } } @@ -357,10 +357,10 @@ class RollingEventLogFilesWriterSuite extends EventLogFileWritersSuite { expectedLines: Seq[String]): Unit = { val logDirPath = getAppEventLogDirPath(logBaseDir, appId, appAttemptId) - assert(fileSystem.exists(logDirPath) && fileSystem.isDirectory(logDirPath)) + assert(fileSystem.exists(logDirPath) && fileSystem.getFileStatus(logDirPath).isDirectory) val appStatusFile = getAppStatusFilePath(logDirPath, appId, appAttemptId, inProgress = false) - assert(fileSystem.exists(appStatusFile) && fileSystem.isFile(appStatusFile)) + assert(fileSystem.exists(appStatusFile) && fileSystem.getFileStatus(appStatusFile).isFile) val eventLogFiles = listEventLogFiles(logDirPath) val allLines = mutable.ArrayBuffer[String]() diff --git a/core/src/test/scala/org/apache/spark/deploy/history/FsHistoryProviderSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/FsHistoryProviderSuite.scala index 3f8c875f5a552..3b8677742ca16 100644 --- a/core/src/test/scala/org/apache/spark/deploy/history/FsHistoryProviderSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/history/FsHistoryProviderSuite.scala @@ -44,7 +44,7 @@ import 
org.apache.spark.deploy.history.EventLogTestHelper._ import org.apache.spark.internal.Logging import org.apache.spark.internal.config.DRIVER_LOG_DFS_DIR import org.apache.spark.internal.config.History._ -import org.apache.spark.internal.config.UI.{ADMIN_ACLS, ADMIN_ACLS_GROUPS, USER_GROUPS_MAPPING} +import org.apache.spark.internal.config.UI.{ADMIN_ACLS, ADMIN_ACLS_GROUPS, UI_VIEW_ACLS, UI_VIEW_ACLS_GROUPS, USER_GROUPS_MAPPING} import org.apache.spark.io._ import org.apache.spark.scheduler._ import org.apache.spark.scheduler.cluster.ExecutorInfo @@ -926,8 +926,8 @@ class FsHistoryProviderSuite extends SparkFunSuite with Matchers with Logging { oldProvider.listing.setMetadata(meta) oldProvider.stop() - val mistatchedVersionProvider = new FsHistoryProvider(conf) - assert(mistatchedVersionProvider.listing.count(classOf[ApplicationInfoWrapper]) === 0) + val mismatchedVersionProvider = new FsHistoryProvider(conf) + assert(mismatchedVersionProvider.listing.count(classOf[ApplicationInfoWrapper]) === 0) } test("invalidate cached UI") { @@ -1475,6 +1475,107 @@ class FsHistoryProviderSuite extends SparkFunSuite with Matchers with Logging { } } + test("SPARK-33146: don't let one bad rolling log folder prevent loading other applications") { + withTempDir { dir => + val conf = createTestConf(true) + conf.set(HISTORY_LOG_DIR, dir.getAbsolutePath) + val hadoopConf = SparkHadoopUtil.newConfiguration(conf) + val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf) + + val provider = new FsHistoryProvider(conf) + + val writer = new RollingEventLogFilesWriter("app", None, dir.toURI, conf, hadoopConf) + writer.start() + + writeEventsToRollingWriter(writer, Seq( + SparkListenerApplicationStart("app", Some("app"), 0, "user", None), + SparkListenerJobStart(1, 0, Seq.empty)), rollFile = false) + provider.checkForLogs() + provider.cleanLogs() + assert(dir.listFiles().size === 1) + assert(provider.getListing.length === 1) + + // Manually delete the appstatus file to make an invalid rolling event log + val appStatusPath = RollingEventLogFilesWriter.getAppStatusFilePath(new Path(writer.logPath), + "app", None, true) + fs.delete(appStatusPath, false) + provider.checkForLogs() + provider.cleanLogs() + assert(provider.getListing.length === 0) + + // Create a new application + val writer2 = new RollingEventLogFilesWriter("app2", None, dir.toURI, conf, hadoopConf) + writer2.start() + writeEventsToRollingWriter(writer2, Seq( + SparkListenerApplicationStart("app2", Some("app2"), 0, "user", None), + SparkListenerJobStart(1, 0, Seq.empty)), rollFile = false) + + // Both folders exist but only one application found + provider.checkForLogs() + provider.cleanLogs() + assert(provider.getListing.length === 1) + assert(dir.listFiles().size === 2) + + // Make sure a new provider sees the valid application + provider.stop() + val newProvider = new FsHistoryProvider(conf) + newProvider.checkForLogs() + assert(newProvider.getListing.length === 1) + } + } + + test("SPARK-33215: check ui view permissions without retrieving ui") { + val conf = createTestConf() + .set(HISTORY_SERVER_UI_ACLS_ENABLE, true) + .set(HISTORY_SERVER_UI_ADMIN_ACLS, Seq("user1", "user2")) + .set(HISTORY_SERVER_UI_ADMIN_ACLS_GROUPS, Seq("group1")) + .set(USER_GROUPS_MAPPING, classOf[TestGroupsMappingProvider].getName) + + val provider = new FsHistoryProvider(conf) + val log = newLogFile("app1", Some("attempt1"), inProgress = false) + writeFile(log, None, + SparkListenerApplicationStart("app1", Some("app1"), System.currentTimeMillis(), + "test", 
Some("attempt1")), + SparkListenerEnvironmentUpdate(Map( + "Spark Properties" -> List((UI_VIEW_ACLS.key, "user"), (UI_VIEW_ACLS_GROUPS.key, "group")), + "Hadoop Properties" -> Seq.empty, + "JVM Information" -> Seq.empty, + "System Properties" -> Seq.empty, + "Classpath Entries" -> Seq.empty + )), + SparkListenerApplicationEnd(System.currentTimeMillis())) + + provider.checkForLogs() + + // attempt2 doesn't exist + intercept[NoSuchElementException] { + provider.checkUIViewPermissions("app1", Some("attempt2"), "user1") + } + // app2 doesn't exist + intercept[NoSuchElementException] { + provider.checkUIViewPermissions("app2", Some("attempt1"), "user1") + } + + // user1 and user2 are admins + assert(provider.checkUIViewPermissions("app1", Some("attempt1"), "user1")) + assert(provider.checkUIViewPermissions("app1", Some("attempt1"), "user2")) + // user3 is a member of admin group "group1" + assert(provider.checkUIViewPermissions("app1", Some("attempt1"), "user3")) + // test is the app owner + assert(provider.checkUIViewPermissions("app1", Some("attempt1"), "test")) + // user is in the app's view acls + assert(provider.checkUIViewPermissions("app1", Some("attempt1"), "user")) + // user5 is a member of the app's view acls group "group" + assert(provider.checkUIViewPermissions("app1", Some("attempt1"), "user5")) + + // abc, user6, user7 don't have permissions + assert(!provider.checkUIViewPermissions("app1", Some("attempt1"), "abc")) + assert(!provider.checkUIViewPermissions("app1", Some("attempt1"), "user6")) + assert(!provider.checkUIViewPermissions("app1", Some("attempt1"), "user7")) + + provider.stop() + } + /** * Asks the provider to check for logs and calls a function to perform checks on the updated * app list. Example: diff --git a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala index 51e38f9cdcd2d..08b2118065521 100644 --- a/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/history/HistoryServerSuite.scala @@ -169,12 +169,13 @@ class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with Matchers "applications/local-1426533911241/1/stages/0/0/taskList", "stage task list from multi-attempt app json(2)" -> "applications/local-1426533911241/2/stages/0/0/taskList", - "blacklisting for stage" -> "applications/app-20180109111548-0000/stages/0/0", - "blacklisting node for stage" -> "applications/application_1516285256255_0012/stages/0/0", + "excludeOnFailure for stage" -> "applications/app-20180109111548-0000/stages/0/0", + "excludeOnFailure node for stage" -> "applications/application_1516285256255_0012/stages/0/0", "rdd list storage json" -> "applications/local-1422981780767/storage/rdd", - "executor node blacklisting" -> "applications/app-20161116163331-0000/executors", - "executor node blacklisting unblacklisting" -> "applications/app-20161115172038-0000/executors", + "executor node excludeOnFailure" -> "applications/app-20161116163331-0000/executors", + "executor node excludeOnFailure unexcluding" -> + "applications/app-20161115172038-0000/executors", "executor memory usage" -> "applications/app-20161116163331-0000/executors", "executor resource information" -> "applications/application_1555004656427_0144/executors", "multiple resource profiles" -> "applications/application_1578436911597_0052/environment", @@ -584,6 +585,24 @@ class HistoryServerSuite extends SparkFunSuite with BeforeAndAfter with 
Matchers } } + test("SPARK-33215: speed up event log download by skipping UI rebuild") { + val appId = "local-1430917381535" + + stop() + init() + + val port = server.boundPort + val testUrls = Seq( + s"http://localhost:$port/api/v1/applications/$appId/logs", + s"http://localhost:$port/api/v1/applications/$appId/1/logs", + s"http://localhost:$port/api/v1/applications/$appId/2/logs") + + testUrls.foreach { url => + TestUtils.httpResponseCode(new URL(url)) + } + assert(server.cacheMetrics.loadCount.getCount === 0, "downloading event log shouldn't load ui") + } + test("access history application defaults to the last attempt id") { def getRedirectUrl(url: URL): (Int, String) = { diff --git a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala index 8f19fb5cc80bd..b1b97a61ed1f0 100644 --- a/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/master/MasterSuite.scala @@ -72,6 +72,7 @@ class MockWorker(master: RpcEndpointRef, conf: SparkConf = new SparkConf) extend }) } + var decommissioned = false var appDesc = DeployTestUtils.createAppDesc() val drivers = mutable.HashSet[String]() val driverResources = new mutable.HashMap[String, Map[String, Set[String]]] @@ -96,6 +97,8 @@ class MockWorker(master: RpcEndpointRef, conf: SparkConf = new SparkConf) extend case None => } driverIdToAppId.remove(driverId) + case DecommissionWorker => + decommissioned = true } } @@ -140,6 +143,10 @@ class MockExecutorLaunchFailWorker(master: Master, conf: SparkConf = new SparkCo class MasterSuite extends SparkFunSuite with Matchers with Eventually with PrivateMethodTester with BeforeAndAfter { + // regex to extract worker links from the master webui HTML + // groups represent URL and worker ID + val WORKER_LINK_RE = """<a href="(.+?)">\s*(worker-.+?)\s*</a>""".r + private var _master: Master = _ after { @@ -317,10 +324,10 @@ class MasterSuite extends SparkFunSuite val conf = new SparkConf() val localCluster = new LocalSparkCluster(2, 2, 512, conf) localCluster.start() + val masterUrl = s"http://localhost:${localCluster.masterWebUIPort}" try { eventually(timeout(5.seconds), interval(100.milliseconds)) { - val json = Source.fromURL(s"http://localhost:${localCluster.masterWebUIPort}/json") - .getLines().mkString("\n") + val json = Source.fromURL(s"$masterUrl/json").getLines().mkString("\n") val JArray(workers) = (parse(json) \ "workers") workers.size should be (2) workers.foreach { workerSummaryJson => @@ -329,6 +336,16 @@ class MasterSuite extends SparkFunSuite .getLines().mkString("\n")) (workerResponse \ "cores").extract[Int] should be (2) } + + val html = Source.fromURL(s"$masterUrl/").getLines().mkString("\n") + html should include ("Spark Master at spark://") + val workerLinks = (WORKER_LINK_RE findAllMatchIn html).toList + workerLinks.size should be (2) + workerLinks foreach { case WORKER_LINK_RE(workerUrl, workerId) => + val workerHtml = Source.fromURL(workerUrl).getLines().mkString("\n") + workerHtml should include ("Spark Worker at") + workerHtml should include ("Running Executors (0)") + } } } finally { localCluster.stop() @@ -337,31 +354,106 @@ class MasterSuite extends SparkFunSuite test("master/worker web ui available with reverseProxy") { implicit val formats = org.json4s.DefaultFormats - val reverseProxyUrl = "http://localhost:8080" + val conf = new SparkConf() + conf.set(UI_REVERSE_PROXY, true) + val localCluster = new LocalSparkCluster(2, 2, 512, conf) +
localCluster.start() + val masterUrl = s"http://localhost:${localCluster.masterWebUIPort}" + try { + eventually(timeout(5.seconds), interval(100.milliseconds)) { + val json = Source.fromURL(s"$masterUrl/json") + .getLines().mkString("\n") + val JArray(workers) = (parse(json) \ "workers") + workers.size should be (2) + workers.foreach { workerSummaryJson => + // the webuiaddress intentionally points to the local web ui. + // explicitly construct reverse proxy url targeting the master + val JString(workerId) = workerSummaryJson \ "id" + val url = s"$masterUrl/proxy/${workerId}/json" + val workerResponse = parse(Source.fromURL(url).getLines().mkString("\n")) + (workerResponse \ "cores").extract[Int] should be (2) + } + + val html = Source.fromURL(s"$masterUrl/").getLines().mkString("\n") + html should include ("Spark Master at spark://") + html should include ("""href="/static""") + html should include ("""src="/static""") + verifyWorkerUI(html, masterUrl) + } + } finally { + localCluster.stop() + System.getProperties().remove("spark.ui.proxyBase") + } + } + + test("master/worker web ui available behind front-end reverseProxy") { + implicit val formats = org.json4s.DefaultFormats + val reverseProxyUrl = "http://proxyhost:8080/path/to/spark" val conf = new SparkConf() conf.set(UI_REVERSE_PROXY, true) conf.set(UI_REVERSE_PROXY_URL, reverseProxyUrl) val localCluster = new LocalSparkCluster(2, 2, 512, conf) localCluster.start() + val masterUrl = s"http://localhost:${localCluster.masterWebUIPort}" try { eventually(timeout(5.seconds), interval(100.milliseconds)) { - val json = Source.fromURL(s"http://localhost:${localCluster.masterWebUIPort}/json") + val json = Source.fromURL(s"$masterUrl/json") .getLines().mkString("\n") val JArray(workers) = (parse(json) \ "workers") workers.size should be (2) workers.foreach { workerSummaryJson => + // the webuiaddress intentionally points to the local web ui. + // explicitly construct reverse proxy url targeting the master val JString(workerId) = workerSummaryJson \ "id" - val url = s"http://localhost:${localCluster.masterWebUIPort}/proxy/${workerId}/json" + val url = s"$masterUrl/proxy/${workerId}/json" val workerResponse = parse(Source.fromURL(url).getLines().mkString("\n")) (workerResponse \ "cores").extract[Int] should be (2) - (workerResponse \ "masterwebuiurl").extract[String] should be (reverseProxyUrl) + (workerResponse \ "masterwebuiurl").extract[String] should be (reverseProxyUrl + "/") } + + // with LocalCluster, we have masters and workers in the same JVM, each overwriting + // system property spark.ui.proxyBase. 
+ // so we need to manage this property explicitly for test + System.getProperty("spark.ui.proxyBase") should startWith + (s"$reverseProxyUrl/proxy/worker-") + System.setProperty("spark.ui.proxyBase", reverseProxyUrl) + val html = Source.fromURL(s"$masterUrl/").getLines().mkString("\n") + html should include ("Spark Master at spark://") + verifyStaticResourcesServedByProxy(html, reverseProxyUrl) + verifyWorkerUI(html, masterUrl, reverseProxyUrl) } } finally { localCluster.stop() + System.getProperties().remove("spark.ui.proxyBase") + } + } + + private def verifyWorkerUI(masterHtml: String, masterUrl: String, + reverseProxyUrl: String = ""): Unit = { + val workerLinks = (WORKER_LINK_RE findAllMatchIn masterHtml).toList + workerLinks.size should be (2) + workerLinks foreach { + case WORKER_LINK_RE(workerUrl, workerId) => + workerUrl should be (s"$reverseProxyUrl/proxy/$workerId") + // there is no real front-end proxy as defined in $reverseProxyUrl + // construct url directly targeting the master + val url = s"$masterUrl/proxy/$workerId/" + System.setProperty("spark.ui.proxyBase", workerUrl) + val workerHtml = Source.fromURL(url).getLines().mkString("\n") + workerHtml should include ("Spark Worker at") + workerHtml should include ("Running Executors (0)") + verifyStaticResourcesServedByProxy(workerHtml, workerUrl) + case _ => fail // make sure we don't accidentially skip the tests } } + private def verifyStaticResourcesServedByProxy(html: String, proxyUrl: String): Unit = { + html should not include ("""href="/static""") + html should include (s"""href="$proxyUrl/static""") + html should not include ("""src="/static""") + html should include (s"""src="$proxyUrl/static""") + } + test("basic scheduling - spread out") { basicScheduling(spreadOut = true) } @@ -692,7 +784,7 @@ class MasterSuite extends SparkFunSuite var worker: MockExecutorLaunchFailWorker = null try { val conf = new SparkConf() - // SPARK-32250: When running test on Github Action machine, the available processors in JVM + // SPARK-32250: When running test on GitHub Action machine, the available processors in JVM // is only 2, while on Jenkins it's 32. For this specific test, 2 available processors, which // also decides number of threads in Dispatcher, is not enough to consume the messages. 
In // the worst situation, MockExecutorLaunchFailWorker would occupy these 2 threads for @@ -742,9 +834,9 @@ class MasterSuite extends SparkFunSuite hostnames: Seq[String]): Unit = { val conf = new SparkConf() val master = makeAliveMaster(conf) - val workerRegs = (1 to numWorkers).map{idx => + val workers = (1 to numWorkers).map { idx => val worker = new MockWorker(master.self, conf) - worker.rpcEnv.setupEndpoint("worker", worker) + worker.rpcEnv.setupEndpoint(s"worker-$idx", worker) val workerReg = RegisterWorker( worker.id, "localhost", @@ -755,14 +847,14 @@ class MasterSuite extends SparkFunSuite "http://localhost:8080", RpcAddress("localhost", 10000)) master.self.send(workerReg) - workerReg + worker } eventually(timeout(10.seconds)) { val masterState = master.self.askSync[MasterStateResponse](RequestMasterState) assert(masterState.workers.length === numWorkers) assert(masterState.workers.forall(_.state == WorkerState.ALIVE)) - assert(masterState.workers.map(_.id).toSet == workerRegs.map(_.id).toSet) + assert(masterState.workers.map(_.id).toSet == workers.map(_.id).toSet) } val decomWorkersCount = master.self.askSync[Integer](DecommissionWorkersOnHosts(hostnames)) @@ -773,8 +865,11 @@ class MasterSuite extends SparkFunSuite eventually(timeout(30.seconds)) { val masterState = master.self.askSync[MasterStateResponse](RequestMasterState) assert(masterState.workers.length === numWorkers) - val workersActuallyDecomed = masterState.workers.count(_.state == WorkerState.DECOMMISSIONED) - assert(workersActuallyDecomed === numWorkersExpectedToDecom) + val workersActuallyDecomed = masterState.workers + .filter(_.state == WorkerState.DECOMMISSIONED).map(_.id) + val decommissionedWorkers = workers.filter(w => workersActuallyDecomed.contains(w.id)) + assert(workersActuallyDecomed.length === numWorkersExpectedToDecom) + assert(decommissionedWorkers.forall(_.decommissioned)) } // Decommissioning a worker again should return the same answer since we want this call to be diff --git a/core/src/test/scala/org/apache/spark/deploy/master/ui/MasterWebUISuite.scala b/core/src/test/scala/org/apache/spark/deploy/master/ui/MasterWebUISuite.scala index 35de457ec48ce..be83ec12f92f5 100644 --- a/core/src/test/scala/org/apache/spark/deploy/master/ui/MasterWebUISuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/master/ui/MasterWebUISuite.scala @@ -21,7 +21,6 @@ import java.io.DataOutputStream import java.net.{HttpURLConnection, URL} import java.nio.charset.StandardCharsets import java.util.Date -import javax.servlet.http.HttpServletResponse import scala.collection.mutable.HashMap @@ -32,7 +31,6 @@ import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite} import org.apache.spark.deploy.DeployMessages.{DecommissionWorkersOnHosts, KillDriverResponse, RequestKillDriver} import org.apache.spark.deploy.DeployTestUtils._ import org.apache.spark.deploy.master._ -import org.apache.spark.internal.config.UI import org.apache.spark.rpc.{RpcEndpointRef, RpcEnv} diff --git a/core/src/test/scala/org/apache/spark/deploy/rest/SubmitRestProtocolSuite.scala b/core/src/test/scala/org/apache/spark/deploy/rest/SubmitRestProtocolSuite.scala index d08052faa0043..9fdbf485e17d3 100644 --- a/core/src/test/scala/org/apache/spark/deploy/rest/SubmitRestProtocolSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/rest/SubmitRestProtocolSuite.scala @@ -98,6 +98,7 @@ class SubmitRestProtocolSuite extends SparkFunSuite { // optional fields conf.set(JARS, Seq("mayonnaise.jar", "ketchup.jar")) conf.set(FILES.key, 
"fireball.png") + conf.set(ARCHIVES.key, "fireballs.zip") conf.set("spark.driver.memory", s"${Utils.DEFAULT_DRIVER_MEM_MB}m") conf.set(DRIVER_CORES, 180) conf.set("spark.driver.extraJavaOptions", " -Dslices=5 -Dcolor=mostly_red") @@ -246,6 +247,7 @@ class SubmitRestProtocolSuite extends SparkFunSuite { | }, | "mainClass" : "org.apache.spark.examples.SparkPie", | "sparkProperties" : { + | "spark.archives" : "fireballs.zip", | "spark.driver.extraLibraryPath" : "pickle.jar", | "spark.jars" : "mayonnaise.jar,ketchup.jar", | "spark.driver.supervise" : "false", @@ -272,6 +274,7 @@ class SubmitRestProtocolSuite extends SparkFunSuite { | }, | "mainClass" : "org.apache.spark.examples.SparkPie", | "sparkProperties" : { + | "spark.archives" : "fireballs.zip", | "spark.driver.extraLibraryPath" : "pickle.jar", | "spark.jars" : "mayonnaise.jar,ketchup.jar", | "spark.driver.supervise" : "false", diff --git a/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala b/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala index 5bbd60f99f77e..8ed861ad34ea7 100644 --- a/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala +++ b/core/src/test/scala/org/apache/spark/deploy/worker/WorkerSuite.scala @@ -342,7 +342,7 @@ class WorkerSuite extends SparkFunSuite with Matchers with BeforeAndAfter { testWorkDirCleanupAndRemoveMetadataWithConfig(true) } - test("WorkdDirCleanup cleans only app dirs when" + + test("WorkDirCleanup cleans only app dirs when" + "spark.shuffle.service.db.enabled=false") { testWorkDirCleanupAndRemoveMetadataWithConfig(false) } diff --git a/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala b/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala index e0b586074b89e..810dcf0e61007 100644 --- a/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala +++ b/core/src/test/scala/org/apache/spark/executor/CoarseGrainedExecutorBackendSuite.scala @@ -106,7 +106,7 @@ class CoarseGrainedExecutorBackendSuite extends SparkFunSuite testParsingMultipleResources(conf, ResourceProfile.getOrCreateDefaultProfile(conf)) } - def testParsingMultipleResources(conf: SparkConf, resourceProfile: ResourceProfile) { + def testParsingMultipleResources(conf: SparkConf, resourceProfile: ResourceProfile): Unit = { val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) // we don't really use this, just need it to get at the parser function @@ -302,7 +302,7 @@ class CoarseGrainedExecutorBackendSuite extends SparkFunSuite // We don't really verify the data, just pass it around. 
val data = ByteBuffer.wrap(Array[Byte](1, 2, 3, 4)) val taskDescription = new TaskDescription(taskId, 2, "1", "TASK 1000000", 19, - 1, mutable.Map.empty, mutable.Map.empty, new Properties, + 1, mutable.Map.empty, mutable.Map.empty, mutable.Map.empty, new Properties, Map(GPU -> new ResourceInformation(GPU, Array("0", "1"))), data) val serializedTaskDescription = TaskDescription.encode(taskDescription) backend.executor = mock[Executor] diff --git a/core/src/test/scala/org/apache/spark/executor/ExecutorSuite.scala b/core/src/test/scala/org/apache/spark/executor/ExecutorSuite.scala index 8e58beff74290..97ffb36062dbc 100644 --- a/core/src/test/scala/org/apache/spark/executor/ExecutorSuite.scala +++ b/core/src/test/scala/org/apache/spark/executor/ExecutorSuite.scala @@ -17,8 +17,9 @@ package org.apache.spark.executor -import java.io.{Externalizable, File, ObjectInput, ObjectOutput} +import java.io.{Externalizable, ObjectInput, ObjectOutput} import java.lang.Thread.UncaughtExceptionHandler +import java.net.URL import java.nio.ByteBuffer import java.util.Properties import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, TimeUnit} @@ -28,6 +29,7 @@ import scala.collection.immutable import scala.collection.mutable.{ArrayBuffer, Map} import scala.concurrent.duration._ +import com.google.common.cache.{CacheBuilder, CacheLoader} import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers.{any, eq => meq} import org.mockito.Mockito.{inOrder, verify, when} @@ -41,10 +43,9 @@ import org.scalatestplus.mockito.MockitoSugar import org.apache.spark._ import org.apache.spark.TaskState.TaskState import org.apache.spark.broadcast.Broadcast -import org.apache.spark.deploy.{SimpleApplicationTest, SparkSubmitSuite} import org.apache.spark.internal.config._ import org.apache.spark.internal.config.UI._ -import org.apache.spark.memory.TestMemoryManager +import org.apache.spark.memory.{SparkOutOfMemoryError, TestMemoryManager} import org.apache.spark.metrics.MetricsSystem import org.apache.spark.rdd.RDD import org.apache.spark.resource.ResourceInformation @@ -53,7 +54,7 @@ import org.apache.spark.scheduler.{DirectTaskResult, FakeTask, ResultTask, Task, import org.apache.spark.serializer.{JavaSerializer, SerializerInstance, SerializerManager} import org.apache.spark.shuffle.FetchFailedException import org.apache.spark.storage.{BlockManager, BlockManagerId} -import org.apache.spark.util.{LongAccumulator, UninterruptibleThread, Utils} +import org.apache.spark.util.{LongAccumulator, SparkUncaughtExceptionHandler, ThreadUtils, UninterruptibleThread} class ExecutorSuite extends SparkFunSuite with LocalSparkContext with MockitoSugar with Eventually with PrivateMethodTester { @@ -64,6 +65,33 @@ class ExecutorSuite extends SparkFunSuite super.afterEach() } + /** + * Creates an Executor with the provided arguments, is then passed to `f` + * and will be stopped after `f` returns. 
+ */ + def withExecutor( + executorId: String, + executorHostname: String, + env: SparkEnv, + userClassPath: Seq[URL] = Nil, + isLocal: Boolean = true, + uncaughtExceptionHandler: UncaughtExceptionHandler + = new SparkUncaughtExceptionHandler, + resources: immutable.Map[String, ResourceInformation] + = immutable.Map.empty[String, ResourceInformation])(f: Executor => Unit): Unit = { + var executor: Executor = null + try { + executor = new Executor(executorId, executorHostname, env, userClassPath, isLocal, + uncaughtExceptionHandler, resources) + + f(executor) + } finally { + if (executor != null) { + executor.stop() + } + } + } + test("SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner") { // mock some objects to make Executor.launchTask() happy val conf = new SparkConf @@ -116,10 +144,8 @@ class ExecutorSuite extends SparkFunSuite } }) - var executor: Executor = null - try { - executor = new Executor("id", "localhost", env, userClassPath = Nil, isLocal = true, - resources = immutable.Map.empty[String, ResourceInformation]) + withExecutor("id", "localhost", env) { executor => + // the task will be launched in a dedicated worker thread executor.launchTask(mockExecutorBackend, taskDescription) @@ -139,11 +165,6 @@ class ExecutorSuite extends SparkFunSuite assert(executorSuiteHelper.testFailedReason.toErrorString === "TaskKilled (test)") assert(executorSuiteHelper.taskState === TaskState.KILLED) } - finally { - if (executor != null) { - executor.stop() - } - } } test("SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions") { @@ -255,25 +276,24 @@ class ExecutorSuite extends SparkFunSuite confs.foreach { case (k, v) => conf.set(k, v) } val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) - val executor = - new Executor("id", "localhost", SparkEnv.get, userClassPath = Nil, isLocal = true, - resources = immutable.Map.empty[String, ResourceInformation]) - val executorClass = classOf[Executor] - - // Save all heartbeats sent into an ArrayBuffer for verification - val heartbeats = ArrayBuffer[Heartbeat]() - val mockReceiver = mock[RpcEndpointRef] - when(mockReceiver.askSync(any[Heartbeat], any[RpcTimeout])(any)) - .thenAnswer((invocation: InvocationOnMock) => { - val args = invocation.getArguments() - heartbeats += args(0).asInstanceOf[Heartbeat] - HeartbeatResponse(false) - }) - val receiverRef = executorClass.getDeclaredField("heartbeatReceiverRef") - receiverRef.setAccessible(true) - receiverRef.set(executor, mockReceiver) + withExecutor("id", "localhost", SparkEnv.get) { executor => + val executorClass = classOf[Executor] - f(executor, heartbeats) + // Save all heartbeats sent into an ArrayBuffer for verification + val heartbeats = ArrayBuffer[Heartbeat]() + val mockReceiver = mock[RpcEndpointRef] + when(mockReceiver.askSync(any[Heartbeat], any[RpcTimeout])(any)) + .thenAnswer((invocation: InvocationOnMock) => { + val args = invocation.getArguments() + heartbeats += args(0).asInstanceOf[Heartbeat] + HeartbeatResponse(false) + }) + val receiverRef = executorClass.getDeclaredField("heartbeatReceiverRef") + receiverRef.setAccessible(true) + receiverRef.set(executor, mockReceiver) + + f(executor, heartbeats) + } } private def heartbeatZeroAccumulatorUpdateTest(dropZeroMetrics: Boolean): Unit = { @@ -354,10 +374,7 @@ class ExecutorSuite extends SparkFunSuite val taskDescription = createResultTaskDescription(serializer, taskBinary, rdd, 0) val mockBackend = mock[ExecutorBackend] - var executor: Executor = null - try { - 
executor = new Executor("id", "localhost", SparkEnv.get, userClassPath = Nil, isLocal = true, - resources = immutable.Map.empty[String, ResourceInformation]) + withExecutor("id", "localhost", SparkEnv.get) { executor => executor.launchTask(mockBackend, taskDescription) // Ensure that the executor's metricsPoller is polled so that values are recorded for @@ -368,10 +385,6 @@ class ExecutorSuite extends SparkFunSuite eventually(timeout(5.seconds), interval(10.milliseconds)) { assert(executor.numRunningTasks === 0) } - } finally { - if (executor != null) { - executor.stop() - } } // Verify that peak values for task metrics get sent in the TaskResult @@ -403,6 +416,74 @@ class ExecutorSuite extends SparkFunSuite assert(taskMetrics.getMetricValue("JVMHeapMemory") > 0) } + test("SPARK-33587: isFatalError") { + def errorInThreadPool(e: => Throwable): Throwable = { + intercept[Throwable] { + val taskPool = ThreadUtils.newDaemonFixedThreadPool(1, "test") + try { + val f = taskPool.submit(new java.util.concurrent.Callable[String] { + override def call(): String = throw e + }) + f.get() + } finally { + taskPool.shutdown() + } + } + } + + def errorInGuavaCache(e: => Throwable): Throwable = { + val cache = CacheBuilder.newBuilder() + .build(new CacheLoader[String, String] { + override def load(key: String): String = throw e + }) + intercept[Throwable] { + cache.get("test") + } + } + + def testThrowable( + e: => Throwable, + depthToCheck: Int, + isFatal: Boolean): Unit = { + import Executor.isFatalError + // `e`'s depth is 1 so `depthToCheck` needs to be at least 1 to detect fatal errors. + assert(isFatalError(e, depthToCheck) == (depthToCheck >= 1 && isFatal)) + // `e`'s depth is 2 so `depthToCheck` needs to be at least 2 to detect fatal errors. + assert(isFatalError(errorInThreadPool(e), depthToCheck) == (depthToCheck >= 2 && isFatal)) + assert(isFatalError(errorInGuavaCache(e), depthToCheck) == (depthToCheck >= 2 && isFatal)) + assert(isFatalError( + new SparkException("foo", e), + depthToCheck) == (depthToCheck >= 2 && isFatal)) + // `e`'s depth is 3 so `depthToCheck` needs to be at least 3 to detect fatal errors.
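The helpers above wrap a throwable inside an execution exception raised by a thread pool or a Guava cache, so the test can probe how deep the cause chain is searched for a fatal error. Below is a hedged, self-contained sketch of such a depth-limited check. It is not Spark's `Executor.isFatalError`; like the test it treats `InterruptedException` as non-fatal, and the `SparkOutOfMemoryError` special case is not modelled.

```scala
import scala.util.control.NonFatal

object FatalErrorSketch {
  // Hypothetical depth-limited check mirroring the behaviour the test above expects.
  // depthToCheck counts the outermost throwable as depth 1, as the comments in the test describe.
  def isFatalError(t: Throwable, depthToCheck: Int): Boolean = {
    def fatal(t: Throwable): Boolean = t match {
      case _: InterruptedException => false // treated as non-fatal, matching the test
      case other => !NonFatal(other)        // OutOfMemoryError and other VM errors are fatal
    }
    def visit(t: Throwable, depth: Int, seen: Set[Throwable]): Boolean =
      t != null && depth <= depthToCheck && !seen.contains(t) &&
        (fatal(t) || visit(t.getCause, depth + 1, seen + t))
    visit(t, depth = 1, seen = Set.empty)
  }

  def main(args: Array[String]): Unit = {
    val wrapped = new RuntimeException("wrapper", new OutOfMemoryError("boom"))
    assert(!isFatalError(wrapped, depthToCheck = 1)) // depth 1 only sees the wrapper
    assert(isFatalError(wrapped, depthToCheck = 2))  // depth 2 reaches the OOM

    // A cycle in the cause chain terminates because visited throwables are tracked.
    val e1 = new Exception("test1")
    val e2 = new Exception("test2", e1)
    e1.initCause(e2)
    assert(!isFatalError(e1, depthToCheck = 5))
  }
}
```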
+ assert(isFatalError( + errorInThreadPool(errorInGuavaCache(e)), + depthToCheck) == (depthToCheck >= 3 && isFatal)) + assert(isFatalError( + errorInGuavaCache(errorInThreadPool(e)), + depthToCheck) == (depthToCheck >= 3 && isFatal)) + assert(isFatalError( + new SparkException("foo", new SparkException("foo", e)), + depthToCheck) == (depthToCheck >= 3 && isFatal)) + } + + for (depthToCheck <- 0 to 5) { + testThrowable(new OutOfMemoryError(), depthToCheck, isFatal = true) + testThrowable(new InterruptedException(), depthToCheck, isFatal = false) + testThrowable(new RuntimeException("test"), depthToCheck, isFatal = false) + testThrowable(new SparkOutOfMemoryError("test"), depthToCheck, isFatal = false) + } + + // Verify we can handle the cycle in the exception chain + val e1 = new Exception("test1") + val e2 = new Exception("test2") + e1.initCause(e2) + e2.initCause(e1) + for (depthToCheck <- 0 to 5) { + testThrowable(e1, depthToCheck, isFatal = false) + testThrowable(e2, depthToCheck, isFatal = false) + } + } + private def createMockEnv(conf: SparkConf, serializer: JavaSerializer): SparkEnv = { val mockEnv = mock[SparkEnv] val mockRpcEnv = mock[RpcEnv] @@ -451,6 +532,7 @@ class ExecutorSuite extends SparkFunSuite partitionId = 0, addedFiles = Map[String, Long](), addedJars = Map[String, Long](), + addedArchives = Map[String, Long](), properties = new Properties, resources = immutable.Map[String, ResourceInformation](), serializedTask) @@ -466,12 +548,11 @@ class ExecutorSuite extends SparkFunSuite poll: Boolean = false): (TaskFailedReason, UncaughtExceptionHandler) = { val mockBackend = mock[ExecutorBackend] val mockUncaughtExceptionHandler = mock[UncaughtExceptionHandler] - var executor: Executor = null val timedOut = new AtomicBoolean(false) - try { - executor = new Executor("id", "localhost", SparkEnv.get, userClassPath = Nil, isLocal = true, - uncaughtExceptionHandler = mockUncaughtExceptionHandler, - resources = immutable.Map.empty[String, ResourceInformation]) + + withExecutor("id", "localhost", SparkEnv.get, + uncaughtExceptionHandler = mockUncaughtExceptionHandler) { executor => + // the task will be launched in a dedicated worker thread executor.launchTask(mockBackend, taskDescription) if (killTask) { @@ -484,7 +565,7 @@ class ExecutorSuite extends SparkFunSuite if (poll) { executor.metricsPoller.poll() } - executor.killAllTasks(true, "Killed task, eg. because of speculative execution") + executor.killAllTasks(true, "Killed task, e.g. 
because of speculative execution") } else { timedOut.set(true) } @@ -504,11 +585,8 @@ class ExecutorSuite extends SparkFunSuite assert(executor.numRunningTasks === 0) } assert(!timedOut.get(), "timed out waiting to be ready to kill tasks") - } finally { - if (executor != null) { - executor.stop() - } } + val orderedMock = inOrder(mockBackend) val statusCaptor = ArgumentCaptor.forClass(classOf[ByteBuffer]) orderedMock.verify(mockBackend) diff --git a/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala b/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala index fab7aea6c47aa..f1d7053c34594 100644 --- a/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala +++ b/core/src/test/scala/org/apache/spark/input/WholeTextFileRecordReaderSuite.scala @@ -29,7 +29,6 @@ import org.scalatest.BeforeAndAfterAll import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite} import org.apache.spark.internal.Logging -import org.apache.spark.util.Utils /** * Tests the correctness of diff --git a/core/src/test/scala/org/apache/spark/internal/io/FileCommitProtocolInstantiationSuite.scala b/core/src/test/scala/org/apache/spark/internal/io/FileCommitProtocolInstantiationSuite.scala index 2bd32fc927e21..778f748f83950 100644 --- a/core/src/test/scala/org/apache/spark/internal/io/FileCommitProtocolInstantiationSuite.scala +++ b/core/src/test/scala/org/apache/spark/internal/io/FileCommitProtocolInstantiationSuite.scala @@ -75,7 +75,7 @@ class FileCommitProtocolInstantiationSuite extends SparkFunSuite { /** * Create a classic two-arg protocol instance. - * @param dynamic dyanmic partitioning mode + * @param dynamic dynamic partitioning mode * @return the instance */ private def instantiateClassic(dynamic: Boolean): ClassicConstructorCommitProtocol = { @@ -88,7 +88,7 @@ class FileCommitProtocolInstantiationSuite extends SparkFunSuite { /** * Create a three-arg protocol instance. - * @param dynamic dyanmic partitioning mode + * @param dynamic dynamic partitioning mode * @return the instance */ private def instantiateNew( diff --git a/core/src/test/scala/org/apache/spark/internal/io/SparkHadoopWriterUtilsSuite.scala b/core/src/test/scala/org/apache/spark/internal/io/SparkHadoopWriterUtilsSuite.scala new file mode 100644 index 0000000000000..33b58ec9e6665 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/internal/io/SparkHadoopWriterUtilsSuite.scala @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.internal.io + +import java.util.Date + +import org.apache.hadoop.mapreduce.JobID + +import org.apache.spark.SparkFunSuite +import org.apache.spark.internal.io.SparkHadoopWriterUtils.createJobID + +/** + * Unit tests for functions in SparkHadoopWriterUtils. + */ +class SparkHadoopWriterUtilsSuite extends SparkFunSuite { + + /** + * Core test of JobID generation: + * They are created. + * The job number is converted to the job ID. + * They round trip to string and back + * (which implies that the full string matches the regexp + * in the JobID class). + */ + test("JobID Generation") { + val jobNumber = 1010 + val j1 = createJobID(new Date(), jobNumber) + assert(jobNumber == j1.getId, + s"Job number mismatch in $j1") + + val jobStr = j1.toString + // the string value begins with job_ + assert(jobStr.startsWith("job_"), + s"wrong prefix of $jobStr") + // and the hadoop code can parse it + val j2 = roundTrip(j1) + assert(j1.getId == j2.getId, "Job ID mismatch") + assert(j1.getJtIdentifier == j2.getJtIdentifier, "Job identifier mismatch") + } + + /** + * This is the problem surfacing in situations where committers expect + * Job IDs to be unique: if the timestamp is (exclusively) used + * then there will conflict in directories created. + */ + test("JobIDs generated at same time are different") { + val now = new Date() + val j1 = createJobID(now, 1) + val j2 = createJobID(now, 1) + assert(j1.toString != j2.toString) + } + + /** + * There's nothing explicitly in the Hadoop classes to stop + * job numbers being negative. + * There's some big assumptions in the FileOutputCommitter about attempt IDs + * being positive during any recovery operations; for safety the ID + * job number is validated. + */ + test("JobIDs with negative job number") { + intercept[IllegalArgumentException] { + createJobID(new Date(), -1) + } + } + + /** + * If someone ever does reinstate use of timestamps, + * make sure that the case of timestamp == 0 is handled. + */ + test("JobIDs on Epoch are different") { + val j1 = createJobID(new Date(0), 0) + val j2 = createJobID(new Date(0), 0) + assert (j1.toString != j2.toString) + } + + /** + * Do a round trip as a string and back again. + * This uses the JobID parser. 
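The new suite above leans on Hadoop's `JobID` string format and its parser. A small standalone sketch of the same round trip using the Hadoop API directly; the `jtIdentifier` value is an arbitrary timestamp-like string chosen for the example, not something Spark generates.

```scala
import org.apache.hadoop.mapreduce.JobID

object JobIdRoundTripSketch {
  def main(args: Array[String]): Unit = {
    val original = new JobID("202012011200", 1010)
    val rendered = original.toString     // e.g. "job_202012011200_1010"
    val parsed = JobID.forName(rendered) // round trip through the Hadoop parser

    // The identifier and job number survive the trip; forName rejects malformed strings
    // with an IllegalArgumentException.
    assert(parsed.getId == original.getId)
    assert(parsed.getJtIdentifier == original.getJtIdentifier)
    assert(parsed.toString == rendered)
    println(rendered)
  }
}
```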
+ * @param jobID job ID + * @return the returned jobID + */ + private def roundTrip(jobID: JobID): JobID = { + val parsedJobId = JobID.forName(jobID.toString) + assert(jobID == parsedJobId, "Round trip was inconsistent") + parsedJobId + } +} diff --git a/core/src/test/scala/org/apache/spark/internal/plugin/PluginContainerSuite.scala b/core/src/test/scala/org/apache/spark/internal/plugin/PluginContainerSuite.scala index 7888796dd55e6..9ef81d30ff196 100644 --- a/core/src/test/scala/org/apache/spark/internal/plugin/PluginContainerSuite.scala +++ b/core/src/test/scala/org/apache/spark/internal/plugin/PluginContainerSuite.scala @@ -20,6 +20,7 @@ package org.apache.spark.internal.plugin import java.io.File import java.nio.charset.StandardCharsets import java.util.{Map => JMap} +import java.util.concurrent.atomic.AtomicInteger import scala.collection.JavaConverters._ import scala.concurrent.duration._ @@ -129,6 +130,40 @@ class PluginContainerSuite extends SparkFunSuite with BeforeAndAfterEach with Lo assert(TestSparkPlugin.driverPlugin != null) } + test("SPARK-33088: executor tasks trigger plugin calls") { + val conf = new SparkConf() + .setAppName(getClass().getName()) + .set(SparkLauncher.SPARK_MASTER, "local[1]") + .set(PLUGINS, Seq(classOf[TestSparkPlugin].getName())) + + sc = new SparkContext(conf) + sc.parallelize(1 to 10, 2).count() + + assert(TestSparkPlugin.executorPlugin.numOnTaskStart.get() == 2) + assert(TestSparkPlugin.executorPlugin.numOnTaskSucceeded.get() == 2) + assert(TestSparkPlugin.executorPlugin.numOnTaskFailed.get() == 0) + } + + test("SPARK-33088: executor failed tasks trigger plugin calls") { + val conf = new SparkConf() + .setAppName(getClass().getName()) + .set(SparkLauncher.SPARK_MASTER, "local[2]") + .set(PLUGINS, Seq(classOf[TestSparkPlugin].getName())) + + sc = new SparkContext(conf) + try { + sc.parallelize(1 to 10, 2).foreach(i => throw new RuntimeException) + } catch { + case t: Throwable => // ignore exception + } + + eventually(timeout(10.seconds), interval(100.millis)) { + assert(TestSparkPlugin.executorPlugin.numOnTaskStart.get() == 2) + assert(TestSparkPlugin.executorPlugin.numOnTaskSucceeded.get() == 0) + assert(TestSparkPlugin.executorPlugin.numOnTaskFailed.get() == 2) + } + } + test("plugin initialization in non-local mode") { val path = Utils.createTempDir() @@ -309,6 +344,10 @@ private class TestDriverPlugin extends DriverPlugin { private class TestExecutorPlugin extends ExecutorPlugin { + val numOnTaskStart = new AtomicInteger(0) + val numOnTaskSucceeded = new AtomicInteger(0) + val numOnTaskFailed = new AtomicInteger(0) + override def init(ctx: PluginContext, extraConf: JMap[String, String]): Unit = { ctx.metricRegistry().register("executorMetric", new Gauge[Int] { override def getValue(): Int = 84 @@ -316,6 +355,17 @@ private class TestExecutorPlugin extends ExecutorPlugin { TestSparkPlugin.executorContext = ctx } + override def onTaskStart(): Unit = { + numOnTaskStart.incrementAndGet() + } + + override def onTaskSucceeded(): Unit = { + numOnTaskSucceeded.incrementAndGet() + } + + override def onTaskFailed(failureReason: TaskFailedReason): Unit = { + numOnTaskFailed.incrementAndGet() + } } private object TestSparkPlugin { diff --git a/core/src/test/scala/org/apache/spark/memory/TestMemoryManager.scala b/core/src/test/scala/org/apache/spark/memory/TestMemoryManager.scala index 60f67699f81be..987f383c9c4fa 100644 --- a/core/src/test/scala/org/apache/spark/memory/TestMemoryManager.scala +++ 
b/core/src/test/scala/org/apache/spark/memory/TestMemoryManager.scala @@ -119,6 +119,14 @@ class TestMemoryManager(conf: SparkConf) consequentOOM += n } + /** + * Undos the effects of [[markExecutionAsOutOfMemoryOnce]] and [[markconsequentOOM]] and lets + * calls to [[acquireExecutionMemory()]] (if there is enough memory available). + */ + def resetConsequentOOM(): Unit = synchronized { + consequentOOM = 0 + } + def limit(avail: Long): Unit = synchronized { require(avail >= 0) available = avail diff --git a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala index 330347299ab56..905bb8110736d 100644 --- a/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala +++ b/core/src/test/scala/org/apache/spark/metrics/InputOutputMetricsSuite.scala @@ -213,7 +213,7 @@ class InputOutputMetricsSuite extends SparkFunSuite with SharedSparkContext } // Computing the amount of bytes read for a cartesian operation is a little involved. - // Cartesian interleaves reads between two partitions eg. p1 and p2. + // Cartesian interleaves reads between two partitions e.g. p1 and p2. // Here are the steps: // 1) First it creates an iterator for p1 // 2) Creates an iterator for p2 diff --git a/core/src/test/scala/org/apache/spark/metrics/sink/StatsdSinkSuite.scala b/core/src/test/scala/org/apache/spark/metrics/sink/StatsdSinkSuite.scala index 0e21a36071c42..3d4b8c868d6fc 100644 --- a/core/src/test/scala/org/apache/spark/metrics/sink/StatsdSinkSuite.scala +++ b/core/src/test/scala/org/apache/spark/metrics/sink/StatsdSinkSuite.scala @@ -35,12 +35,27 @@ class StatsdSinkSuite extends SparkFunSuite { STATSD_KEY_UNIT -> "seconds", STATSD_KEY_HOST -> "127.0.0.1" ) - private val socketTimeout = 30000 // milliseconds - private val socketBufferSize = 8192 + // The maximum size of a single datagram packet payload. Payloads + // larger than this will be truncated. + private val maxPayloadSize = 256 // bytes + + // The receive buffer must be large enough to hold all inflight + // packets. This includes any kernel and protocol overhead. + // This value was determined experimentally and should be + // increased if timeouts are seen. + private val socketMinRecvBufferSize = 16384 // bytes + private val socketTimeout = 30000 // milliseconds private def withSocketAndSink(testCode: (DatagramSocket, StatsdSink) => Any): Unit = { val socket = new DatagramSocket - socket.setReceiveBufferSize(socketBufferSize) + + // Leave the receive buffer size untouched unless it is too + // small. If the receive buffer is too small packets will be + // silently dropped and receive operations will timeout. 
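The StatsD sink test above reads metrics back over UDP, which is why the receive buffer size and socket timeout matter. A minimal sketch of that datagram plumbing with plain `java.net` APIs; the buffer sizes, timeout, and the `spark.counter:42|c` payload are illustrative values, not what the sink actually emits.

```scala
import java.net.{DatagramPacket, DatagramSocket, InetAddress}
import java.nio.charset.StandardCharsets.UTF_8

object StatsdDatagramSketch {
  def main(args: Array[String]): Unit = {
    val receiver = new DatagramSocket() // binds an ephemeral local port
    if (receiver.getReceiveBufferSize() < 16384) {
      receiver.setReceiveBufferSize(16384) // too small a buffer silently drops packets
    }
    receiver.setSoTimeout(5000) // fail the receive instead of hanging forever

    val sender = new DatagramSocket()
    val payload = "spark.counter:42|c".getBytes(UTF_8)
    sender.send(new DatagramPacket(payload, payload.length,
      InetAddress.getLoopbackAddress(), receiver.getLocalPort()))

    val buf = new Array[Byte](256) // maximum payload size expected per packet
    val packet = new DatagramPacket(buf, buf.length)
    receiver.receive(packet) // blocks until a packet arrives or the timeout fires
    println(new String(packet.getData, 0, packet.getLength, UTF_8))

    sender.close()
    receiver.close()
  }
}
```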
+ if (socket.getReceiveBufferSize() < socketMinRecvBufferSize) { + socket.setReceiveBufferSize(socketMinRecvBufferSize) + } + socket.setSoTimeout(socketTimeout) val props = new Properties defaultProps.foreach(e => props.put(e._1, e._2)) @@ -61,7 +76,7 @@ class StatsdSinkSuite extends SparkFunSuite { sink.registry.register("counter", counter) sink.report() - val p = new DatagramPacket(new Array[Byte](socketBufferSize), socketBufferSize) + val p = new DatagramPacket(new Array[Byte](maxPayloadSize), maxPayloadSize) socket.receive(p) val result = new String(p.getData, 0, p.getLength, UTF_8) @@ -77,7 +92,7 @@ class StatsdSinkSuite extends SparkFunSuite { sink.registry.register("gauge", gauge) sink.report() - val p = new DatagramPacket(new Array[Byte](socketBufferSize), socketBufferSize) + val p = new DatagramPacket(new Array[Byte](maxPayloadSize), maxPayloadSize) socket.receive(p) val result = new String(p.getData, 0, p.getLength, UTF_8) @@ -87,7 +102,7 @@ class StatsdSinkSuite extends SparkFunSuite { test("metrics StatsD sink with Histogram") { withSocketAndSink { (socket, sink) => - val p = new DatagramPacket(new Array[Byte](socketBufferSize), socketBufferSize) + val p = new DatagramPacket(new Array[Byte](maxPayloadSize), maxPayloadSize) val histogram = new Histogram(new UniformReservoir) histogram.update(10) histogram.update(20) @@ -121,7 +136,7 @@ class StatsdSinkSuite extends SparkFunSuite { test("metrics StatsD sink with Timer") { withSocketAndSink { (socket, sink) => - val p = new DatagramPacket(new Array[Byte](socketBufferSize), socketBufferSize) + val p = new DatagramPacket(new Array[Byte](maxPayloadSize), maxPayloadSize) val timer = new Timer() timer.update(1, SECONDS) timer.update(2, SECONDS) diff --git a/core/src/test/scala/org/apache/spark/metrics/source/SourceConfigSuite.scala b/core/src/test/scala/org/apache/spark/metrics/source/SourceConfigSuite.scala index 8f5ab7419d4f7..7da1403ecd4b5 100644 --- a/core/src/test/scala/org/apache/spark/metrics/source/SourceConfigSuite.scala +++ b/core/src/test/scala/org/apache/spark/metrics/source/SourceConfigSuite.scala @@ -80,4 +80,16 @@ class SourceConfigSuite extends SparkFunSuite with LocalSparkContext { } } + test("SPARK-31711: Test executor source registration in local mode") { + val conf = new SparkConf() + val sc = new SparkContext("local", "test", conf) + try { + val metricsSystem = sc.env.metricsSystem + + // Executor source should be registered + assert (metricsSystem.getSourcesByName("executor").nonEmpty) + } finally { + sc.stop() + } + } } diff --git a/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferServiceSuite.scala b/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferServiceSuite.scala index fa1a75d076051..c8a8f37212a82 100644 --- a/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferServiceSuite.scala +++ b/core/src/test/scala/org/apache/spark/network/netty/NettyBlockTransferServiceSuite.scala @@ -24,7 +24,7 @@ import scala.reflect.ClassTag import scala.util.Random import org.mockito.ArgumentMatchers.any -import org.mockito.Mockito.{mock, times, verify, when} +import org.mockito.Mockito.{mock, when} import org.scalatest.BeforeAndAfterEach import org.scalatest.matchers.must.Matchers import org.scalatest.matchers.should.Matchers._ @@ -88,7 +88,7 @@ class NettyBlockTransferServiceSuite } test("SPARK-27637: test fetch block with executor dead") { - implicit val exectionContext = ExecutionContext.global + implicit val executionContext = ExecutionContext.global val port = 17634 + 
Random.nextInt(10000) logInfo("random port for test: " + port) diff --git a/core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala b/core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala index 2de4b109e40e9..a669993352fe7 100644 --- a/core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala +++ b/core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala @@ -28,7 +28,7 @@ import org.apache.hadoop.fs.FileSystem import org.apache.hadoop.mapred._ import org.apache.hadoop.mapreduce.{Job => NewJob, JobContext => NewJobContext, OutputCommitter => NewOutputCommitter, OutputFormat => NewOutputFormat, - RecordWriter => NewRecordWriter, TaskAttemptContext => NewTaskAttempContext} + RecordWriter => NewRecordWriter, TaskAttemptContext => NewTaskAttemptContext} import org.apache.hadoop.util.Progressable import org.scalatest.Assertions @@ -892,7 +892,7 @@ class FakeOutputFormat() extends OutputFormat[Integer, Integer]() { */ class NewFakeWriter extends NewRecordWriter[Integer, Integer] { - def close(p1: NewTaskAttempContext): Unit = () + def close(p1: NewTaskAttemptContext): Unit = () def write(p1: Integer, p2: Integer): Unit = () @@ -901,24 +901,24 @@ class NewFakeWriter extends NewRecordWriter[Integer, Integer] { class NewFakeCommitter extends NewOutputCommitter { def setupJob(p1: NewJobContext): Unit = () - def needsTaskCommit(p1: NewTaskAttempContext): Boolean = false + def needsTaskCommit(p1: NewTaskAttemptContext): Boolean = false - def setupTask(p1: NewTaskAttempContext): Unit = () + def setupTask(p1: NewTaskAttemptContext): Unit = () - def commitTask(p1: NewTaskAttempContext): Unit = () + def commitTask(p1: NewTaskAttemptContext): Unit = () - def abortTask(p1: NewTaskAttempContext): Unit = () + def abortTask(p1: NewTaskAttemptContext): Unit = () } class NewFakeFormat() extends NewOutputFormat[Integer, Integer]() { def checkOutputSpecs(p1: NewJobContext): Unit = () - def getRecordWriter(p1: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = { + def getRecordWriter(p1: NewTaskAttemptContext): NewRecordWriter[Integer, Integer] = { new NewFakeWriter() } - def getOutputCommitter(p1: NewTaskAttempContext): NewOutputCommitter = { + def getOutputCommitter(p1: NewTaskAttemptContext): NewOutputCommitter = { new NewFakeCommitter() } } @@ -958,7 +958,7 @@ class FakeFormatWithCallback() extends FakeOutputFormat { } class NewFakeWriterWithCallback extends NewFakeWriter { - override def close(p1: NewTaskAttempContext): Unit = { + override def close(p1: NewTaskAttemptContext): Unit = { FakeWriterWithCallback.calledBy += "close" } @@ -972,7 +972,7 @@ class NewFakeWriterWithCallback extends NewFakeWriter { } class NewFakeFormatWithCallback() extends NewFakeFormat { - override def getRecordWriter(p1: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = { + override def getRecordWriter(p1: NewTaskAttemptContext): NewRecordWriter[Integer, Integer] = { new NewFakeWriterWithCallback() } } @@ -982,27 +982,27 @@ class YetAnotherFakeCommitter extends NewOutputCommitter with Assertions { JobID.jobid = j.getJobID().getId } - def needsTaskCommit(t: NewTaskAttempContext): Boolean = false + def needsTaskCommit(t: NewTaskAttemptContext): Boolean = false - def setupTask(t: NewTaskAttempContext): Unit = { + def setupTask(t: NewTaskAttemptContext): Unit = { val jobId = t.getTaskAttemptID().getJobID().getId assert(jobId === JobID.jobid) } - def commitTask(t: NewTaskAttempContext): Unit = {} + def commitTask(t: NewTaskAttemptContext): Unit = {} - def abortTask(t: 
NewTaskAttempContext): Unit = {} + def abortTask(t: NewTaskAttemptContext): Unit = {} } class YetAnotherFakeFormat() extends NewOutputFormat[Integer, Integer]() { def checkOutputSpecs(j: NewJobContext): Unit = {} - def getRecordWriter(t: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = { + def getRecordWriter(t: NewTaskAttemptContext): NewRecordWriter[Integer, Integer] = { new NewFakeWriter() } - def getOutputCommitter(t: NewTaskAttempContext): NewOutputCommitter = { + def getOutputCommitter(t: NewTaskAttemptContext): NewOutputCommitter = { new YetAnotherFakeCommitter() } } @@ -1021,7 +1021,7 @@ class ConfigTestFormat() extends NewFakeFormat() with Configurable { def getConf: Configuration = null - override def getRecordWriter(p1: NewTaskAttempContext): NewRecordWriter[Integer, Integer] = { + override def getRecordWriter(p1: NewTaskAttemptContext): NewRecordWriter[Integer, Integer] = { assert(setConfCalled, "setConf was never called") super.getRecordWriter(p1) } diff --git a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala index 8962fd6740bf6..df8ac2ef744cd 100644 --- a/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala +++ b/core/src/test/scala/org/apache/spark/rdd/RDDSuite.scala @@ -1102,7 +1102,7 @@ class RDDSuite extends SparkFunSuite with SharedSparkContext with Eventually { } } - test("RDD.partitions() fails fast when partitions indicies are incorrect (SPARK-13021)") { + test("RDD.partitions() fails fast when partitions indices are incorrect (SPARK-13021)") { class BadRDD[T: ClassTag](prev: RDD[T]) extends RDD[T](prev) { override def compute(part: Partition, context: TaskContext): Iterator[T] = { diff --git a/core/src/test/scala/org/apache/spark/resource/ResourceProfileManagerSuite.scala b/core/src/test/scala/org/apache/spark/resource/ResourceProfileManagerSuite.scala index f4521738c4870..36a5620729912 100644 --- a/core/src/test/scala/org/apache/spark/resource/ResourceProfileManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/resource/ResourceProfileManagerSuite.scala @@ -24,7 +24,7 @@ import org.apache.spark.scheduler.LiveListenerBus class ResourceProfileManagerSuite extends SparkFunSuite { - override def beforeAll() { + override def beforeAll(): Unit = { try { ResourceProfile.clearDefaultProfile() } finally { @@ -32,7 +32,7 @@ class ResourceProfileManagerSuite extends SparkFunSuite { } } - override def afterEach() { + override def afterEach(): Unit = { try { ResourceProfile.clearDefaultProfile() } finally { @@ -47,8 +47,8 @@ class ResourceProfileManagerSuite extends SparkFunSuite { val rpmanager = new ResourceProfileManager(conf, listenerBus) val defaultProf = rpmanager.defaultResourceProfile assert(defaultProf.id === ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID) - assert(defaultProf.executorResources.size === 2, - "Executor resources should contain cores and memory by default") + assert(defaultProf.executorResources.size === 3, + "Executor resources should contain cores, heap and offheap memory by default") assert(defaultProf.executorResources(ResourceProfile.CORES).amount === 4, s"Executor resources should have 4 cores") } @@ -67,7 +67,8 @@ class ResourceProfileManagerSuite extends SparkFunSuite { rpmanager.isSupported(immrprof) }.getMessage() - assert(error.contains("ResourceProfiles are only supported on YARN with dynamic allocation")) + assert(error.contains( + "ResourceProfiles are only supported on YARN and Kubernetes with dynamic allocation")) } test("isSupported yarn with dynamic 
allocation") { @@ -84,7 +85,22 @@ class ResourceProfileManagerSuite extends SparkFunSuite { assert(rpmanager.isSupported(immrprof) == true) } - test("isSupported yarn with local mode") { + test("isSupported k8s with dynamic allocation") { + val conf = new SparkConf().setMaster("k8s://foo").set(EXECUTOR_CORES, 4) + conf.set(DYN_ALLOCATION_ENABLED, true) + conf.set(DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED, true) + conf.set(RESOURCE_PROFILE_MANAGER_TESTING.key, "true") + val rpmanager = new ResourceProfileManager(conf, listenerBus) + // default profile should always work + val defaultProf = rpmanager.defaultResourceProfile + val rprof = new ResourceProfileBuilder() + val gpuExecReq = + new ExecutorResourceRequests().resource("gpu", 2, "someScript", "nvidia") + val immrprof = rprof.require(gpuExecReq).build + assert(rpmanager.isSupported(immrprof) == true) + } + + test("isSupported with local mode") { val conf = new SparkConf().setMaster("local").set(EXECUTOR_CORES, 4) conf.set(RESOURCE_PROFILE_MANAGER_TESTING.key, "true") val rpmanager = new ResourceProfileManager(conf, listenerBus) @@ -98,7 +114,8 @@ class ResourceProfileManagerSuite extends SparkFunSuite { rpmanager.isSupported(immrprof) }.getMessage() - assert(error.contains("ResourceProfiles are only supported on YARN with dynamic allocation")) + assert(error.contains( + "ResourceProfiles are only supported on YARN and Kubernetes with dynamic allocation")) } test("ResourceProfileManager has equivalent profile") { diff --git a/core/src/test/scala/org/apache/spark/resource/ResourceProfileSuite.scala b/core/src/test/scala/org/apache/spark/resource/ResourceProfileSuite.scala index d0479ca7db40c..27cc44a099de1 100644 --- a/core/src/test/scala/org/apache/spark/resource/ResourceProfileSuite.scala +++ b/core/src/test/scala/org/apache/spark/resource/ResourceProfileSuite.scala @@ -24,7 +24,7 @@ import org.apache.spark.resource.TestResourceIDs._ class ResourceProfileSuite extends SparkFunSuite { - override def beforeAll() { + override def beforeAll(): Unit = { try { ResourceProfile.clearDefaultProfile() } finally { @@ -32,7 +32,7 @@ class ResourceProfileSuite extends SparkFunSuite { } } - override def afterEach() { + override def afterEach(): Unit = { try { ResourceProfile.clearDefaultProfile() } finally { @@ -43,8 +43,8 @@ class ResourceProfileSuite extends SparkFunSuite { test("Default ResourceProfile") { val rprof = ResourceProfile.getOrCreateDefaultProfile(new SparkConf) assert(rprof.id === ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID) - assert(rprof.executorResources.size === 2, - "Executor resources should contain cores and memory by default") + assert(rprof.executorResources.size === 3, + "Executor resources should contain cores, heap and offheap memory by default") assert(rprof.executorResources(ResourceProfile.CORES).amount === 1, "Executor resources should have 1 core") assert(rprof.getExecutorCores.get === 1, @@ -55,8 +55,8 @@ class ResourceProfileSuite extends SparkFunSuite { "pyspark memory empty if not specified") assert(rprof.executorResources.get(ResourceProfile.OVERHEAD_MEM) == None, "overhead memory empty if not specified") - assert(rprof.executorResources.get(ResourceProfile.OFFHEAP_MEM) == None, - "offHeap memory empty if not specified") + assert(rprof.executorResources(ResourceProfile.OFFHEAP_MEM).amount === 0, + "Executor resources should have 0 offheap memory") assert(rprof.taskResources.size === 1, "Task resources should just contain cpus by default") assert(rprof.taskResources(ResourceProfile.CPUS).amount === 1, diff --git 
a/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala b/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala index 278a72a7192d8..eac45e6ac5801 100644 --- a/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala +++ b/core/src/test/scala/org/apache/spark/resource/ResourceUtilsSuite.scala @@ -26,10 +26,8 @@ import org.json4s.{DefaultFormats, Extraction} import org.apache.spark.{LocalSparkContext, SparkConf, SparkException, SparkFunSuite} import org.apache.spark.TestUtils._ import org.apache.spark.internal.config._ -import org.apache.spark.internal.config.Tests._ import org.apache.spark.resource.ResourceUtils._ import org.apache.spark.resource.TestResourceIDs._ -import org.apache.spark.scheduler.LiveListenerBus import org.apache.spark.util.Utils class ResourceUtilsSuite extends SparkFunSuite @@ -223,7 +221,7 @@ class ResourceUtilsSuite extends SparkFunSuite val conf = new SparkConf assume(!(Utils.isWindows)) withTempDir { dir => - val gpuDiscovery = createTempScriptWithExpectedOutput(dir, "gpuDisocveryScript", + val gpuDiscovery = createTempScriptWithExpectedOutput(dir, "gpuDiscoveryScript", """{"name": "gpu", "addresses": ["0", "1"]}""") conf.set(DRIVER_GPU_ID.amountConf, "2") conf.set(DRIVER_GPU_ID.discoveryScriptConf, gpuDiscovery) diff --git a/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala b/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala index d25fd20340d48..bec96e523e9e5 100644 --- a/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala +++ b/core/src/test/scala/org/apache/spark/rpc/RpcEnvSuite.scala @@ -33,7 +33,7 @@ import org.mockito.Mockito.{mock, never, verify, when} import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.Eventually._ -import org.apache.spark.{SecurityManager, SparkConf, SparkEnv, SparkException, SparkFunSuite} +import org.apache.spark.{SparkConf, SparkEnv, SparkException, SparkFunSuite} import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.config._ import org.apache.spark.util.{ThreadUtils, Utils} @@ -901,7 +901,6 @@ abstract class RpcEnvSuite extends SparkFunSuite with BeforeAndAfterAll { } } - val sm = new SecurityManager(conf) val hc = SparkHadoopUtil.get.conf val files = Seq( @@ -913,7 +912,7 @@ abstract class RpcEnvSuite extends SparkFunSuite with BeforeAndAfterAll { (subFile2, dir2Uri + "/file2")) files.foreach { case (f, uri) => val destFile = new File(destDir, f.getName()) - Utils.fetchFile(uri, destDir, conf, sm, hc, 0L, false) + Utils.fetchFile(uri, destDir, conf, hc, 0L, false) assert(Files.equal(f, destFile)) } @@ -921,7 +920,7 @@ abstract class RpcEnvSuite extends SparkFunSuite with BeforeAndAfterAll { Seq("files", "jars", "dir1").foreach { root => intercept[Exception] { val uri = env.address.toSparkURL + s"/$root/doesNotExist" - Utils.fetchFile(uri, destDir, conf, sm, hc, 0L, false) + Utils.fetchFile(uri, destDir, conf, hc, 0L, false) } } } diff --git a/core/src/test/scala/org/apache/spark/rpc/netty/InboxSuite.scala b/core/src/test/scala/org/apache/spark/rpc/netty/InboxSuite.scala index c74c728b3e3f3..8b1c602cd8e58 100644 --- a/core/src/test/scala/org/apache/spark/rpc/netty/InboxSuite.scala +++ b/core/src/test/scala/org/apache/spark/rpc/netty/InboxSuite.scala @@ -136,4 +136,17 @@ class InboxSuite extends SparkFunSuite { endpoint.verifySingleOnNetworkErrorMessage(cause, remoteAddress) } + + test("SPARK-32738: should reduce the number of active threads when fatal error happens") { + val endpoint = mock(classOf[TestRpcEndpoint]) + 
when(endpoint.receive).thenThrow(new OutOfMemoryError()) + + val dispatcher = mock(classOf[Dispatcher]) + val inbox = new Inbox("name", endpoint) + inbox.post(OneWayMessage(null, "hi")) + intercept[OutOfMemoryError] { + inbox.process(dispatcher) + } + assert(inbox.getNumActiveThreads == 0) + } } diff --git a/core/src/test/scala/org/apache/spark/rpc/netty/NettyRpcEnvSuite.scala b/core/src/test/scala/org/apache/spark/rpc/netty/NettyRpcEnvSuite.scala index c2730f90ed982..fe6d0db837bda 100644 --- a/core/src/test/scala/org/apache/spark/rpc/netty/NettyRpcEnvSuite.scala +++ b/core/src/test/scala/org/apache/spark/rpc/netty/NettyRpcEnvSuite.scala @@ -73,7 +73,7 @@ class NettyRpcEnvSuite extends RpcEnvSuite with MockitoSugar with TimeLimits { val nettyEnv = env.asInstanceOf[NettyRpcEnv] val client = mock[TransportClient] - val senderAddress = RpcAddress("locahost", 12345) + val senderAddress = RpcAddress("localhost", 12345) val receiverAddress = RpcEndpointAddress("localhost", 54321, "test") val receiver = new NettyRpcEndpointRef(nettyEnv.conf, receiverAddress, nettyEnv) diff --git a/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala index d18ca36f1fa60..b7ac9ecac2387 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/BarrierTaskContextSuite.scala @@ -25,7 +25,6 @@ import org.scalatest.concurrent.Eventually import org.scalatest.time.SpanSugar._ import org.apache.spark._ -import org.apache.spark.internal.config import org.apache.spark.internal.config.Tests.TEST_NO_STAGE_RETRY class BarrierTaskContextSuite extends SparkFunSuite with LocalSparkContext with Eventually { @@ -189,7 +188,7 @@ class BarrierTaskContextSuite extends SparkFunSuite with LocalSparkContext with test("throw exception if the number of barrier() calls are not the same on every task") { initLocalClusterSparkContext() - sc.conf.set("spark.barrier.sync.timeout", "1") + sc.conf.set("spark.barrier.sync.timeout", "5") val rdd = sc.makeRDD(1 to 10, 4) val rdd2 = rdd.barrier().mapPartitions { it => val context = BarrierTaskContext.get() @@ -212,7 +211,7 @@ class BarrierTaskContextSuite extends SparkFunSuite with LocalSparkContext with rdd2.collect() }.getMessage assert(error.contains("The coordinator didn't get all barrier sync requests")) - assert(error.contains("within 1 second(s)")) + assert(error.contains("within 5 second(s)")) } def testBarrierTaskKilled(interruptOnKill: Boolean): Unit = { diff --git a/core/src/test/scala/org/apache/spark/scheduler/BlacklistTrackerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/BlacklistTrackerSuite.scala deleted file mode 100644 index a1671a58f0d9b..0000000000000 --- a/core/src/test/scala/org/apache/spark/scheduler/BlacklistTrackerSuite.scala +++ /dev/null @@ -1,608 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.scheduler - -import org.mockito.ArgumentMatchers.any -import org.mockito.Mockito.{never, verify, when} -import org.mockito.invocation.InvocationOnMock -import org.scalatest.BeforeAndAfterEach -import org.scalatestplus.mockito.MockitoSugar - -import org.apache.spark._ -import org.apache.spark.internal.config -import org.apache.spark.util.ManualClock - -class BlacklistTrackerSuite extends SparkFunSuite with BeforeAndAfterEach with MockitoSugar - with LocalSparkContext { - - private val clock = new ManualClock(0) - - private var blacklist: BlacklistTracker = _ - private var listenerBusMock: LiveListenerBus = _ - private var scheduler: TaskSchedulerImpl = _ - private var conf: SparkConf = _ - - override def beforeEach(): Unit = { - conf = new SparkConf().setAppName("test").setMaster("local") - .set(config.BLACKLIST_ENABLED.key, "true") - scheduler = mockTaskSchedWithConf(conf) - - clock.setTime(0) - - listenerBusMock = mock[LiveListenerBus] - blacklist = new BlacklistTracker(listenerBusMock, conf, None, clock) - } - - override def afterEach(): Unit = { - if (blacklist != null) { - blacklist = null - } - if (scheduler != null) { - scheduler.stop() - scheduler = null - } - super.afterEach() - } - - // All executors and hosts used in tests should be in this set, so that [[assertEquivalentToSet]] - // works. Its OK if its got extraneous entries - val allExecutorAndHostIds = { - (('A' to 'Z')++ (1 to 100).map(_.toString)) - .flatMap{ suffix => - Seq(s"host$suffix", s"host-$suffix") - } - }.toSet - - /** - * Its easier to write our tests as if we could directly look at the sets of nodes & executors in - * the blacklist. However the api doesn't expose a set, so this is a simple way to test - * something similar, since we know the universe of values that might appear in these sets. - */ - def assertEquivalentToSet(f: String => Boolean, expected: Set[String]): Unit = { - allExecutorAndHostIds.foreach { id => - val actual = f(id) - val exp = expected.contains(id) - assert(actual === exp, raw"""for string "$id" """) - } - } - - def mockTaskSchedWithConf(conf: SparkConf): TaskSchedulerImpl = { - sc = new SparkContext(conf) - val scheduler = mock[TaskSchedulerImpl] - when(scheduler.sc).thenReturn(sc) - when(scheduler.mapOutputTracker).thenReturn( - SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster]) - scheduler - } - - def createTaskSetBlacklist(stageId: Int = 0): TaskSetBlacklist = { - new TaskSetBlacklist(listenerBusMock, conf, stageId, stageAttemptId = 0, clock = clock) - } - - test("executors can be blacklisted with only a few failures per stage") { - // For many different stages, executor 1 fails a task, then executor 2 succeeds the task, - // and then the task set is done. Not enough failures to blacklist the executor *within* - // any particular taskset, but we still blacklist the executor overall eventually. - // Also, we intentionally have a mix of task successes and failures -- there are even some - // successes after the executor is blacklisted. The idea here is those tasks get scheduled - // before the executor is blacklisted. 
We might get successes after blacklisting (because the - // executor might be flaky but not totally broken). But successes should not unblacklist the - // executor. - val failuresUntilBlacklisted = conf.get(config.MAX_FAILURES_PER_EXEC) - var failuresSoFar = 0 - (0 until failuresUntilBlacklisted * 10).foreach { stageId => - val taskSetBlacklist = createTaskSetBlacklist(stageId) - if (stageId % 2 == 0) { - // fail one task in every other taskset - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 0, failureReason = "testing") - failuresSoFar += 1 - } - blacklist.updateBlacklistForSuccessfulTaskSet(stageId, 0, taskSetBlacklist.execToFailures) - assert(failuresSoFar == stageId / 2 + 1) - if (failuresSoFar < failuresUntilBlacklisted) { - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - } else { - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1")) - verify(listenerBusMock).post( - SparkListenerExecutorBlacklisted(0, "1", failuresUntilBlacklisted)) - } - } - } - - // If an executor has many task failures, but the task set ends up failing, it shouldn't be - // counted against the executor. - test("executors aren't blacklisted as a result of tasks in failed task sets") { - val failuresUntilBlacklisted = conf.get(config.MAX_FAILURES_PER_EXEC) - // for many different stages, executor 1 fails a task, and then the taskSet fails. - (0 until failuresUntilBlacklisted * 10).foreach { stage => - val taskSetBlacklist = createTaskSetBlacklist(stage) - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 0, failureReason = "testing") - } - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - } - - Seq(true, false).foreach { succeedTaskSet => - val label = if (succeedTaskSet) "success" else "failure" - test(s"stage blacklist updates correctly on stage $label") { - // Within one taskset, an executor fails a few times, so it's blacklisted for the taskset. - // But if the taskset fails, we shouldn't blacklist the executor after the stage. - val taskSetBlacklist = createTaskSetBlacklist(0) - // We trigger enough failures for both the taskset blacklist, and the application blacklist. - val numFailures = math.max(conf.get(config.MAX_FAILURES_PER_EXEC), - conf.get(config.MAX_FAILURES_PER_EXEC_STAGE)) - (0 until numFailures).foreach { index => - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = index, failureReason = "testing") - } - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("1")) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - if (succeedTaskSet) { - // The task set succeeded elsewhere, so we should count those failures against our executor, - // and it should be blacklisted for the entire application. - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist.execToFailures) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "1", numFailures)) - } else { - // The task set failed, so we don't count these failures against the executor for other - // stages. - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - } - } - } - - test("blacklisted executors and nodes get recovered with time") { - val taskSetBlacklist0 = createTaskSetBlacklist(stageId = 0) - // Fail 4 tasks in one task set on executor 1, so that executor gets blacklisted for the whole - // application. 
- (0 until 4).foreach { partition => - taskSetBlacklist0.updateBlacklistForFailedTask( - "hostA", exec = "1", index = partition, failureReason = "testing") - } - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist0.execToFailures) - assert(blacklist.nodeBlacklist() === Set()) - assertEquivalentToSet(blacklist.isNodeBlacklisted(_), Set()) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "1", 4)) - - val taskSetBlacklist1 = createTaskSetBlacklist(stageId = 1) - // Fail 4 tasks in one task set on executor 2, so that executor gets blacklisted for the whole - // application. Since that's the second executor that is blacklisted on the same node, we also - // blacklist that node. - (0 until 4).foreach { partition => - taskSetBlacklist1.updateBlacklistForFailedTask( - "hostA", exec = "2", index = partition, failureReason = "testing") - } - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist1.execToFailures) - assert(blacklist.nodeBlacklist() === Set("hostA")) - assertEquivalentToSet(blacklist.isNodeBlacklisted(_), Set("hostA")) - verify(listenerBusMock).post(SparkListenerNodeBlacklisted(0, "hostA", 2)) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1", "2")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "2", 4)) - - // Advance the clock and then make sure hostA and executors 1 and 2 have been removed from the - // blacklist. - val timeout = blacklist.BLACKLIST_TIMEOUT_MILLIS + 1 - clock.advance(timeout) - blacklist.applyBlacklistTimeout() - assert(blacklist.nodeBlacklist() === Set()) - assertEquivalentToSet(blacklist.isNodeBlacklisted(_), Set()) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - verify(listenerBusMock).post(SparkListenerExecutorUnblacklisted(timeout, "2")) - verify(listenerBusMock).post(SparkListenerExecutorUnblacklisted(timeout, "1")) - verify(listenerBusMock).post(SparkListenerNodeUnblacklisted(timeout, "hostA")) - - // Fail one more task, but executor isn't put back into blacklist since the count of failures - // on that executor should have been reset to 0. - val taskSetBlacklist2 = createTaskSetBlacklist(stageId = 2) - taskSetBlacklist2.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 0, failureReason = "testing") - blacklist.updateBlacklistForSuccessfulTaskSet(2, 0, taskSetBlacklist2.execToFailures) - assert(blacklist.nodeBlacklist() === Set()) - assertEquivalentToSet(blacklist.isNodeBlacklisted(_), Set()) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - } - - test("blacklist can handle lost executors") { - // The blacklist should still work if an executor is killed completely. We should still - // be able to blacklist the entire node. - val taskSetBlacklist0 = createTaskSetBlacklist(stageId = 0) - // Lets say that executor 1 dies completely. We get some task failures, but - // the taskset then finishes successfully (elsewhere). 
- (0 until 4).foreach { partition => - taskSetBlacklist0.updateBlacklistForFailedTask( - "hostA", exec = "1", index = partition, failureReason = "testing") - } - blacklist.handleRemovedExecutor("1") - blacklist.updateBlacklistForSuccessfulTaskSet( - stageId = 0, - stageAttemptId = 0, - taskSetBlacklist0.execToFailures) - assert(blacklist.isExecutorBlacklisted("1")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "1", 4)) - val t1 = blacklist.BLACKLIST_TIMEOUT_MILLIS / 2 - clock.advance(t1) - - // Now another executor gets spun up on that host, but it also dies. - val taskSetBlacklist1 = createTaskSetBlacklist(stageId = 1) - (0 until 4).foreach { partition => - taskSetBlacklist1.updateBlacklistForFailedTask( - "hostA", exec = "2", index = partition, failureReason = "testing") - } - blacklist.handleRemovedExecutor("2") - blacklist.updateBlacklistForSuccessfulTaskSet( - stageId = 1, - stageAttemptId = 0, - taskSetBlacklist1.execToFailures) - // We've now had two bad executors on the hostA, so we should blacklist the entire node. - assert(blacklist.isExecutorBlacklisted("1")) - assert(blacklist.isExecutorBlacklisted("2")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(t1, "2", 4)) - assert(blacklist.isNodeBlacklisted("hostA")) - verify(listenerBusMock).post(SparkListenerNodeBlacklisted(t1, "hostA", 2)) - - // Advance the clock so that executor 1 should no longer be explicitly blacklisted, but - // everything else should still be blacklisted. - val t2 = blacklist.BLACKLIST_TIMEOUT_MILLIS / 2 + 1 - clock.advance(t2) - blacklist.applyBlacklistTimeout() - assert(!blacklist.isExecutorBlacklisted("1")) - verify(listenerBusMock).post(SparkListenerExecutorUnblacklisted(t1 + t2, "1")) - assert(blacklist.isExecutorBlacklisted("2")) - assert(blacklist.isNodeBlacklisted("hostA")) - // make sure we don't leak memory - assert(!blacklist.executorIdToBlacklistStatus.contains("1")) - assert(!blacklist.nodeToBlacklistedExecs("hostA").contains("1")) - // Advance the timeout again so now hostA should be removed from the blacklist. - clock.advance(t1) - blacklist.applyBlacklistTimeout() - assert(!blacklist.nodeIdToBlacklistExpiryTime.contains("hostA")) - verify(listenerBusMock).post(SparkListenerNodeUnblacklisted(t1 + t2 + t1, "hostA")) - // Even though unblacklisting a node implicitly unblacklists all of its executors, - // there will be no SparkListenerExecutorUnblacklisted sent here. - } - - test("task failures expire with time") { - // Verifies that 2 failures within the timeout period cause an executor to be blacklisted, but - // if task failures are spaced out by more than the timeout period, the first failure is timed - // out, and the executor isn't blacklisted. - var stageId = 0 - - def failOneTaskInTaskSet(exec: String): Unit = { - val taskSetBlacklist = createTaskSetBlacklist(stageId = stageId) - taskSetBlacklist.updateBlacklistForFailedTask("host-" + exec, exec, 0, "testing") - blacklist.updateBlacklistForSuccessfulTaskSet(stageId, 0, taskSetBlacklist.execToFailures) - stageId += 1 - } - - failOneTaskInTaskSet(exec = "1") - // We have one sporadic failure on exec 2, but that's it. Later checks ensure that we never - // blacklist executor 2 despite this one failure. - failOneTaskInTaskSet(exec = "2") - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - assert(blacklist.nextExpiryTime === Long.MaxValue) - - // We advance the clock past the expiry time. 
- clock.advance(blacklist.BLACKLIST_TIMEOUT_MILLIS + 1) - val t0 = clock.getTimeMillis() - blacklist.applyBlacklistTimeout() - assert(blacklist.nextExpiryTime === Long.MaxValue) - failOneTaskInTaskSet(exec = "1") - - // Because the 2nd failure on executor 1 happened past the expiry time, nothing should have been - // blacklisted. - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - - // Now we add one more failure, within the timeout, and it should be counted. - clock.setTime(t0 + blacklist.BLACKLIST_TIMEOUT_MILLIS - 1) - val t1 = clock.getTimeMillis() - failOneTaskInTaskSet(exec = "1") - blacklist.applyBlacklistTimeout() - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(t1, "1", 2)) - assert(blacklist.nextExpiryTime === t1 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - - // Add failures on executor 3, make sure it gets put on the blacklist. - clock.setTime(t1 + blacklist.BLACKLIST_TIMEOUT_MILLIS - 1) - val t2 = clock.getTimeMillis() - failOneTaskInTaskSet(exec = "3") - failOneTaskInTaskSet(exec = "3") - blacklist.applyBlacklistTimeout() - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1", "3")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(t2, "3", 2)) - assert(blacklist.nextExpiryTime === t1 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - - // Now we go past the timeout for executor 1, so it should be dropped from the blacklist. - clock.setTime(t1 + blacklist.BLACKLIST_TIMEOUT_MILLIS + 1) - blacklist.applyBlacklistTimeout() - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("3")) - verify(listenerBusMock).post(SparkListenerExecutorUnblacklisted(clock.getTimeMillis(), "1")) - assert(blacklist.nextExpiryTime === t2 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - - // Make sure that we update correctly when we go from having blacklisted executors to - // just having tasks with timeouts. - clock.setTime(t2 + blacklist.BLACKLIST_TIMEOUT_MILLIS - 1) - failOneTaskInTaskSet(exec = "4") - blacklist.applyBlacklistTimeout() - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("3")) - assert(blacklist.nextExpiryTime === t2 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - - clock.setTime(t2 + blacklist.BLACKLIST_TIMEOUT_MILLIS + 1) - blacklist.applyBlacklistTimeout() - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - verify(listenerBusMock).post(SparkListenerExecutorUnblacklisted(clock.getTimeMillis(), "3")) - // we've got one task failure still, but we don't bother setting nextExpiryTime to it, to - // avoid wasting time checking for expiry of individual task failures. - assert(blacklist.nextExpiryTime === Long.MaxValue) - } - - test("task failure timeout works as expected for long-running tasksets") { - // This ensures that we don't trigger spurious blacklisting for long tasksets, when the taskset - // finishes long after the task failures. We create two tasksets, each with one failure. - // Individually they shouldn't cause any blacklisting since there is only one failure. - // Furthermore, we space the failures out so far that even when both tasksets have completed, - // we still don't trigger any blacklisting. 
- val taskSetBlacklist1 = createTaskSetBlacklist(stageId = 1) - val taskSetBlacklist2 = createTaskSetBlacklist(stageId = 2) - // Taskset1 has one failure immediately - taskSetBlacklist1.updateBlacklistForFailedTask("host-1", "1", 0, "testing") - // Then we have a *long* delay, much longer than the timeout, before any other failures or - // taskset completion - clock.advance(blacklist.BLACKLIST_TIMEOUT_MILLIS * 5) - // After the long delay, we have one failure on taskset 2, on the same executor - taskSetBlacklist2.updateBlacklistForFailedTask("host-1", "1", 0, "testing") - // Finally, we complete both tasksets. Its important here to complete taskset2 *first*. We - // want to make sure that when taskset 1 finishes, even though we've now got two task failures, - // we realize that the task failure we just added was well before the timeout. - clock.advance(1) - blacklist.updateBlacklistForSuccessfulTaskSet(stageId = 2, 0, taskSetBlacklist2.execToFailures) - clock.advance(1) - blacklist.updateBlacklistForSuccessfulTaskSet(stageId = 1, 0, taskSetBlacklist1.execToFailures) - - // Make sure nothing was blacklisted - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set()) - } - - test("only blacklist nodes for the application when enough executors have failed on that " + - "specific host") { - // we blacklist executors on two different hosts -- make sure that doesn't lead to any - // node blacklisting - val taskSetBlacklist0 = createTaskSetBlacklist(stageId = 0) - taskSetBlacklist0.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 0, failureReason = "testing") - taskSetBlacklist0.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 1, failureReason = "testing") - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist0.execToFailures) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "1", 2)) - assertEquivalentToSet(blacklist.isNodeBlacklisted(_), Set()) - - val taskSetBlacklist1 = createTaskSetBlacklist(stageId = 1) - taskSetBlacklist1.updateBlacklistForFailedTask( - "hostB", exec = "2", index = 0, failureReason = "testing") - taskSetBlacklist1.updateBlacklistForFailedTask( - "hostB", exec = "2", index = 1, failureReason = "testing") - blacklist.updateBlacklistForSuccessfulTaskSet(1, 0, taskSetBlacklist1.execToFailures) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1", "2")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "2", 2)) - assertEquivalentToSet(blacklist.isNodeBlacklisted(_), Set()) - - // Finally, blacklist another executor on the same node as the original blacklisted executor, - // and make sure this time we *do* blacklist the node. 
- val taskSetBlacklist2 = createTaskSetBlacklist(stageId = 0) - taskSetBlacklist2.updateBlacklistForFailedTask( - "hostA", exec = "3", index = 0, failureReason = "testing") - taskSetBlacklist2.updateBlacklistForFailedTask( - "hostA", exec = "3", index = 1, failureReason = "testing") - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist2.execToFailures) - assertEquivalentToSet(blacklist.isExecutorBlacklisted(_), Set("1", "2", "3")) - verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "3", 2)) - assertEquivalentToSet(blacklist.isNodeBlacklisted(_), Set("hostA")) - verify(listenerBusMock).post(SparkListenerNodeBlacklisted(0, "hostA", 2)) - } - - test("blacklist still respects legacy configs") { - val conf = new SparkConf().setMaster("local") - assert(!BlacklistTracker.isBlacklistEnabled(conf)) - conf.set(config.BLACKLIST_LEGACY_TIMEOUT_CONF, 5000L) - assert(BlacklistTracker.isBlacklistEnabled(conf)) - assert(5000 === BlacklistTracker.getBlacklistTimeout(conf)) - // the new conf takes precedence, though - conf.set(config.BLACKLIST_TIMEOUT_CONF, 1000L) - assert(1000 === BlacklistTracker.getBlacklistTimeout(conf)) - - // if you explicitly set the legacy conf to 0, that also would disable blacklisting - conf.set(config.BLACKLIST_LEGACY_TIMEOUT_CONF, 0L) - assert(!BlacklistTracker.isBlacklistEnabled(conf)) - // but again, the new conf takes precedence - conf.set(config.BLACKLIST_ENABLED, true) - assert(BlacklistTracker.isBlacklistEnabled(conf)) - assert(1000 === BlacklistTracker.getBlacklistTimeout(conf)) - } - - test("check blacklist configuration invariants") { - val conf = new SparkConf().setMaster("yarn").set(config.SUBMIT_DEPLOY_MODE, "cluster") - Seq( - (2, 2), - (2, 3) - ).foreach { case (maxTaskFailures, maxNodeAttempts) => - conf.set(config.TASK_MAX_FAILURES, maxTaskFailures) - conf.set(config.MAX_TASK_ATTEMPTS_PER_NODE.key, maxNodeAttempts.toString) - val excMsg = intercept[IllegalArgumentException] { - BlacklistTracker.validateBlacklistConfs(conf) - }.getMessage() - assert(excMsg === s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key} " + - s"( = ${maxNodeAttempts}) was >= ${config.TASK_MAX_FAILURES.key} " + - s"( = ${maxTaskFailures} ). Though blacklisting is enabled, with this configuration, " + - s"Spark will not be robust to one bad node. 
Decrease " + - s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key}, increase ${config.TASK_MAX_FAILURES.key}, " + - s"or disable blacklisting with ${config.BLACKLIST_ENABLED.key}") - } - - conf.remove(config.TASK_MAX_FAILURES) - conf.remove(config.MAX_TASK_ATTEMPTS_PER_NODE) - - Seq( - config.MAX_TASK_ATTEMPTS_PER_EXECUTOR, - config.MAX_TASK_ATTEMPTS_PER_NODE, - config.MAX_FAILURES_PER_EXEC_STAGE, - config.MAX_FAILED_EXEC_PER_NODE_STAGE, - config.MAX_FAILURES_PER_EXEC, - config.MAX_FAILED_EXEC_PER_NODE, - config.BLACKLIST_TIMEOUT_CONF - ).foreach { config => - conf.set(config.key, "0") - val excMsg = intercept[IllegalArgumentException] { - BlacklistTracker.validateBlacklistConfs(conf) - }.getMessage() - assert(excMsg.contains(s"${config.key} was 0, but must be > 0.")) - conf.remove(config) - } - } - - test("blacklisting kills executors, configured by BLACKLIST_KILL_ENABLED") { - val allocationClientMock = mock[ExecutorAllocationClient] - when(allocationClientMock.killExecutors(any(), any(), any(), any())).thenReturn(Seq("called")) - when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer { (_: InvocationOnMock) => - // To avoid a race between blacklisting and killing, it is important that the nodeBlacklist - // is updated before we ask the executor allocation client to kill all the executors - // on a particular host. - if (blacklist.nodeBlacklist.contains("hostA")) { - true - } else { - throw new IllegalStateException("hostA should be on the blacklist") - } - } - blacklist = new BlacklistTracker(listenerBusMock, conf, Some(allocationClientMock), clock) - - // Disable auto-kill. Blacklist an executor and make sure killExecutors is not called. - conf.set(config.BLACKLIST_KILL_ENABLED, false) - - val taskSetBlacklist0 = createTaskSetBlacklist(stageId = 0) - // Fail 4 tasks in one task set on executor 1, so that executor gets blacklisted for the whole - // application. - (0 until 4).foreach { partition => - taskSetBlacklist0.updateBlacklistForFailedTask( - "hostA", exec = "1", index = partition, failureReason = "testing") - } - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist0.execToFailures) - - verify(allocationClientMock, never).killExecutor(any()) - - val taskSetBlacklist1 = createTaskSetBlacklist(stageId = 1) - // Fail 4 tasks in one task set on executor 2, so that executor gets blacklisted for the whole - // application. Since that's the second executor that is blacklisted on the same node, we also - // blacklist that node. - (0 until 4).foreach { partition => - taskSetBlacklist1.updateBlacklistForFailedTask( - "hostA", exec = "2", index = partition, failureReason = "testing") - } - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist1.execToFailures) - - verify(allocationClientMock, never).killExecutors(any(), any(), any(), any()) - verify(allocationClientMock, never).killExecutorsOnHost(any()) - - // Enable auto-kill. Blacklist an executor and make sure killExecutors is called. - conf.set(config.BLACKLIST_KILL_ENABLED, true) - blacklist = new BlacklistTracker(listenerBusMock, conf, Some(allocationClientMock), clock) - - val taskSetBlacklist2 = createTaskSetBlacklist(stageId = 0) - // Fail 4 tasks in one task set on executor 1, so that executor gets blacklisted for the whole - // application. 
- (0 until 4).foreach { partition => - taskSetBlacklist2.updateBlacklistForFailedTask( - "hostA", exec = "1", index = partition, failureReason = "testing") - } - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist2.execToFailures) - - verify(allocationClientMock).killExecutors(Seq("1"), false, false, true) - - val taskSetBlacklist3 = createTaskSetBlacklist(stageId = 1) - // Fail 4 tasks in one task set on executor 2, so that executor gets blacklisted for the whole - // application. Since that's the second executor that is blacklisted on the same node, we also - // blacklist that node. - (0 until 4).foreach { partition => - taskSetBlacklist3.updateBlacklistForFailedTask( - "hostA", exec = "2", index = partition, failureReason = "testing") - } - blacklist.updateBlacklistForSuccessfulTaskSet(0, 0, taskSetBlacklist3.execToFailures) - - verify(allocationClientMock).killExecutors(Seq("2"), false, false, true) - verify(allocationClientMock).killExecutorsOnHost("hostA") - } - - test("fetch failure blacklisting kills executors, configured by BLACKLIST_KILL_ENABLED") { - val allocationClientMock = mock[ExecutorAllocationClient] - when(allocationClientMock.killExecutors(any(), any(), any(), any())).thenReturn(Seq("called")) - when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer { (_: InvocationOnMock) => - // To avoid a race between blacklisting and killing, it is important that the nodeBlacklist - // is updated before we ask the executor allocation client to kill all the executors - // on a particular host. - if (blacklist.nodeBlacklist.contains("hostA")) { - true - } else { - throw new IllegalStateException("hostA should be on the blacklist") - } - } - - conf.set(config.BLACKLIST_FETCH_FAILURE_ENABLED, true) - blacklist = new BlacklistTracker(listenerBusMock, conf, Some(allocationClientMock), clock) - - // Disable auto-kill. Blacklist an executor and make sure killExecutors is not called. - conf.set(config.BLACKLIST_KILL_ENABLED, false) - blacklist.updateBlacklistForFetchFailure("hostA", exec = "1") - - verify(allocationClientMock, never).killExecutors(any(), any(), any(), any()) - verify(allocationClientMock, never).killExecutorsOnHost(any()) - - assert(blacklist.nodeToBlacklistedExecs.contains("hostA")) - assert(blacklist.nodeToBlacklistedExecs("hostA").contains("1")) - - // Enable auto-kill. Blacklist an executor and make sure killExecutors is called. - conf.set(config.BLACKLIST_KILL_ENABLED, true) - blacklist = new BlacklistTracker(listenerBusMock, conf, Some(allocationClientMock), clock) - clock.advance(1000) - blacklist.updateBlacklistForFetchFailure("hostA", exec = "1") - - verify(allocationClientMock).killExecutors(Seq("1"), false, false, true) - verify(allocationClientMock, never).killExecutorsOnHost(any()) - - assert(blacklist.executorIdToBlacklistStatus.contains("1")) - assert(blacklist.executorIdToBlacklistStatus("1").node === "hostA") - assert(blacklist.executorIdToBlacklistStatus("1").expiryTime === - 1000 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - assert(blacklist.nextExpiryTime === 1000 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - assert(blacklist.nodeIdToBlacklistExpiryTime.isEmpty) - assert(blacklist.nodeToBlacklistedExecs.contains("hostA")) - assert(blacklist.nodeToBlacklistedExecs("hostA").contains("1")) - - // Enable external shuffle service to see if all the executors on this node will be killed. 
- conf.set(config.SHUFFLE_SERVICE_ENABLED, true) - clock.advance(1000) - blacklist.updateBlacklistForFetchFailure("hostA", exec = "2") - - verify(allocationClientMock, never).killExecutors(Seq("2"), true, true) - verify(allocationClientMock).killExecutorsOnHost("hostA") - - assert(blacklist.nodeIdToBlacklistExpiryTime.contains("hostA")) - assert(blacklist.nodeIdToBlacklistExpiryTime("hostA") === - 2000 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - assert(blacklist.nextExpiryTime === 1000 + blacklist.BLACKLIST_TIMEOUT_MILLIS) - } -} diff --git a/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala index d648293fdbe06..7a74dd877a042 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/CoarseGrainedSchedulerBackendSuite.scala @@ -188,7 +188,6 @@ class CoarseGrainedSchedulerBackendSuite extends SparkFunSuite with LocalSparkCo } test("extra resources from executor") { - import TestUtils._ val execCores = 3 val conf = new SparkConf() @@ -245,7 +244,8 @@ class CoarseGrainedSchedulerBackendSuite extends SparkFunSuite with LocalSparkCo val taskResources = Map(GPU -> new ResourceInformation(GPU, Array("0"))) var taskDescs: Seq[Seq[TaskDescription]] = Seq(Seq(new TaskDescription(1, 0, "1", - "t1", 0, 1, mutable.Map.empty[String, Long], mutable.Map.empty[String, Long], + "t1", 0, 1, mutable.Map.empty[String, Long], + mutable.Map.empty[String, Long], mutable.Map.empty[String, Long], new Properties(), taskResources, bytebuffer))) val ts = backend.getTaskSchedulerImpl() when(ts.resourceOffers(any[IndexedSeq[WorkerOffer]], any[Boolean])).thenReturn(taskDescs) @@ -300,7 +300,7 @@ private class CSMockExternalClusterManager extends ExternalClusterManager { when(ts.applicationId()).thenReturn("appid1") when(ts.applicationAttemptId()).thenReturn(Some("attempt1")) when(ts.schedulingMode).thenReturn(SchedulingMode.FIFO) - when(ts.nodeBlacklist()).thenReturn(Set.empty[String]) + when(ts.excludedNodes()).thenReturn(Set.empty[String]) ts } diff --git a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala index 436765808e22b..194e0dfe312d5 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.scheduler import java.util.Properties import java.util.concurrent.{CountDownLatch, TimeUnit} -import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicLong, AtomicReference} +import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong, AtomicReference} import scala.annotation.meta.param import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map} @@ -42,7 +42,7 @@ import org.apache.spark.resource.ResourceUtils.{FPGA, GPU} import org.apache.spark.scheduler.SchedulingMode.SchedulingMode import org.apache.spark.shuffle.{FetchFailedException, MetadataFetchFailedException} import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster} -import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, ThreadUtils, Utils} +import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, Utils} class DAGSchedulerEventProcessLoopTester(dagScheduler: DAGScheduler) extends 
DAGSchedulerEventProcessLoop(dagScheduler) { @@ -125,14 +125,14 @@ class MyRDD( class DAGSchedulerSuiteDummyException extends Exception -class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLimits { +class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with TimeLimits { import DAGSchedulerSuite._ // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x implicit val defaultSignaler: Signaler = ThreadSignaler - val conf = new SparkConf + private var firstInit: Boolean = _ /** Set of TaskSets the DAGScheduler has requested executed. */ val taskSets = scala.collection.mutable.Buffer[TaskSet]() @@ -297,11 +297,19 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi override def beforeEach(): Unit = { super.beforeEach() - init(new SparkConf()) + firstInit = true } - private def init(testConf: SparkConf): Unit = { - sc = new SparkContext("local[2]", "DAGSchedulerSuite", testConf) + override def sc: SparkContext = { + val sc = super.sc + if (firstInit) { + init(sc) + firstInit = false + } + sc + } + + private def init(sc: SparkContext): Unit = { sparkListener = new EventInfoRecordingListener failure = null sc.addSparkListener(sparkListener) @@ -310,10 +318,10 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi cancelledStages.clear() cacheLocations.clear() results.clear() - securityMgr = new SecurityManager(conf) - broadcastManager = new BroadcastManager(true, conf, securityMgr) - mapOutputTracker = spy(new MyMapOutputTrackerMaster(conf, broadcastManager)) - blockManagerMaster = spy(new MyBlockManagerMaster(conf)) + securityMgr = new SecurityManager(sc.getConf) + broadcastManager = new BroadcastManager(true, sc.getConf, securityMgr) + mapOutputTracker = spy(new MyMapOutputTrackerMaster(sc.getConf, broadcastManager)) + blockManagerMaster = spy(new MyBlockManagerMaster(sc.getConf)) scheduler = new DAGScheduler( sc, taskScheduler, @@ -353,6 +361,8 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi * DAGScheduler event loop. 
*/ private def runEvent(event: DAGSchedulerEvent): Unit = { + // Ensure the initialization of various components + sc dagEventProcessLoopTester.post(event) } @@ -491,12 +501,8 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi } test("All shuffle files on the storage endpoint should be cleaned up when it is lost") { - // reset the test context with the right shuffle service config - afterEach() - val conf = new SparkConf() conf.set(config.SHUFFLE_SERVICE_ENABLED.key, "true") conf.set("spark.files.fetchFailure.unRegisterOutputOnHost", "true") - init(conf) runEvent(ExecutorAdded("hostA-exec1", "hostA")) runEvent(ExecutorAdded("hostA-exec2", "hostA")) runEvent(ExecutorAdded("hostB-exec", "hostB")) @@ -565,11 +571,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi } test("SPARK-32003: All shuffle files for executor should be cleaned up on fetch failure") { - // reset the test context with the right shuffle service config - afterEach() - val conf = new SparkConf() conf.set(config.SHUFFLE_SERVICE_ENABLED.key, "true") - init(conf) val shuffleMapRdd = new MyRDD(sc, 3, Nil) val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(3)) @@ -861,11 +863,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi "not lost" } test(s"shuffle files $maybeLost when $eventDescription") { - // reset the test context with the right shuffle service config - afterEach() - val conf = new SparkConf() conf.set(config.SHUFFLE_SERVICE_ENABLED.key, shuffleServiceOn.toString) - init(conf) assert(sc.env.blockManager.externalShuffleServiceEnabled == shuffleServiceOn) val shuffleMapRdd = new MyRDD(sc, 2, Nil) @@ -2571,7 +2569,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi val newTaskSet = taskSets(1) // 2 tasks should have been re-submitted, for tasks 0 and 1 (which ran on hostA). assert(newTaskSet.tasks.size === 2) - // Complete task 0 from the original task set (i.e., not hte one that's currently active). + // Complete task 0 from the original task set (i.e., not the one that's currently active). // This should still be counted towards the job being complete (but there's still one // outstanding task). runEvent(makeCompletionEvent(newTaskSet.tasks(0), Success, makeMapStatus("hostB", 2))) @@ -2888,11 +2886,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi } test("SPARK-25341: abort stage while using old fetch protocol") { - // reset the test context with using old fetch protocol - afterEach() - val conf = new SparkConf() conf.set(config.SHUFFLE_USE_OLD_FETCH_PROTOCOL.key, "true") - init(conf) // Construct the scenario of indeterminate stage fetch failed. 
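The DAGSchedulerSuite hunks above swap the eager init(new SparkConf()) call in beforeEach for a context that is built and initialized on first access (the firstInit flag guarding the overridden sc), so individual tests can adjust conf before anything is constructed. A self-contained sketch of that init-on-first-access pattern, with placeholder names rather than the suite's real members:

// Init-on-first-access sketch; `Suite`, `conf` and `sc` here are stand-ins, not the
// actual DAGSchedulerSuite members.
object LazyInitSketch {
  final class Suite {
    // Tests may mutate this before the first use of `sc`, mirroring how the suite now
    // lets a test call conf.set(...) before the SparkContext exists.
    val conf = scala.collection.mutable.Map.empty[String, String]

    private var firstInit = true
    private lazy val underlying: String = s"SparkContext(${conf.mkString(", ")})"

    def sc: String = {
      if (firstInit) {
        init(underlying)   // runs exactly once, on first access
        firstInit = false
      }
      underlying
    }

    private def init(context: String): Unit =
      println(s"registering listeners and trackers for $context")
  }

  def main(args: Array[String]): Unit = {
    val suite = new Suite
    suite.conf += "spark.shuffle.service.enabled" -> "true"  // set before `sc` is ever touched
    suite.sc                                                 // builds the context and initializes once
    suite.sc                                                 // later accesses skip initialization
  }
}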
constructIndeterminateStageFetchFailed() // The job should fail because Spark can't rollback the shuffle map stage while @@ -3063,7 +3057,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi assertResultStageFailToRollback(shuffleMapRdd) } - private def assertResultStageNotRollbacked(mapRdd: MyRDD): Unit = { + private def assertResultStageNotRolledBack(mapRdd: MyRDD): Unit = { val shuffleDep = new ShuffleDependency(mapRdd, new HashPartitioner(2)) val shuffleId = shuffleDep.shuffleId val finalRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker) @@ -3103,7 +3097,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi val shuffleMapRdd = new MyCheckpointRDD(sc, 2, Nil, indeterminate = true) shuffleMapRdd.checkpoint() shuffleMapRdd.doCheckpoint() - assertResultStageNotRollbacked(shuffleMapRdd) + assertResultStageNotRolledBack(shuffleMapRdd) } } @@ -3220,10 +3214,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi } test("test 2 resource profile with merge conflict config true") { - afterEach() - val conf = new SparkConf() conf.set(config.RESOURCE_PROFILE_MERGE_CONFLICTS.key, "true") - init(conf) val ereqs = new ExecutorResourceRequests().cores(4) val treqs = new TaskResourceRequests().cpus(1) @@ -3241,10 +3232,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi } test("test multiple resource profiles created from merging use same rp") { - afterEach() - val conf = new SparkConf() conf.set(config.RESOURCE_PROFILE_MERGE_CONFLICTS.key, "true") - init(conf) val ereqs = new ExecutorResourceRequests().cores(4) val treqs = new TaskResourceRequests().cpus(1) @@ -3338,10 +3326,7 @@ class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with TimeLi } test("test merge 3 resource profiles") { - afterEach() - val conf = new SparkConf() conf.set(config.RESOURCE_PROFILE_MERGE_CONFLICTS.key, "true") - init(conf) val ereqs = new ExecutorResourceRequests().cores(4) val treqs = new TaskResourceRequests().cpus(1) val rp1 = new ResourceProfile(ereqs.requests, treqs.requests) diff --git a/core/src/test/scala/org/apache/spark/scheduler/EventLoggingListenerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/EventLoggingListenerSuite.scala index 915035e9eb71c..7acb8451e3b38 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/EventLoggingListenerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/EventLoggingListenerSuite.scala @@ -18,7 +18,7 @@ package org.apache.spark.scheduler import java.io.{File, InputStream} -import java.util.Arrays +import java.util.{Arrays, Properties} import scala.collection.immutable.Map import scala.collection.mutable @@ -91,12 +91,75 @@ class EventLoggingListenerSuite extends SparkFunSuite with LocalSparkContext wit .set(key, secretPassword) val hadoopconf = SparkHadoopUtil.get.newConfiguration(new SparkConf()) val eventLogger = new EventLoggingListener("test", None, testDirPath.toUri(), conf) - val envDetails = SparkEnv.environmentDetails(conf, hadoopconf, "FIFO", Seq.empty, Seq.empty) + val envDetails = SparkEnv.environmentDetails( + conf, hadoopconf, "FIFO", Seq.empty, Seq.empty, Seq.empty) val event = SparkListenerEnvironmentUpdate(envDetails) val redactedProps = eventLogger.redactEvent(event).environmentDetails("Spark Properties").toMap assert(redactedProps(key) == "*********(redacted)") } + test("Spark-33504 sensitive attributes redaction in properties") { + val (secretKey, secretPassword) = 
("spark.executorEnv.HADOOP_CREDSTORE_PASSWORD", + "secret_password") + val (customKey, customValue) = ("parse_token", "secret_password") + + val conf = getLoggingConf(testDirPath, None).set(secretKey, secretPassword) + + val properties = new Properties() + properties.setProperty(secretKey, secretPassword) + properties.setProperty(customKey, customValue) + + val logName = "properties-reaction-test" + val eventLogger = new EventLoggingListener(logName, None, testDirPath.toUri(), conf) + val listenerBus = new LiveListenerBus(conf) + + val stageId = 1 + val jobId = 1 + val stageInfo = new StageInfo(stageId, 0, stageId.toString, 0, + Seq.empty, Seq.empty, "details", + resourceProfileId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID) + + val events = Array(SparkListenerStageSubmitted(stageInfo, properties), + SparkListenerJobStart(jobId, 0, Seq(stageInfo), properties)) + + eventLogger.start() + listenerBus.start(Mockito.mock(classOf[SparkContext]), Mockito.mock(classOf[MetricsSystem])) + listenerBus.addToEventLogQueue(eventLogger) + events.foreach(event => listenerBus.post(event)) + listenerBus.stop() + eventLogger.stop() + + val logData = EventLogFileReader.openEventLog(new Path(eventLogger.logWriter.logPath), + fileSystem) + try { + val lines = readLines(logData) + val logStart = SparkListenerLogStart(SPARK_VERSION) + assert(lines.size === 3) + assert(lines(0).contains("SparkListenerLogStart")) + assert(lines(1).contains("SparkListenerStageSubmitted")) + assert(lines(2).contains("SparkListenerJobStart")) + + lines.foreach{ + line => JsonProtocol.sparkEventFromJson(parse(line)) match { + case logStartEvent: SparkListenerLogStart => + assert(logStartEvent == logStart) + + case stageSubmittedEvent: SparkListenerStageSubmitted => + assert(stageSubmittedEvent.properties.getProperty(secretKey) == "*********(redacted)") + assert(stageSubmittedEvent.properties.getProperty(customKey) == customValue) + + case jobStartEvent : SparkListenerJobStart => + assert(jobStartEvent.properties.getProperty(secretKey) == "*********(redacted)") + assert(jobStartEvent.properties.getProperty(customKey) == customValue) + + case _ => assert(false) + } + } + } finally { + logData.close() + } + } + test("Executor metrics update") { testStageExecutorMetricsEventLogging() } diff --git a/core/src/test/scala/org/apache/spark/scheduler/BlacklistIntegrationSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerIntegrationSuite.scala similarity index 86% rename from core/src/test/scala/org/apache/spark/scheduler/BlacklistIntegrationSuite.scala rename to core/src/test/scala/org/apache/spark/scheduler/HealthTrackerIntegrationSuite.scala index 246d4b2f56ec9..29a8f4be8b72b 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/BlacklistIntegrationSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerIntegrationSuite.scala @@ -20,7 +20,7 @@ import org.apache.spark._ import org.apache.spark.internal.config import org.apache.spark.internal.config.Tests._ -class BlacklistIntegrationSuite extends SchedulerIntegrationSuite[MultiExecutorMockBackend]{ +class HealthTrackerIntegrationSuite extends SchedulerIntegrationSuite[MultiExecutorMockBackend]{ val badHost = "host-0" @@ -40,9 +40,9 @@ class BlacklistIntegrationSuite extends SchedulerIntegrationSuite[MultiExecutorM // Test demonstrating the issue -- without a config change, the scheduler keeps scheduling // according to locality preferences, and so the job fails - testScheduler("If preferred node is bad, without blacklist job will fail", + 
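Stepping back to the EventLoggingListenerSuite addition above (SPARK-33504): the test expects keys that look sensitive (e.g. spark.executorEnv.HADOOP_CREDSTORE_PASSWORD) to be rewritten to *********(redacted) in the logged properties, while an arbitrary user key such as parse_token passes through untouched. A standalone sketch of that key-based redaction; the (?i)secret|password pattern is an assumption standing in for the configured spark.redaction.regex, not a quote of Spark's implementation:

import java.util.Properties

object RedactionSketch {
  // Assumed pattern, standing in for the configured spark.redaction.regex.
  private val sensitiveKey = "(?i)secret|password".r
  private val placeholder = "*********(redacted)"

  // Redact a property's value whenever its *key* matches the sensitive pattern; values are
  // never inspected, which is why "parse_token" -> "secret_password" survives untouched.
  def redact(props: Properties): Properties = {
    val out = new Properties()
    props.stringPropertyNames().forEach { key =>
      val value = props.getProperty(key)
      val kept = if (sensitiveKey.findFirstIn(key).isDefined) placeholder else value
      out.setProperty(key, kept)
    }
    out
  }

  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.setProperty("spark.executorEnv.HADOOP_CREDSTORE_PASSWORD", "secret_password")
    props.setProperty("parse_token", "secret_password")
    val redacted = redact(props)
    assert(redacted.getProperty("spark.executorEnv.HADOOP_CREDSTORE_PASSWORD") == placeholder)
    assert(redacted.getProperty("parse_token") == "secret_password")
  }
}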
testScheduler("If preferred node is bad, without excludeOnFailure job will fail", extraConfs = Seq( - config.BLACKLIST_ENABLED.key -> "false" + config.EXCLUDE_ON_FAILURE_ENABLED.key -> "false" )) { val rdd = new MockRDDWithLocalityPrefs(sc, 10, Nil, badHost) withBackend(badHostBackend _) { @@ -55,19 +55,19 @@ class BlacklistIntegrationSuite extends SchedulerIntegrationSuite[MultiExecutorM testScheduler( "With default settings, job can succeed despite multiple bad executors on node", extraConfs = Seq( - config.BLACKLIST_ENABLED.key -> "true", + config.EXCLUDE_ON_FAILURE_ENABLED.key -> "true", config.TASK_MAX_FAILURES.key -> "4", TEST_N_HOSTS.key -> "2", TEST_N_EXECUTORS_HOST.key -> "5", TEST_N_CORES_EXECUTOR.key -> "10" ) ) { - // To reliably reproduce the failure that would occur without blacklisting, we have to use 1 + // To reliably reproduce the failure that would occur without excludeOnFailure, we have to use 1 // task. That way, we ensure this 1 task gets rotated through enough bad executors on the host // to fail the taskSet, before we have a bunch of different tasks fail in the executors so we - // blacklist them. - // But the point here is -- without blacklisting, we would never schedule anything on the good - // host-1 before we hit too many failures trying our preferred host-0. + // exclude them. + // But the point here is -- without excludeOnFailure, we would never schedule anything on the + // good host-1 before we hit too many failures trying our preferred host-0. val rdd = new MockRDDWithLocalityPrefs(sc, 1, Nil, badHost) withBackend(badHostBackend _) { val jobFuture = submit(rdd, (0 until 1).toArray) @@ -76,12 +76,12 @@ class BlacklistIntegrationSuite extends SchedulerIntegrationSuite[MultiExecutorM assertDataStructuresEmpty(noFailure = true) } - // Here we run with the blacklist on, and the default config takes care of having this + // Here we run with excludeOnFailure on, and the default config takes care of having this // robust to one bad node.
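For readers who don't have the typed config constants memorized, the extraConfs used by these integration tests boil down to a handful of plain settings. A hedged sketch follows; the spark.excludeOnFailure.enabled key is an assumption inferred from the constant's name, while the other keys are standard SparkConf settings:

import org.apache.spark.SparkConf

object ExcludeOnFailureConfSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("exclude-on-failure-demo")
      .set("spark.excludeOnFailure.enabled", "true")  // assumed key behind EXCLUDE_ON_FAILURE_ENABLED
      .set("spark.task.maxFailures", "4")             // TASK_MAX_FAILURES
      .set("spark.locality.wait", "10ms")             // keep locality waits short, as the tests do
    println(conf.toDebugString)
  }
}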
testScheduler( "Bad node with multiple executors, job will still succeed with the right confs", extraConfs = Seq( - config.BLACKLIST_ENABLED.key -> "true", + config.EXCLUDE_ON_FAILURE_ENABLED.key -> "true", // just to avoid this test taking too long config.LOCALITY_WAIT.key -> "10ms" ) @@ -100,7 +100,7 @@ class BlacklistIntegrationSuite extends SchedulerIntegrationSuite[MultiExecutorM testScheduler( "SPARK-15865 Progress with fewer executors than maxTaskFailures", extraConfs = Seq( - config.BLACKLIST_ENABLED.key -> "true", + config.EXCLUDE_ON_FAILURE_ENABLED.key -> "true", TEST_N_HOSTS.key -> "2", TEST_N_EXECUTORS_HOST.key -> "1", TEST_N_CORES_EXECUTOR.key -> "1", @@ -116,7 +116,7 @@ class BlacklistIntegrationSuite extends SchedulerIntegrationSuite[MultiExecutorM awaitJobTermination(jobFuture, duration) val pattern = ( s"""|Aborting TaskSet 0.0 because task .* - |cannot run anywhere due to node and executor blacklist""".stripMargin).r + |cannot run anywhere due to node and executor excludeOnFailure""".stripMargin).r assert(pattern.findFirstIn(failure.getMessage).isDefined, s"Couldn't find $pattern in ${failure.getMessage()}") } diff --git a/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerSuite.scala new file mode 100644 index 0000000000000..7ecc1f51ce236 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/scheduler/HealthTrackerSuite.scala @@ -0,0 +1,615 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.scheduler + +import org.mockito.ArgumentMatchers.any +import org.mockito.Mockito.{never, verify, when} +import org.mockito.invocation.InvocationOnMock +import org.scalatest.BeforeAndAfterEach +import org.scalatestplus.mockito.MockitoSugar + +import org.apache.spark._ +import org.apache.spark.internal.config +import org.apache.spark.util.ManualClock + +class HealthTrackerSuite extends SparkFunSuite with BeforeAndAfterEach with MockitoSugar + with LocalSparkContext { + + private val clock = new ManualClock(0) + + private var healthTracker: HealthTracker = _ + private var listenerBusMock: LiveListenerBus = _ + private var scheduler: TaskSchedulerImpl = _ + private var conf: SparkConf = _ + + override def beforeEach(): Unit = { + conf = new SparkConf().setAppName("test").setMaster("local") + .set(config.EXCLUDE_ON_FAILURE_ENABLED.key, "true") + scheduler = mockTaskSchedWithConf(conf) + + clock.setTime(0) + + listenerBusMock = mock[LiveListenerBus] + healthTracker = new HealthTracker(listenerBusMock, conf, None, clock) + } + + override def afterEach(): Unit = { + if (healthTracker != null) { + healthTracker = null + } + if (scheduler != null) { + scheduler.stop() + scheduler = null + } + super.afterEach() + } + + // All executors and hosts used in tests should be in this set, so that [[assertEquivalentToSet]] + // works. It's OK if it's got extraneous entries + val allExecutorAndHostIds = { + (('A' to 'Z')++ (1 to 100).map(_.toString)) + .flatMap{ suffix => + Seq(s"host$suffix", s"host-$suffix") + } + }.toSet + + /** + * It's easier to write our tests as if we could directly look at the sets of nodes & executors in + * the exclude list. However, the API doesn't expose a set, so this is a simple way to test + * something similar, since we know the universe of values that might appear in these sets. + */ + def assertEquivalentToSet(f: String => Boolean, expected: Set[String]): Unit = { + allExecutorAndHostIds.foreach { id => + val actual = f(id) + val exp = expected.contains(id) + assert(actual === exp, raw"""for string "$id" """) + } + } + + def mockTaskSchedWithConf(conf: SparkConf): TaskSchedulerImpl = { + sc = new SparkContext(conf) + val scheduler = mock[TaskSchedulerImpl] + when(scheduler.sc).thenReturn(sc) + when(scheduler.mapOutputTracker).thenReturn( + SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster]) + scheduler + } + + def createTaskSetExcludelist(stageId: Int = 0): TaskSetExcludelist = { + new TaskSetExcludelist(listenerBusMock, conf, stageId, stageAttemptId = 0, clock = clock) + } + + test("executors can be excluded with only a few failures per stage") { + // For many different stages, executor 1 fails a task, then executor 2 succeeds the task, + // and then the task set is done. Not enough failures to exclude the executor *within* + // any particular taskset, but we still exclude the executor overall eventually. + // Also, we intentionally have a mix of task successes and failures -- there are even some + // successes after the executor is excluded. The idea here is those tasks get scheduled + // before the executor is excluded. We might get successes after excluding (because the + // executor might be flaky but not totally broken). But successes should not unexclude the + // executor.
+ val failuresUntilExcluded = conf.get(config.MAX_FAILURES_PER_EXEC) + var failuresSoFar = 0 + (0 until failuresUntilExcluded * 10).foreach { stageId => + val taskSetExclude = createTaskSetExcludelist(stageId) + if (stageId % 2 == 0) { + // fail one task in every other taskset + taskSetExclude.updateExcludedForFailedTask( + "hostA", exec = "1", index = 0, failureReason = "testing") + failuresSoFar += 1 + } + healthTracker.updateExcludedForSuccessfulTaskSet(stageId, 0, taskSetExclude.execToFailures) + assert(failuresSoFar == stageId / 2 + 1) + if (failuresSoFar < failuresUntilExcluded) { + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + } else { + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1")) + verify(listenerBusMock).post( + SparkListenerExecutorExcluded(0, "1", failuresUntilExcluded)) + verify(listenerBusMock).post( + SparkListenerExecutorBlacklisted(0, "1", failuresUntilExcluded)) + } + } + } + + // If an executor has many task failures, but the task set ends up failing, it shouldn't be + // counted against the executor. + test("executors aren't excluded as a result of tasks in failed task sets") { + val failuresUntilExcluded = conf.get(config.MAX_FAILURES_PER_EXEC) + // for many different stages, executor 1 fails a task, and then the taskSet fails. + (0 until failuresUntilExcluded * 10).foreach { stage => + val taskSetExclude = createTaskSetExcludelist(stage) + taskSetExclude.updateExcludedForFailedTask( + "hostA", exec = "1", index = 0, failureReason = "testing") + } + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + } + + Seq(true, false).foreach { succeedTaskSet => + val label = if (succeedTaskSet) "success" else "failure" + test(s"stage exclude updates correctly on stage $label") { + // Within one taskset, an executor fails a few times, so it's excluded for the taskset. + // But if the taskset fails, we shouldn't exclude the executor after the stage. + val taskSetExclude = createTaskSetExcludelist(0) + // We trigger enough failures for both the taskset exclude list and the application exclude list. + val numFailures = math.max(conf.get(config.MAX_FAILURES_PER_EXEC), + conf.get(config.MAX_FAILURES_PER_EXEC_STAGE)) + (0 until numFailures).foreach { index => + taskSetExclude.updateExcludedForFailedTask( + "hostA", exec = "1", index = index, failureReason = "testing") + } + assert(taskSetExclude.isExecutorExcludedForTaskSet("1")) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + if (succeedTaskSet) { + // The task set succeeded elsewhere, so we should count those failures against our executor, + // and it should be excluded for the entire application. + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude.execToFailures) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(0, "1", numFailures)) + } else { + // The task set failed, so we don't count these failures against the executor for other + // stages. + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + } + } + } + + test("excluded executors and nodes get recovered with time") { + val taskSetExclude0 = createTaskSetExcludelist(stageId = 0) + // Fail 4 tasks in one task set on executor 1, so that executor gets excluded for the whole + // application.
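The test above hinges on one rule: failures are only counted once the task set succeeds, and an executor is excluded for the application once its counted failures reach MAX_FAILURES_PER_EXEC. A small model of just that bookkeeping (not the real HealthTracker, only the arithmetic the assertions rely on):

object ExecutorFailureCountSketch {
  final case class Tracker(maxFailuresPerExec: Int, failures: Map[String, Int] = Map.empty) {
    // Only failures coming from task sets that eventually succeed are counted.
    def recordSuccessfulTaskSet(execToFailures: Map[String, Int]): Tracker =
      copy(failures = execToFailures.foldLeft(failures) { case (acc, (exec, n)) =>
        acc.updated(exec, acc.getOrElse(exec, 0) + n)
      })

    def isExecutorExcluded(exec: String): Boolean =
      failures.getOrElse(exec, 0) >= maxFailuresPerExec
  }

  def main(args: Array[String]): Unit = {
    // With a threshold of 2, a single failure in each of two successful stages is enough.
    var tracker = Tracker(maxFailuresPerExec = 2)
    tracker = tracker.recordSuccessfulTaskSet(Map("1" -> 1))
    assert(!tracker.isExecutorExcluded("1"))
    tracker = tracker.recordSuccessfulTaskSet(Map("1" -> 1))
    assert(tracker.isExecutorExcluded("1"))
  }
}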
+ (0 until 4).foreach { partition => + taskSetExclude0.updateExcludedForFailedTask( + "hostA", exec = "1", index = partition, failureReason = "testing") + } + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude0.execToFailures) + assert(healthTracker.excludedNodeList() === Set()) + assertEquivalentToSet(healthTracker.isNodeExcluded(_), Set()) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(0, "1", 4)) + verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "1", 4)) + + val taskSetExclude1 = createTaskSetExcludelist(stageId = 1) + // Fail 4 tasks in one task set on executor 2, so that executor gets excluded for the whole + // application. Since that's the second executor that is excluded on the same node, we also + // exclude that node. + (0 until 4).foreach { partition => + taskSetExclude1.updateExcludedForFailedTask( + "hostA", exec = "2", index = partition, failureReason = "testing") + } + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude1.execToFailures) + assert(healthTracker.excludedNodeList() === Set("hostA")) + assertEquivalentToSet(healthTracker.isNodeExcluded(_), Set("hostA")) + verify(listenerBusMock).post(SparkListenerNodeExcluded(0, "hostA", 2)) + verify(listenerBusMock).post(SparkListenerNodeBlacklisted(0, "hostA", 2)) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1", "2")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(0, "2", 4)) + verify(listenerBusMock).post(SparkListenerExecutorBlacklisted(0, "2", 4)) + + // Advance the clock and then make sure hostA and executors 1 and 2 have been removed from the + // exclude. + val timeout = healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + 1 + clock.advance(timeout) + healthTracker.applyExcludeOnFailureTimeout() + assert(healthTracker.excludedNodeList() === Set()) + assertEquivalentToSet(healthTracker.isNodeExcluded(_), Set()) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + verify(listenerBusMock).post(SparkListenerExecutorUnexcluded(timeout, "2")) + verify(listenerBusMock).post(SparkListenerExecutorUnexcluded(timeout, "1")) + verify(listenerBusMock).post(SparkListenerExecutorUnblacklisted(timeout, "2")) + verify(listenerBusMock).post(SparkListenerExecutorUnblacklisted(timeout, "1")) + verify(listenerBusMock).post(SparkListenerNodeUnexcluded(timeout, "hostA")) + + // Fail one more task, but executor isn't put back into exclude since the count of failures + // on that executor should have been reset to 0. + val taskSetExclude2 = createTaskSetExcludelist(stageId = 2) + taskSetExclude2.updateExcludedForFailedTask( + "hostA", exec = "1", index = 0, failureReason = "testing") + healthTracker.updateExcludedForSuccessfulTaskSet(2, 0, taskSetExclude2.execToFailures) + assert(healthTracker.excludedNodeList() === Set()) + assertEquivalentToSet(healthTracker.isNodeExcluded(_), Set()) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + } + + test("exclude can handle lost executors") { + // The exclude should still work if an executor is killed completely. We should still + // be able to exclude the entire node. + val taskSetExclude0 = createTaskSetExcludelist(stageId = 0) + // Lets say that executor 1 dies completely. We get some task failures, but + // the taskset then finishes successfully (elsewhere). 
+ (0 until 4).foreach { partition => + taskSetExclude0.updateExcludedForFailedTask( + "hostA", exec = "1", index = partition, failureReason = "testing") + } + healthTracker.handleRemovedExecutor("1") + healthTracker.updateExcludedForSuccessfulTaskSet( + stageId = 0, + stageAttemptId = 0, + taskSetExclude0.execToFailures) + assert(healthTracker.isExecutorExcluded("1")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(0, "1", 4)) + val t1 = healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS / 2 + clock.advance(t1) + + // Now another executor gets spun up on that host, but it also dies. + val taskSetExclude1 = createTaskSetExcludelist(stageId = 1) + (0 until 4).foreach { partition => + taskSetExclude1.updateExcludedForFailedTask( + "hostA", exec = "2", index = partition, failureReason = "testing") + } + healthTracker.handleRemovedExecutor("2") + healthTracker.updateExcludedForSuccessfulTaskSet( + stageId = 1, + stageAttemptId = 0, + taskSetExclude1.execToFailures) + // We've now had two bad executors on the hostA, so we should exclude the entire node. + assert(healthTracker.isExecutorExcluded("1")) + assert(healthTracker.isExecutorExcluded("2")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(t1, "2", 4)) + assert(healthTracker.isNodeExcluded("hostA")) + verify(listenerBusMock).post(SparkListenerNodeExcluded(t1, "hostA", 2)) + + // Advance the clock so that executor 1 should no longer be explicitly excluded, but + // everything else should still be excluded. + val t2 = healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS / 2 + 1 + clock.advance(t2) + healthTracker.applyExcludeOnFailureTimeout() + assert(!healthTracker.isExecutorExcluded("1")) + verify(listenerBusMock).post(SparkListenerExecutorUnexcluded(t1 + t2, "1")) + assert(healthTracker.isExecutorExcluded("2")) + assert(healthTracker.isNodeExcluded("hostA")) + // make sure we don't leak memory + assert(!healthTracker.executorIdToExcludedStatus.contains("1")) + assert(!healthTracker.nodeToExcludedExecs("hostA").contains("1")) + // Advance the timeout again so now hostA should be removed from the exclude. + clock.advance(t1) + healthTracker.applyExcludeOnFailureTimeout() + assert(!healthTracker.nodeIdToExcludedExpiryTime.contains("hostA")) + verify(listenerBusMock).post(SparkListenerNodeUnexcluded(t1 + t2 + t1, "hostA")) + // Even though unexcluding a node implicitly unexcludes all of its executors, + // there will be no SparkListenerExecutorUnexcluded sent here. + } + + test("task failures expire with time") { + // Verifies that 2 failures within the timeout period cause an executor to be excluded, but + // if task failures are spaced out by more than the timeout period, the first failure is timed + // out, and the executor isn't excluded. + var stageId = 0 + + def failOneTaskInTaskSet(exec: String): Unit = { + val taskSetExclude = createTaskSetExcludelist(stageId = stageId) + taskSetExclude.updateExcludedForFailedTask("host-" + exec, exec, 0, "testing") + healthTracker.updateExcludedForSuccessfulTaskSet(stageId, 0, taskSetExclude.execToFailures) + stageId += 1 + } + + failOneTaskInTaskSet(exec = "1") + // We have one sporadic failure on exec 2, but that's it. Later checks ensure that we never + // exclude executor 2 despite this one failure. + failOneTaskInTaskSet(exec = "2") + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + assert(healthTracker.nextExpiryTime === Long.MaxValue) + + // We advance the clock past the expiry time. 
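The expiry checks that follow all reduce to the same mechanism: every exclusion records an expiry timestamp, and applyExcludeOnFailureTimeout() drops entries whose timestamp is in the past relative to the (manually advanced) clock. A compact standalone sketch of that mechanism, using a toy clock rather than Spark's ManualClock:

object ExpirySketch {
  final class ToyClock(private var now: Long = 0L) {
    def advance(ms: Long): Unit = now += ms
    def getTimeMillis: Long = now
  }

  final class ExclusionList(timeoutMs: Long, clock: ToyClock) {
    private var expiry = Map.empty[String, Long]  // executor id -> time at which the exclusion lapses

    def exclude(exec: String): Unit =
      expiry += exec -> (clock.getTimeMillis + timeoutMs)

    // Analogue of applyExcludeOnFailureTimeout(): drop everything whose expiry has passed.
    def applyTimeout(): Unit =
      expiry = expiry.filter { case (_, t) => t > clock.getTimeMillis }

    def isExcluded(exec: String): Boolean = expiry.contains(exec)
  }

  def main(args: Array[String]): Unit = {
    val clock = new ToyClock()
    val exclusions = new ExclusionList(timeoutMs = 1000L, clock)
    exclusions.exclude("1")
    clock.advance(1001L)      // step past the timeout, like clock.advance(...TIMEOUT_MILLIS + 1)
    exclusions.applyTimeout()
    assert(!exclusions.isExcluded("1"))
  }
}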
+ clock.advance(healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + 1) + val t0 = clock.getTimeMillis() + healthTracker.applyExcludeOnFailureTimeout() + assert(healthTracker.nextExpiryTime === Long.MaxValue) + failOneTaskInTaskSet(exec = "1") + + // Because the 2nd failure on executor 1 happened past the expiry time, nothing should have been + // excluded. + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + + // Now we add one more failure, within the timeout, and it should be counted. + clock.setTime(t0 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS - 1) + val t1 = clock.getTimeMillis() + failOneTaskInTaskSet(exec = "1") + healthTracker.applyExcludeOnFailureTimeout() + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(t1, "1", 2)) + assert(healthTracker.nextExpiryTime === t1 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + + // Add failures on executor 3, make sure it gets put on the exclude. + clock.setTime(t1 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS - 1) + val t2 = clock.getTimeMillis() + failOneTaskInTaskSet(exec = "3") + failOneTaskInTaskSet(exec = "3") + healthTracker.applyExcludeOnFailureTimeout() + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1", "3")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(t2, "3", 2)) + assert(healthTracker.nextExpiryTime === t1 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + + // Now we go past the timeout for executor 1, so it should be dropped from the exclude. + clock.setTime(t1 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + 1) + healthTracker.applyExcludeOnFailureTimeout() + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("3")) + verify(listenerBusMock).post(SparkListenerExecutorUnexcluded(clock.getTimeMillis(), "1")) + assert(healthTracker.nextExpiryTime === t2 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + + // Make sure that we update correctly when we go from having excluded executors to + // just having tasks with timeouts. + clock.setTime(t2 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS - 1) + failOneTaskInTaskSet(exec = "4") + healthTracker.applyExcludeOnFailureTimeout() + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("3")) + assert(healthTracker.nextExpiryTime === t2 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + + clock.setTime(t2 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS + 1) + healthTracker.applyExcludeOnFailureTimeout() + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + verify(listenerBusMock).post(SparkListenerExecutorUnexcluded(clock.getTimeMillis(), "3")) + // we've got one task failure still, but we don't bother setting nextExpiryTime to it, to + // avoid wasting time checking for expiry of individual task failures. + assert(healthTracker.nextExpiryTime === Long.MaxValue) + } + + test("task failure timeout works as expected for long-running tasksets") { + // This ensures that we don't trigger spurious excluding for long tasksets, when the taskset + // finishes long after the task failures. We create two tasksets, each with one failure. + // Individually they shouldn't cause any excluding since there is only one failure. + // Furthermore, we space the failures out so far that even when both tasksets have completed, + // we still don't trigger any excluding. 
+ val taskSetExclude1 = createTaskSetExcludelist(stageId = 1) + val taskSetExclude2 = createTaskSetExcludelist(stageId = 2) + // Taskset1 has one failure immediately + taskSetExclude1.updateExcludedForFailedTask("host-1", "1", 0, "testing") + // Then we have a *long* delay, much longer than the timeout, before any other failures or + // taskset completion + clock.advance(healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS * 5) + // After the long delay, we have one failure on taskset 2, on the same executor + taskSetExclude2.updateExcludedForFailedTask("host-1", "1", 0, "testing") + // Finally, we complete both tasksets. It's important here to complete taskset2 *first*. We + // want to make sure that when taskset 1 finishes, even though we've now got two task failures, + // we realize that the task failure we just added was well before the timeout. + clock.advance(1) + healthTracker.updateExcludedForSuccessfulTaskSet(stageId = 2, 0, taskSetExclude2.execToFailures) + clock.advance(1) + healthTracker.updateExcludedForSuccessfulTaskSet(stageId = 1, 0, taskSetExclude1.execToFailures) + + // Make sure nothing was excluded + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set()) + } + + test("only exclude nodes for the application when enough executors have failed on that " + + "specific host") { + // we exclude executors on two different hosts -- make sure that doesn't lead to any + // node-level exclusion + val taskSetExclude0 = createTaskSetExcludelist(stageId = 0) + taskSetExclude0.updateExcludedForFailedTask( + "hostA", exec = "1", index = 0, failureReason = "testing") + taskSetExclude0.updateExcludedForFailedTask( + "hostA", exec = "1", index = 1, failureReason = "testing") + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude0.execToFailures) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(0, "1", 2)) + assertEquivalentToSet(healthTracker.isNodeExcluded(_), Set()) + + val taskSetExclude1 = createTaskSetExcludelist(stageId = 1) + taskSetExclude1.updateExcludedForFailedTask( + "hostB", exec = "2", index = 0, failureReason = "testing") + taskSetExclude1.updateExcludedForFailedTask( + "hostB", exec = "2", index = 1, failureReason = "testing") + healthTracker.updateExcludedForSuccessfulTaskSet(1, 0, taskSetExclude1.execToFailures) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1", "2")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(0, "2", 2)) + assertEquivalentToSet(healthTracker.isNodeExcluded(_), Set()) + + // Finally, exclude another executor on the same node as the original excluded executor, + // and make sure this time we *do* exclude the node.
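The code below completes that scenario by failing a second executor on hostA. As a standalone illustration of the threshold being exercised, here is a toy model with invented names and a hard-coded limit of 2 standing in for the configured maximum of failed executors per node; it is not the real HealthTracker logic.

object NodeExclusionSketch {
  // Stand-in for the per-node limit; the real value comes from configuration.
  val maxFailedExecPerNode = 2
  private val excludedExecsByNode = scala.collection.mutable.Map.empty[String, Set[String]]

  def excludeExecutor(host: String, exec: String): Unit =
    excludedExecsByNode(host) = excludedExecsByNode.getOrElse(host, Set.empty[String]) + exec

  def isNodeExcluded(host: String): Boolean =
    excludedExecsByNode.getOrElse(host, Set.empty[String]).size >= maxFailedExecPerNode

  def main(args: Array[String]): Unit = {
    excludeExecutor("hostA", "1")
    excludeExecutor("hostB", "2")
    assert(!isNodeExcluded("hostA") && !isNodeExcluded("hostB")) // one bad executor per host
    excludeExecutor("hostA", "3")
    assert(isNodeExcluded("hostA")) // two distinct excluded executors on the same host
  }
}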
+ val taskSetExclude2 = createTaskSetExcludelist(stageId = 0) + taskSetExclude2.updateExcludedForFailedTask( + "hostA", exec = "3", index = 0, failureReason = "testing") + taskSetExclude2.updateExcludedForFailedTask( + "hostA", exec = "3", index = 1, failureReason = "testing") + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude2.execToFailures) + assertEquivalentToSet(healthTracker.isExecutorExcluded(_), Set("1", "2", "3")) + verify(listenerBusMock).post(SparkListenerExecutorExcluded(0, "3", 2)) + assertEquivalentToSet(healthTracker.isNodeExcluded(_), Set("hostA")) + verify(listenerBusMock).post(SparkListenerNodeExcluded(0, "hostA", 2)) + } + + test("exclude still respects legacy configs") { + val conf = new SparkConf().setMaster("local") + assert(!HealthTracker.isExcludeOnFailureEnabled(conf)) + conf.set(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF, 5000L) + assert(HealthTracker.isExcludeOnFailureEnabled(conf)) + assert(5000 === HealthTracker.getExludeOnFailureTimeout(conf)) + // the new conf takes precedence, though + conf.set(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF, 1000L) + assert(1000 === HealthTracker.getExludeOnFailureTimeout(conf)) + + // if you explicitly set the legacy conf to 0, that also would disable excluding + conf.set(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF, 0L) + assert(!HealthTracker.isExcludeOnFailureEnabled(conf)) + // but again, the new conf takes precedence + conf.set(config.EXCLUDE_ON_FAILURE_ENABLED, true) + assert(HealthTracker.isExcludeOnFailureEnabled(conf)) + assert(1000 === HealthTracker.getExludeOnFailureTimeout(conf)) + } + + test("check exclude configuration invariants") { + val conf = new SparkConf().setMaster("yarn").set(config.SUBMIT_DEPLOY_MODE, "cluster") + Seq( + (2, 2), + (2, 3) + ).foreach { case (maxTaskFailures, maxNodeAttempts) => + conf.set(config.TASK_MAX_FAILURES, maxTaskFailures) + conf.set(config.MAX_TASK_ATTEMPTS_PER_NODE.key, maxNodeAttempts.toString) + val excMsg = intercept[IllegalArgumentException] { + HealthTracker.validateExcludeOnFailureConfs(conf) + }.getMessage() + assert(excMsg === s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key} " + + s"( = ${maxNodeAttempts}) was >= ${config.TASK_MAX_FAILURES.key} " + + s"( = ${maxTaskFailures} ). Though excludeOnFailure is enabled, with this " + + s"configuration, Spark will not be robust to one bad node. 
Decrease " + + s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key}, increase ${config.TASK_MAX_FAILURES.key}, " + + s"or disable excludeOnFailure with ${config.EXCLUDE_ON_FAILURE_ENABLED.key}") + } + + conf.remove(config.TASK_MAX_FAILURES) + conf.remove(config.MAX_TASK_ATTEMPTS_PER_NODE) + + Seq( + config.MAX_TASK_ATTEMPTS_PER_EXECUTOR, + config.MAX_TASK_ATTEMPTS_PER_NODE, + config.MAX_FAILURES_PER_EXEC_STAGE, + config.MAX_FAILED_EXEC_PER_NODE_STAGE, + config.MAX_FAILURES_PER_EXEC, + config.MAX_FAILED_EXEC_PER_NODE, + config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF + ).foreach { config => + conf.set(config.key, "0") + val excMsg = intercept[IllegalArgumentException] { + HealthTracker.validateExcludeOnFailureConfs(conf) + }.getMessage() + assert(excMsg.contains(s"${config.key} was 0, but must be > 0.")) + conf.remove(config) + } + } + + test("excluding kills executors, configured by EXCLUDE_ON_FAILURE_KILL_ENABLED") { + val allocationClientMock = mock[ExecutorAllocationClient] + when(allocationClientMock.killExecutors(any(), any(), any(), any())).thenReturn(Seq("called")) + when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer { (_: InvocationOnMock) => + // To avoid a race between excluding and killing, it is important that the nodeExclude + // is updated before we ask the executor allocation client to kill all the executors + // on a particular host. + if (healthTracker.excludedNodeList().contains("hostA")) { + true + } else { + throw new IllegalStateException("hostA should be on the exclude") + } + } + healthTracker = new HealthTracker(listenerBusMock, conf, Some(allocationClientMock), clock) + + // Disable auto-kill. Exclude an executor and make sure killExecutors is not called. + conf.set(config.EXCLUDE_ON_FAILURE_KILL_ENABLED, false) + + val taskSetExclude0 = createTaskSetExcludelist(stageId = 0) + // Fail 4 tasks in one task set on executor 1, so that executor gets excluded for the whole + // application. + (0 until 4).foreach { partition => + taskSetExclude0.updateExcludedForFailedTask( + "hostA", exec = "1", index = partition, failureReason = "testing") + } + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude0.execToFailures) + + verify(allocationClientMock, never).killExecutor(any()) + + val taskSetExclude1 = createTaskSetExcludelist(stageId = 1) + // Fail 4 tasks in one task set on executor 2, so that executor gets excluded for the whole + // application. Since that's the second executor that is excluded on the same node, we also + // exclude that node. + (0 until 4).foreach { partition => + taskSetExclude1.updateExcludedForFailedTask( + "hostA", exec = "2", index = partition, failureReason = "testing") + } + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude1.execToFailures) + + verify(allocationClientMock, never).killExecutors(any(), any(), any(), any()) + verify(allocationClientMock, never).killExecutorsOnHost(any()) + + // Enable auto-kill. Exclude an executor and make sure killExecutors is called. + conf.set(config.EXCLUDE_ON_FAILURE_KILL_ENABLED, true) + healthTracker = new HealthTracker(listenerBusMock, conf, Some(allocationClientMock), clock) + + val taskSetExclude2 = createTaskSetExcludelist(stageId = 0) + // Fail 4 tasks in one task set on executor 1, so that executor gets excluded for the whole + // application. 
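Before the loop below drives those failures, the decision this pair of checks pins down can be summarised by a toy policy function. This is only a sketch of the observed behaviour with invented names; the real path goes through HealthTracker and ExecutorAllocationClient.

object KillOnExclusionSketch {
  sealed trait Action
  case object DoNothing extends Action
  final case class KillExecutor(id: String) extends Action
  final case class KillAllOnHost(host: String) extends Action

  // killEnabled models the EXCLUDE_ON_FAILURE_KILL_ENABLED flag exercised above.
  def onExclusion(killEnabled: Boolean, nodeExcluded: Boolean, exec: String, host: String): Action =
    if (!killEnabled) DoNothing                 // auto-kill disabled: only track the exclusion
    else if (nodeExcluded) KillAllOnHost(host)  // the whole node went bad: kill everything on it
    else KillExecutor(exec)                     // otherwise kill just the offending executor

  def main(args: Array[String]): Unit = {
    assert(onExclusion(killEnabled = false, nodeExcluded = false, exec = "1", host = "hostA") == DoNothing)
    assert(onExclusion(killEnabled = true, nodeExcluded = false, exec = "1", host = "hostA") == KillExecutor("1"))
    assert(onExclusion(killEnabled = true, nodeExcluded = true, exec = "2", host = "hostA") == KillAllOnHost("hostA"))
  }
}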
+ (0 until 4).foreach { partition => + taskSetExclude2.updateExcludedForFailedTask( + "hostA", exec = "1", index = partition, failureReason = "testing") + } + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude2.execToFailures) + + verify(allocationClientMock).killExecutors(Seq("1"), false, false, true) + + val taskSetExclude3 = createTaskSetExcludelist(stageId = 1) + // Fail 4 tasks in one task set on executor 2, so that executor gets excluded for the whole + // application. Since that's the second executor that is excluded on the same node, we also + // exclude that node. + (0 until 4).foreach { partition => + taskSetExclude3.updateExcludedForFailedTask( + "hostA", exec = "2", index = partition, failureReason = "testing") + } + healthTracker.updateExcludedForSuccessfulTaskSet(0, 0, taskSetExclude3.execToFailures) + + verify(allocationClientMock).killExecutors(Seq("2"), false, false, true) + verify(allocationClientMock).killExecutorsOnHost("hostA") + } + + test("fetch failure excluding kills executors, configured by EXCLUDE_ON_FAILURE_KILL_ENABLED") { + val allocationClientMock = mock[ExecutorAllocationClient] + when(allocationClientMock.killExecutors(any(), any(), any(), any())).thenReturn(Seq("called")) + when(allocationClientMock.killExecutorsOnHost("hostA")).thenAnswer { (_: InvocationOnMock) => + // To avoid a race between excluding and killing, it is important that the nodeExclude + // is updated before we ask the executor allocation client to kill all the executors + // on a particular host. + if (healthTracker.excludedNodeList().contains("hostA")) { + true + } else { + throw new IllegalStateException("hostA should be on the exclude") + } + } + + conf.set(config.EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED, true) + healthTracker = new HealthTracker(listenerBusMock, conf, Some(allocationClientMock), clock) + + // Disable auto-kill. Exclude an executor and make sure killExecutors is not called. + conf.set(config.EXCLUDE_ON_FAILURE_KILL_ENABLED, false) + healthTracker.updateExcludedForFetchFailure("hostA", exec = "1") + + verify(allocationClientMock, never).killExecutors(any(), any(), any(), any()) + verify(allocationClientMock, never).killExecutorsOnHost(any()) + + assert(healthTracker.nodeToExcludedExecs.contains("hostA")) + assert(healthTracker.nodeToExcludedExecs("hostA").contains("1")) + + // Enable auto-kill. Exclude an executor and make sure killExecutors is called. + conf.set(config.EXCLUDE_ON_FAILURE_KILL_ENABLED, true) + healthTracker = new HealthTracker(listenerBusMock, conf, Some(allocationClientMock), clock) + clock.advance(1000) + healthTracker.updateExcludedForFetchFailure("hostA", exec = "1") + + verify(allocationClientMock).killExecutors(Seq("1"), false, false, true) + verify(allocationClientMock, never).killExecutorsOnHost(any()) + + assert(healthTracker.executorIdToExcludedStatus.contains("1")) + assert(healthTracker.executorIdToExcludedStatus("1").node === "hostA") + assert(healthTracker.executorIdToExcludedStatus("1").expiryTime === + 1000 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + assert(healthTracker.nextExpiryTime === 1000 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + assert(healthTracker.nodeIdToExcludedExpiryTime.isEmpty) + assert(healthTracker.nodeToExcludedExecs.contains("hostA")) + assert(healthTracker.nodeToExcludedExecs("hostA").contains("1")) + + // Enable external shuffle service to see if all the executors on this node will be killed. 
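The next step enables the external shuffle service, and the behavioural difference being verified is roughly this: with a shuffle service, shuffle data lives with the node, so a fetch failure implicates the whole host rather than just the serving executor. A toy decision function with illustrative names, not Spark's implementation:

object FetchFailurePolicySketch {
  sealed trait Target
  final case class Host(name: String) extends Target
  final case class Executor(id: String) extends Target

  // Without a shuffle service, only the executor that served the blocks is suspect.
  def excludeOnFetchFailure(shuffleServiceEnabled: Boolean, host: String, exec: String): Target =
    if (shuffleServiceEnabled) Host(host) else Executor(exec)

  def main(args: Array[String]): Unit = {
    assert(excludeOnFetchFailure(shuffleServiceEnabled = false, host = "hostA", exec = "1") == Executor("1"))
    assert(excludeOnFetchFailure(shuffleServiceEnabled = true, host = "hostA", exec = "2") == Host("hostA"))
  }
}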
+ conf.set(config.SHUFFLE_SERVICE_ENABLED, true) + clock.advance(1000) + healthTracker.updateExcludedForFetchFailure("hostA", exec = "2") + + verify(allocationClientMock, never).killExecutors(Seq("2"), true, true) + verify(allocationClientMock).killExecutorsOnHost("hostA") + + assert(healthTracker.nodeIdToExcludedExpiryTime.contains("hostA")) + assert(healthTracker.nodeIdToExcludedExpiryTime("hostA") === + 2000 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + assert(healthTracker.nextExpiryTime === 1000 + healthTracker.EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS) + } +} diff --git a/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala index e6fbf9b09d43d..cb50c7c959754 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/ReplayListenerSuite.scala @@ -255,7 +255,7 @@ class ReplayListenerSuite extends SparkFunSuite with BeforeAndAfter with LocalSp /* * This is a dummy input stream that wraps another input stream but ends prematurely when - * reading at the specified position, throwing an EOFExeption. + * reading at the specified position, throwing an EOFException. */ private class EarlyEOFInputStream(in: InputStream, failAtPos: Int) extends InputStream { private val countDown = new AtomicInteger(failAtPos) diff --git a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala index 0874163b0e946..88d2868b957f9 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/SchedulerIntegrationSuite.scala @@ -44,7 +44,7 @@ import org.apache.spark.util.{CallSite, ThreadUtils, Utils} * TaskSetManagers. * * Test cases are configured by providing a set of jobs to submit, and then simulating interaction - * with spark's executors via a mocked backend (eg., task completion, task failure, executors + * with spark's executors via a mocked backend (e.g., task completion, task failure, executors * disconnecting, etc.). */ abstract class SchedulerIntegrationSuite[T <: MockBackend: ClassTag] extends SparkFunSuite @@ -372,7 +372,7 @@ private[spark] abstract class MockBackend( /** * Accessed by both scheduling and backend thread, so should be protected by this. - * Most likely the only thing that needs to be protected are the inidividual ExecutorTaskStatus, + * Most likely the only thing that needs to be protected are the individual ExecutorTaskStatus, * but for simplicity in this mock just lock the whole backend. 
*/ def executorIdToExecutor: Map[String, ExecutorTaskStatus] @@ -535,8 +535,8 @@ class BasicSchedulerIntegrationSuite extends SchedulerIntegrationSuite[SingleCor */ testScheduler("super simple job") { def runBackend(): Unit = { - val (taskDescripition, _) = backend.beginTask() - backend.taskSuccess(taskDescripition, 42) + val (taskDescription, _) = backend.beginTask() + backend.taskSuccess(taskDescription, 42) } withBackend(runBackend _) { val jobFuture = submit(new MockRDD(sc, 10, Nil), (0 until 10).toArray) diff --git a/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala index a4a84b0e89809..d72744c5cc348 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala @@ -571,9 +571,9 @@ class SparkListenerSuite extends SparkFunSuite with LocalSparkContext with Match } } - test("event queue size can be configued through spark conf") { + test("event queue size can be configured through spark conf") { // configure the shared queue size to be 1, event log queue size to be 2, - // and listner bus event queue size to be 5 + // and listener bus event queue size to be 5 val conf = new SparkConf(false) .set(LISTENER_BUS_EVENT_QUEUE_CAPACITY, 5) .set(s"spark.scheduler.listenerbus.eventqueue.${SHARED_QUEUE}.capacity", "1") @@ -593,7 +593,7 @@ class SparkListenerSuite extends SparkFunSuite with LocalSparkContext with Match // check the size of shared queue is 1 as configured assert(bus.getQueueCapacity(SHARED_QUEUE) == Some(1)) // no specific size of status queue is configured, - // it shoud use the LISTENER_BUS_EVENT_QUEUE_CAPACITY + // it should use the LISTENER_BUS_EVENT_QUEUE_CAPACITY assert(bus.getQueueCapacity(APP_STATUS_QUEUE) == Some(5)) // check the size of event log queue is 5 as configured assert(bus.getQueueCapacity(EVENT_LOG_QUEUE) == Some(2)) diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala index 394a2a9fbf7cb..8a7ff9eb6dcd3 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala @@ -70,7 +70,7 @@ class TaskContextSuite extends SparkFunSuite with BeforeAndAfter with LocalSpark 0, 0, taskBinary, rdd.partitions(0), Seq.empty, 0, new Properties, closureSerializer.serialize(TaskMetrics.registered).array()) intercept[RuntimeException] { - task.run(0, 0, null, null) + task.run(0, 0, null, null, Option.empty) } assert(TaskContextSuite.completed) } @@ -92,7 +92,7 @@ class TaskContextSuite extends SparkFunSuite with BeforeAndAfter with LocalSpark 0, 0, taskBinary, rdd.partitions(0), Seq.empty, 0, new Properties, closureSerializer.serialize(TaskMetrics.registered).array()) intercept[RuntimeException] { - task.run(0, 0, null, null) + task.run(0, 0, null, null, Option.empty) } assert(TaskContextSuite.lastError.getMessage == "damn error") } diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskDescriptionSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskDescriptionSuite.scala index 5839532f11666..98b5bada27646 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskDescriptionSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskDescriptionSuite.scala @@ -33,6 +33,10 @@ class TaskDescriptionSuite extends SparkFunSuite { originalFiles.put("fileUrl1", 1824) 
originalFiles.put("fileUrl2", 2) + val originalArchives = new HashMap[String, Long]() + originalArchives.put("archiveUrl1", 1824) + originalArchives.put("archiveUrl2", 2) + val originalJars = new HashMap[String, Long]() originalJars.put("jar1", 3) @@ -70,6 +74,7 @@ class TaskDescriptionSuite extends SparkFunSuite { partitionId = 1, originalFiles, originalJars, + originalArchives, originalProperties, originalResources, taskBuffer @@ -87,6 +92,7 @@ class TaskDescriptionSuite extends SparkFunSuite { assert(decodedTaskDescription.partitionId === originalTaskDescription.partitionId) assert(decodedTaskDescription.addedFiles.equals(originalFiles)) assert(decodedTaskDescription.addedJars.equals(originalJars)) + assert(decodedTaskDescription.addedArchives.equals(originalArchives)) assert(decodedTaskDescription.properties.equals(originalTaskDescription.properties)) assert(equalResources(decodedTaskDescription.resources, originalTaskDescription.resources)) assert(decodedTaskDescription.serializedTask.equals(taskBuffer)) diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala index f29eb70eb3628..b6a59c8bbd944 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSchedulerImplSuite.scala @@ -34,7 +34,7 @@ import org.apache.spark.internal.config import org.apache.spark.resource.{ExecutorResourceRequests, ResourceProfile, TaskResourceRequests} import org.apache.spark.resource.ResourceUtils._ import org.apache.spark.resource.TestResourceIDs._ -import org.apache.spark.util.{Clock, ManualClock, SystemClock} +import org.apache.spark.util.{Clock, ManualClock} class FakeSchedulerBackend extends SchedulerBackend { def start(): Unit = {} @@ -51,11 +51,11 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B var failedTaskSetReason: String = null var failedTaskSet = false - var blacklist: BlacklistTracker = null + var healthTracker: HealthTracker = null var taskScheduler: TaskSchedulerImpl = null var dagScheduler: DAGScheduler = null - val stageToMockTaskSetBlacklist = new HashMap[Int, TaskSetBlacklist]() + val stageToMockTaskSetExcludelist = new HashMap[Int, TaskSetExcludelist]() val stageToMockTaskSetManager = new HashMap[Int, TaskSetManager]() override def beforeEach(): Unit = { @@ -63,7 +63,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B failedTaskSet = false failedTaskSetException = None failedTaskSetReason = null - stageToMockTaskSetBlacklist.clear() + stageToMockTaskSetExcludelist.clear() stageToMockTaskSetManager.clear() } @@ -95,10 +95,10 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B setupHelper() } - def setupSchedulerWithMockTaskSetBlacklist(confs: (String, String)*): TaskSchedulerImpl = { - blacklist = mock[BlacklistTracker] + def setupSchedulerWithMockTaskSetExcludelist(confs: (String, String)*): TaskSchedulerImpl = { + healthTracker = mock[HealthTracker] val conf = new SparkConf().setMaster("local").setAppName("TaskSchedulerImplSuite") - conf.set(config.BLACKLIST_ENABLED, true) + conf.set(config.EXCLUDE_ON_FAILURE_ENABLED, true) confs.foreach { case (k, v) => conf.set(k, v) } sc = new SparkContext(conf) @@ -106,16 +106,16 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B new TaskSchedulerImpl(sc, sc.conf.get(config.TASK_MAX_FAILURES)) { override def 
createTaskSetManager(taskSet: TaskSet, maxFailures: Int): TaskSetManager = { val tsm = super.createTaskSetManager(taskSet, maxFailures) - // we need to create a spied tsm just so we can set the TaskSetBlacklist + // we need to create a spied tsm just so we can set the TaskSetExcludelist val tsmSpy = spy(tsm) - val taskSetBlacklist = mock[TaskSetBlacklist] - when(tsmSpy.taskSetBlacklistHelperOpt).thenReturn(Some(taskSetBlacklist)) + val taskSetExcludelist = mock[TaskSetExcludelist] + when(tsmSpy.taskSetExcludelistHelperOpt).thenReturn(Some(taskSetExcludelist)) stageToMockTaskSetManager(taskSet.stageId) = tsmSpy - stageToMockTaskSetBlacklist(taskSet.stageId) = taskSetBlacklist + stageToMockTaskSetExcludelist(taskSet.stageId) = taskSetExcludelist tsmSpy } - override private[scheduler] lazy val blacklistTrackerOpt = Some(blacklist) + override private[scheduler] lazy val healthTrackerOpt = Some(healthTracker) } setupHelper() } @@ -230,7 +230,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B sc.conf.get(config.TASK_MAX_FAILURES), clock = clock) { override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = { - new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt, clock) + new TaskSetManager(this, taskSet, maxTaskFailures, healthTrackerOpt, clock) } override def shuffleOffers(offers: IndexedSeq[WorkerOffer]): IndexedSeq[WorkerOffer] = { // Don't shuffle the offers around for this test. Instead, we'll just pass in all @@ -678,22 +678,22 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B assert(!failedTaskSet) } - test("scheduled tasks obey task and stage blacklists") { - taskScheduler = setupSchedulerWithMockTaskSetBlacklist() + test("scheduled tasks obey task and stage excludelist") { + taskScheduler = setupSchedulerWithMockTaskSetExcludelist() (0 to 2).foreach {stageId => val taskSet = FakeTask.createTaskSet(numTasks = 2, stageId = stageId, stageAttemptId = 0) taskScheduler.submitTasks(taskSet) } - // Setup our mock blacklist: - // * stage 0 is blacklisted on node "host1" - // * stage 1 is blacklisted on executor "executor3" - // * stage 0, partition 0 is blacklisted on executor 0 - // (mocked methods default to returning false, ie. no blacklisting) - when(stageToMockTaskSetBlacklist(0).isNodeBlacklistedForTaskSet("host1")).thenReturn(true) - when(stageToMockTaskSetBlacklist(1).isExecutorBlacklistedForTaskSet("executor3")) + // Setup our mock excludelist: + // * stage 0 is excluded on node "host1" + // * stage 1 is excluded on executor "executor3" + // * stage 0, partition 0 is excluded on executor 0 + // (mocked methods default to returning false, ie. no excluding) + when(stageToMockTaskSetExcludelist(0).isNodeExcludedForTaskSet("host1")).thenReturn(true) + when(stageToMockTaskSetExcludelist(1).isExecutorExcludedForTaskSet("executor3")) .thenReturn(true) - when(stageToMockTaskSetBlacklist(0).isExecutorBlacklistedForTask("executor0", 0)) + when(stageToMockTaskSetExcludelist(0).isExecutorExcludedForTask("executor0", 0)) .thenReturn(true) val offers = IndexedSeq( @@ -705,21 +705,21 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten // We should schedule all tasks. assert(firstTaskAttempts.size === 6) - // Whenever we schedule a task, we must consult the node and executor blacklist. (The test + // Whenever we schedule a task, we must consult the node and executor excludelist. 
(The test // doesn't check exactly what checks are made because the offers get shuffled.) (0 to 2).foreach { stageId => - verify(stageToMockTaskSetBlacklist(stageId), atLeast(1)) - .isNodeBlacklistedForTaskSet(anyString()) - verify(stageToMockTaskSetBlacklist(stageId), atLeast(1)) - .isExecutorBlacklistedForTaskSet(anyString()) + verify(stageToMockTaskSetExcludelist(stageId), atLeast(1)) + .isNodeExcludedForTaskSet(anyString()) + verify(stageToMockTaskSetExcludelist(stageId), atLeast(1)) + .isExecutorExcludedForTaskSet(anyString()) } def tasksForStage(stageId: Int): Seq[TaskDescription] = { firstTaskAttempts.filter{_.name.contains(s"stage $stageId")} } tasksForStage(0).foreach { task => - // executors 1 & 2 blacklisted for node - // executor 0 blacklisted just for partition 0 + // executors 1 & 2 excluded for node + // executor 0 excluded just for partition 0 if (task.index == 0) { assert(task.executorId === "executor3") } else { @@ -727,12 +727,12 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B } } tasksForStage(1).foreach { task => - // executor 3 blacklisted + // executor 3 excluded assert("executor3" != task.executorId) } // no restrictions on stage 2 - // Finally, just make sure that we can still complete tasks as usual with blacklisting + // Finally, just make sure that we can still complete tasks as usual with exclusion // in effect. Finish each of the tasksets -- taskset 0 & 1 complete successfully, taskset 2 // fails. (0 to 2).foreach { stageId => @@ -770,23 +770,23 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B } // the tasksSets complete, so the tracker should be notified of the successful ones - verify(blacklist, times(1)).updateBlacklistForSuccessfulTaskSet( + verify(healthTracker, times(1)).updateExcludedForSuccessfulTaskSet( stageId = 0, stageAttemptId = 0, - failuresByExec = stageToMockTaskSetBlacklist(0).execToFailures) - verify(blacklist, times(1)).updateBlacklistForSuccessfulTaskSet( + failuresByExec = stageToMockTaskSetExcludelist(0).execToFailures) + verify(healthTracker, times(1)).updateExcludedForSuccessfulTaskSet( stageId = 1, stageAttemptId = 0, - failuresByExec = stageToMockTaskSetBlacklist(1).execToFailures) + failuresByExec = stageToMockTaskSetExcludelist(1).execToFailures) // but we shouldn't update for the failed taskset - verify(blacklist, never).updateBlacklistForSuccessfulTaskSet( + verify(healthTracker, never).updateExcludedForSuccessfulTaskSet( stageId = meq(2), stageAttemptId = anyInt(), failuresByExec = any()) } - test("scheduled tasks obey node and executor blacklists") { - taskScheduler = setupSchedulerWithMockTaskSetBlacklist() + test("scheduled tasks obey node and executor excludelists") { + taskScheduler = setupSchedulerWithMockTaskSetExcludelist() (0 to 2).foreach { stageId => val taskSet = FakeTask.createTaskSet(numTasks = 2, stageId = stageId, stageAttemptId = 0) taskScheduler.submitTasks(taskSet) @@ -800,13 +800,13 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B new WorkerOffer("executor4", "host3", 1) ) - // setup our mock blacklist: - // host1, executor0 & executor3 are completely blacklisted + // setup our mock excludelist: + // host1, executor0 & executor3 are completely excluded // This covers everything *except* one core on executor4 / host3, so that everything is still // schedulable. 
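The stubs that follow encode the layering the scheduler consults when matching an offer to a task: node first, then executor, then the per-task entry. A small sketch of that ordering under invented names (the real checks live in the scheduler and TaskSetExcludelist):

object OfferFilterSketch {
  final case class Offer(executorId: String, host: String)

  // Ordered checks: a node-level exclusion short-circuits the executor check, which in turn
  // short-circuits the per-task check.
  def canRun(
      offer: Offer,
      taskIndex: Int,
      nodeExcluded: String => Boolean,
      execExcluded: String => Boolean,
      taskExcludedOnExec: (String, Int) => Boolean): Boolean = {
    !nodeExcluded(offer.host) &&
    !execExcluded(offer.executorId) &&
    !taskExcludedOnExec(offer.executorId, taskIndex)
  }

  def main(args: Array[String]): Unit = {
    val offer = Offer("executor0", "host0")
    // Mirror the mock above: executor0 is excluded only for task index 0.
    val taskExcluded = (e: String, i: Int) => e == "executor0" && i == 0
    assert(!canRun(offer, 0, _ => false, _ => false, taskExcluded))
    assert(canRun(offer, 1, _ => false, _ => false, taskExcluded))
  }
}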
- when(blacklist.isNodeBlacklisted("host1")).thenReturn(true) - when(blacklist.isExecutorBlacklisted("executor0")).thenReturn(true) - when(blacklist.isExecutorBlacklisted("executor3")).thenReturn(true) + when(healthTracker.isNodeExcluded("host1")).thenReturn(true) + when(healthTracker.isExecutorExcluded("executor0")).thenReturn(true) + when(healthTracker.isExecutorExcluded("executor3")).thenReturn(true) val stageToTsm = (0 to 2).map { stageId => val tsm = taskScheduler.taskSetManagerForAttempt(stageId, 0).get @@ -818,12 +818,12 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B assert(firstTaskAttempts.size === 1) assert(firstTaskAttempts.head.executorId === "executor4") ('0' until '2').foreach { hostNum => - verify(blacklist, atLeast(1)).isNodeBlacklisted("host" + hostNum) + verify(healthTracker, atLeast(1)).isNodeExcluded("host" + hostNum) } } - test("abort stage when all executors are blacklisted and we cannot acquire new executor") { - taskScheduler = setupSchedulerWithMockTaskSetBlacklist() + test("abort stage when all executors are excluded and we cannot acquire new executor") { + taskScheduler = setupSchedulerWithMockTaskSetExcludelist() val taskSet = FakeTask.createTaskSet(numTasks = 10) taskScheduler.submitTasks(taskSet) val tsm = stageToMockTaskSetManager(0) @@ -836,11 +836,11 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B WorkerOffer("executor3", "host1", 2) )) - // now say our blacklist updates to blacklist a bunch of resources, but *not* everything - when(blacklist.isNodeBlacklisted("host1")).thenReturn(true) - when(blacklist.isExecutorBlacklisted("executor0")).thenReturn(true) + // now say our health tracker updates to exclude a bunch of resources, but *not* everything + when(healthTracker.isNodeExcluded("host1")).thenReturn(true) + when(healthTracker.isExecutorExcluded("executor0")).thenReturn(true) - // make an offer on the blacklisted resources. We won't schedule anything, but also won't + // make an offer on the excluded resources. 
We won't schedule anything, but also won't // abort yet, since we know of other resources that work assert(taskScheduler.resourceOffers(IndexedSeq( WorkerOffer("executor0", "host0", 2), @@ -848,9 +848,9 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B )).flatten.size === 0) assert(!tsm.isZombie) - // now update the blacklist so that everything really is blacklisted - when(blacklist.isExecutorBlacklisted("executor1")).thenReturn(true) - when(blacklist.isExecutorBlacklisted("executor2")).thenReturn(true) + // now update the health tracker so that everything really is excluded + when(healthTracker.isExecutorExcluded("executor1")).thenReturn(true) + when(healthTracker.isExecutorExcluded("executor2")).thenReturn(true) assert(taskScheduler.resourceOffers(IndexedSeq( WorkerOffer("executor0", "host0", 2), WorkerOffer("executor3", "host1", 2) @@ -859,10 +859,10 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B verify(tsm).abort(anyString(), any()) } - test("SPARK-22148 abort timer should kick in when task is completely blacklisted & no new " + + test("SPARK-22148 abort timer should kick in when task is completely excluded & no new " + "executor can be acquired") { // set the abort timer to fail immediately - taskScheduler = setupSchedulerWithMockTaskSetBlacklist( + taskScheduler = setupSchedulerWithMockTaskSetExcludelist( config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0") // We have only 1 task remaining with 1 executor @@ -878,10 +878,10 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // Fail the running task val failedTask = firstTaskAttempts.find(_.executorId == "executor0").get failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm) - when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask( + when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask( "executor0", failedTask.index)).thenReturn(true) - // make an offer on the blacklisted executor. We won't schedule anything, and set the abort + // make an offer on the excluded executor. We won't schedule anything, and set the abort // timer to kick in immediately assert(taskScheduler.resourceOffers(IndexedSeq( WorkerOffer("executor0", "host0", 1) @@ -894,7 +894,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B } test("SPARK-22148 try to acquire a new executor when task is unschedulable with 1 executor") { - taskScheduler = setupSchedulerWithMockTaskSetBlacklist( + taskScheduler = setupSchedulerWithMockTaskSetExcludelist( config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "10") // We have only 1 task remaining with 1 executor @@ -910,11 +910,11 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // Fail the running task val failedTask = firstTaskAttempts.head failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm) - when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask( + when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask( "executor0", failedTask.index)).thenReturn(true) - // make an offer on the blacklisted executor. We won't schedule anything, and set the abort - // timer to expire if no new executors could be acquired. We kill the existing idle blacklisted + // make an offer on the excluded executor. We won't schedule anything, and set the abort + // timer to expire if no new executors could be acquired. We kill the existing idle excluded // executor and try to acquire a new one. 
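The SPARK-22148 tests in this stretch all revolve around one piece of bookkeeping: arm an abort timer when a taskset has a task that is excluded everywhere and no new executor can be obtained, and clear it as soon as anything gets scheduled. A toy model of that bookkeeping with invented names, not the TaskSchedulerImpl implementation:

object AbortTimerSketch {
  private val expiryByTaskSet = scala.collection.mutable.Map.empty[Int, Long]

  // Arm the timer the first time a taskset is seen as completely unschedulable.
  def onUnschedulable(stageId: Int, now: Long, timeoutMillis: Long): Unit =
    if (!expiryByTaskSet.contains(stageId)) {
      expiryByTaskSet(stageId) = now + timeoutMillis
    }

  // Any successful scheduling means the cluster can still make progress, so clear every timer.
  def onTaskScheduled(): Unit = expiryByTaskSet.clear()

  def taskSetsToAbort(now: Long): Set[Int] =
    expiryByTaskSet.collect { case (stage, expiry) if now >= expiry => stage }.toSet

  def main(args: Array[String]): Unit = {
    onUnschedulable(stageId = 0, now = 0L, timeoutMillis = 10L)
    assert(taskSetsToAbort(now = 5L).isEmpty)     // still inside the timeout
    onTaskScheduled()                             // something was scheduled: timers are cleared
    assert(taskSetsToAbort(now = 50L).isEmpty)
    onUnschedulable(stageId = 1, now = 50L, timeoutMillis = 0L)
    assert(taskSetsToAbort(now = 50L) == Set(1))  // a zero timeout aborts immediately
  }
}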
assert(taskScheduler.resourceOffers(IndexedSeq( WorkerOffer("executor0", "host0", 1) @@ -930,12 +930,12 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B assert(!tsm.isZombie) } - // This is to test a scenario where we have two taskSets completely blacklisted and on acquiring + // This is to test a scenario where we have two taskSets completely excluded and on acquiring // a new executor we don't want the abort timer for the second taskSet to expire and abort the job test("SPARK-22148 abort timer should clear unschedulableTaskSetToExpiryTime for all TaskSets") { - taskScheduler = setupSchedulerWithMockTaskSetBlacklist() + taskScheduler = setupSchedulerWithMockTaskSetExcludelist() - // We have 2 taskSets with 1 task remaining in each with 1 executor completely blacklisted + // We have 2 taskSets with 1 task remaining in each with 1 executor completely excluded val taskSet1 = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0) taskScheduler.submitTasks(taskSet1) val taskSet2 = FakeTask.createTaskSet(numTasks = 1, stageId = 1, stageAttemptId = 0) @@ -952,7 +952,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // Fail the running task val failedTask = firstTaskAttempts.head failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm) - when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask( + when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask( "executor0", failedTask.index)).thenReturn(true) // make an offer. We will schedule the task from the second taskSet. Since a task was scheduled @@ -966,10 +966,10 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B val tsm2 = stageToMockTaskSetManager(1) val failedTask2 = secondTaskAttempts.head failTask(failedTask2.taskId, TaskState.FAILED, UnknownReason, tsm2) - when(tsm2.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask( + when(tsm2.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask( "executor0", failedTask2.index)).thenReturn(true) - // make an offer on the blacklisted executor. We won't schedule anything, and set the abort + // make an offer on the excluded executor. We won't schedule anything, and set the abort // timer for taskSet1 and taskSet2 assert(taskScheduler.resourceOffers(IndexedSeq( WorkerOffer("executor0", "host0", 1) @@ -991,9 +991,9 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // this test is to check that we don't abort a taskSet which is not being scheduled on other // executors as it is waiting on locality timeout and not being aborted because it is still not - // completely blacklisted. - test("SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely blacklisted") { - taskScheduler = setupSchedulerWithMockTaskSetBlacklist( + // completely excluded. 
+ test("SPARK-22148 Ensure we don't abort the taskSet if we haven't been completely excluded") { + taskScheduler = setupSchedulerWithMockTaskSetExcludelist( config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0", // This is to avoid any potential flakiness in the test because of large pauses in jenkins config.LOCALITY_WAIT.key -> "30s" @@ -1014,7 +1014,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // Fail the running task val failedTask = taskAttempts.head failTask(failedTask.taskId, TaskState.FAILED, UnknownReason, tsm) - when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask( + when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask( "executor0", failedTask.index)).thenReturn(true) // make an offer but we won't schedule anything yet as scheduler locality is still PROCESS_LOCAL @@ -1027,10 +1027,10 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B assert(!tsm.isZombie) } - test("SPARK-31418 abort timer should kick in when task is completely blacklisted &" + + test("SPARK-31418 abort timer should kick in when task is completely excluded &" + "allocation manager could not acquire a new executor before the timeout") { // set the abort timer to fail immediately - taskScheduler = setupSchedulerWithMockTaskSetBlacklist( + taskScheduler = setupSchedulerWithMockTaskSetExcludelist( config.UNSCHEDULABLE_TASKSET_TIMEOUT.key -> "0", config.DYN_ALLOCATION_ENABLED.key -> "true") @@ -1044,14 +1044,14 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // Fail the running task failTask(0, TaskState.FAILED, UnknownReason, tsm) - when(tsm.taskSetBlacklistHelperOpt.get.isExecutorBlacklistedForTask( + when(tsm.taskSetExcludelistHelperOpt.get.isExecutorExcludedForTask( "executor0", 0)).thenReturn(true) // If the executor is busy, then dynamic allocation should kick in and try - // to acquire additional executors to schedule the blacklisted task + // to acquire additional executors to schedule the excluded task assert(taskScheduler.isExecutorBusy("executor0")) - // make an offer on the blacklisted executor. We won't schedule anything, and set the abort + // make an offer on the excluded executor. We won't schedule anything, and set the abort // timer to kick in immediately assert(taskScheduler.resourceOffers(IndexedSeq( WorkerOffer("executor0", "host0", 1) @@ -1064,31 +1064,31 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B } /** - * Helper for performance tests. Takes the explicitly blacklisted nodes and executors; verifies - * that the blacklists are used efficiently to ensure scheduling is not O(numPendingTasks). + * Helper for performance tests. Takes the explicitly excluded nodes and executors; verifies + * that the excluded are used efficiently to ensure scheduling is not O(numPendingTasks). * Creates 1 offer on executor[1-3]. Executor1 & 2 are on host1, executor3 is on host2. Passed * in nodes and executors should be on that list. */ - private def testBlacklistPerformance( + private def testExcludelistPerformance( testName: String, - nodeBlacklist: Seq[String], - execBlacklist: Seq[String]): Unit = { + nodeExcludelist: Seq[String], + execExcludelist: Seq[String]): Unit = { // Because scheduling involves shuffling the order of offers around, we run this test a few // times to cover more possibilities. There are only 3 offers, which means 6 permutations, // so 10 iterations is pretty good. 
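The comments inside the helper that follows explain why exclusion checks must stay O(numOffers) rather than O(numPendingTasks). A compact, purely illustrative demonstration of that short-circuit (the counts and names are made up):

object ShortCircuitSketch {
  def main(args: Array[String]): Unit = {
    val numPendingTasks = 500
    val execExcludedForTaskSet = Set("executor1")
    var perTaskChecks = 0

    // An offer is matched against pending tasks only if it survives the taskset-level check.
    def handleOffer(exec: String): Unit =
      if (!execExcludedForTaskSet.contains(exec)) {
        (0 until numPendingTasks).foreach(_ => perTaskChecks += 1)
      }

    handleOffer("executor1")
    assert(perTaskChecks == 0)               // an excluded offer never reaches per-task checks
    handleOffer("executor3")
    assert(perTaskChecks == numPendingTasks) // a healthy offer may walk every pending task
  }
}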
(0 until 10).foreach { testItr => test(s"$testName: iteration $testItr") { - // When an executor or node is blacklisted, we want to make sure that we don't try - // scheduling each pending task, one by one, to discover they are all blacklisted. This is + // When an executor or node is excluded, we want to make sure that we don't try + // scheduling each pending task, one by one, to discover they are all excluded. This is // important for performance -- if we did check each task one-by-one, then responding to a // resource offer (which is usually O(1)-ish) would become O(numPendingTasks), which would // slow down scheduler throughput and slow down scheduling even on healthy executors. // Here, we check a proxy for the runtime -- we make sure the scheduling is short-circuited - // at the node or executor blacklist, so we never check the per-task blacklist. We also - // make sure we don't check the node & executor blacklist for the entire taskset + // at the node or executor excludelist, so we never check the per-task excludelist. We also + // make sure we don't check the node & executor excludelist for the entire taskset // O(numPendingTasks) times. - taskScheduler = setupSchedulerWithMockTaskSetBlacklist() + taskScheduler = setupSchedulerWithMockTaskSetExcludelist() // we schedule 500 tasks so we can clearly distinguish anything that is O(numPendingTasks) val taskSet = FakeTask.createTaskSet(numTasks = 500, stageId = 0, stageAttemptId = 0) taskScheduler.submitTasks(taskSet) @@ -1098,91 +1098,92 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B new WorkerOffer("executor2", "host1", 1), new WorkerOffer("executor3", "host2", 1) ) - // We should check the node & exec blacklists, but only O(numOffers), not O(numPendingTasks) - // times. In the worst case, after shuffling, we offer our blacklisted resource first, and - // then offer other resources which do get used. The taskset blacklist is consulted - // repeatedly as we offer resources to the taskset -- each iteration either schedules - // something, or it terminates that locality level, so the maximum number of checks is - // numCores + numLocalityLevels + // We should check the node & exec excludelists, but only O(numOffers), + // not O(numPendingTasks) times. In the worst case, after shuffling, + // we offer our excluded resource first, and then offer other resources + // which do get used. The taskset excludelist is consulted repeatedly + // as we offer resources to the taskset -- each iteration either schedules + // something, or it terminates that locality level, so the maximum number of + // checks is numCores + numLocalityLevels val numCoresOnAllOffers = offers.map(_.cores).sum val numLocalityLevels = TaskLocality.values.size - val maxBlacklistChecks = numCoresOnAllOffers + numLocalityLevels + val maxExcludelistChecks = numCoresOnAllOffers + numLocalityLevels - // Setup the blacklist - nodeBlacklist.foreach { node => - when(stageToMockTaskSetBlacklist(0).isNodeBlacklistedForTaskSet(node)).thenReturn(true) + // Setup the excludelist + nodeExcludelist.foreach { node => + when(stageToMockTaskSetExcludelist(0).isNodeExcludedForTaskSet(node)).thenReturn(true) } - execBlacklist.foreach { exec => - when(stageToMockTaskSetBlacklist(0).isExecutorBlacklistedForTaskSet(exec)) + execExcludelist.foreach { exec => + when(stageToMockTaskSetExcludelist(0).isExecutorExcludedForTaskSet(exec)) .thenReturn(true) } - // Figure out which nodes have any effective blacklisting on them. 
This means all nodes - // that are explicitly blacklisted, plus those that have *any* executors blacklisted. - val nodesForBlacklistedExecutors = offers.filter { offer => - execBlacklist.contains(offer.executorId) + // Figure out which nodes have any effective exclusions on them. This means all nodes + // that are explicitly excluded, plus those that have *any* executors excluded. + val nodesForExcludedExecutors = offers.filter { offer => + execExcludelist.contains(offer.executorId) }.map(_.host).distinct - val nodesWithAnyBlacklisting = (nodeBlacklist ++ nodesForBlacklistedExecutors).toSet - // Similarly, figure out which executors have any blacklisting. This means all executors - // that are explicitly blacklisted, plus all executors on nodes that are blacklisted. - val execsForBlacklistedNodes = offers.filter { offer => - nodeBlacklist.contains(offer.host) + val nodesWithAnyExclusions = (nodeExcludelist ++ nodesForExcludedExecutors).toSet + // Similarly, figure out which executors have any exclusions. This means all executors + // that are explicitly excluded, plus all executors on nodes that are excluded. + val execsForExcludedNodes = offers.filter { offer => + nodeExcludelist.contains(offer.host) }.map(_.executorId).toSeq - val executorsWithAnyBlacklisting = (execBlacklist ++ execsForBlacklistedNodes).toSet + val executorsWithAnyExclusions = (execExcludelist ++ execsForExcludedNodes).toSet // Schedule a taskset, and make sure our test setup is correct -- we are able to schedule - // a task on all executors that aren't blacklisted (whether that executor is a explicitly - // blacklisted, or implicitly blacklisted via the node blacklist). + // a task on all executors that aren't excluded (whether that executor is a explicitly + // excluded, or implicitly excluded via the node excludeOnFailures). val firstTaskAttempts = taskScheduler.resourceOffers(offers).flatten - assert(firstTaskAttempts.size === offers.size - executorsWithAnyBlacklisting.size) + assert(firstTaskAttempts.size === offers.size - executorsWithAnyExclusions.size) - // Now check that we haven't made too many calls to any of the blacklist methods. - // We should be checking our node blacklist, but it should be within the bound we defined + // Now check that we haven't made too many calls to any of the excludelist methods. + // We should be checking our node excludelist, but it should be within the bound we defined // above. - verify(stageToMockTaskSetBlacklist(0), atMost(maxBlacklistChecks)) - .isNodeBlacklistedForTaskSet(anyString()) - // We shouldn't ever consult the per-task blacklist for the nodes that have been blacklisted - // for the entire taskset, since the taskset level blacklisting should prevent scheduling + verify(stageToMockTaskSetExcludelist(0), atMost(maxExcludelistChecks)) + .isNodeExcludedForTaskSet(anyString()) + // We shouldn't ever consult the per-task excludelist for the nodes that have been excluded + // for the entire taskset, since the taskset level exclusions should prevent scheduling // from ever looking at specific tasks. - nodesWithAnyBlacklisting.foreach { node => - verify(stageToMockTaskSetBlacklist(0), never) - .isNodeBlacklistedForTask(meq(node), anyInt()) + nodesWithAnyExclusions.foreach { node => + verify(stageToMockTaskSetExcludelist(0), never) + .isNodeExcludedForTask(meq(node), anyInt()) } - executorsWithAnyBlacklisting.foreach { exec => - // We should be checking our executor blacklist, but it should be within the bound defined - // above. 
Its possible that this will be significantly fewer calls, maybe even 0, if - // there is also a node-blacklist which takes effect first. But this assert is all we - // need to avoid an O(numPendingTask) slowdown. - verify(stageToMockTaskSetBlacklist(0), atMost(maxBlacklistChecks)) - .isExecutorBlacklistedForTaskSet(exec) - // We shouldn't ever consult the per-task blacklist for executors that have been - // blacklisted for the entire taskset, since the taskset level blacklisting should prevent + executorsWithAnyExclusions.foreach { exec => + // We should be checking our executor excludelist, but it should be within the bound + // defined above. Its possible that this will be significantly fewer calls, maybe even + // 0, if there is also a node-excludelist which takes effect first. But this assert is + // all we need to avoid an O(numPendingTask) slowdown. + verify(stageToMockTaskSetExcludelist(0), atMost(maxExcludelistChecks)) + .isExecutorExcludedForTaskSet(exec) + // We shouldn't ever consult the per-task excludelist for executors that have been + // excluded for the entire taskset, since the taskset level exclusions should prevent // scheduling from ever looking at specific tasks. - verify(stageToMockTaskSetBlacklist(0), never) - .isExecutorBlacklistedForTask(meq(exec), anyInt()) + verify(stageToMockTaskSetExcludelist(0), never) + .isExecutorExcludedForTask(meq(exec), anyInt()) } } } } - testBlacklistPerformance( - testName = "Blacklisted node for entire task set prevents per-task blacklist checks", - nodeBlacklist = Seq("host1"), - execBlacklist = Seq()) + testExcludelistPerformance( + testName = "Excluded node for entire task set prevents per-task exclusion checks", + nodeExcludelist = Seq("host1"), + execExcludelist = Seq()) - testBlacklistPerformance( - testName = "Blacklisted executor for entire task set prevents per-task blacklist checks", - nodeBlacklist = Seq(), - execBlacklist = Seq("executor3") + testExcludelistPerformance( + testName = "Excluded executor for entire task set prevents per-task exclusion checks", + nodeExcludelist = Seq(), + execExcludelist = Seq("executor3") ) test("abort stage if executor loss results in unschedulability from previously failed tasks") { - // Make sure we can detect when a taskset becomes unschedulable from a blacklisting. This + // Make sure we can detect when a taskset becomes unschedulable from excludeOnFailure. This // test explores a particular corner case -- you may have one task fail, but still be // schedulable on another executor. However, that executor may fail later on, leaving the // first task with no place to run. val taskScheduler = setupScheduler( - config.BLACKLIST_ENABLED.key -> "true" + config.EXCLUDE_ON_FAILURE_ENABLED.key -> "true" ) val taskSet = FakeTask.createTaskSet(2) @@ -1215,7 +1216,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B assert(nextTaskAttempts.head.index != failedTask.index) // Now we should definitely realize that our task set is unschedulable, because the only - // task left can't be scheduled on any executors due to the blacklist. + // task left can't be scheduled on any executors due to the excludelist. 
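That "nowhere left to run" condition boils down to a universal check over the live executors. A one-function sketch under invented names, not the scheduler's actual code:

object UnschedulableSketch {
  // The remaining task is unschedulable when it is excluded on every live executor.
  def isUnschedulable(excludedOn: Set[String], liveExecutors: Set[String]): Boolean =
    liveExecutors.nonEmpty && liveExecutors.forall(excludedOn)

  def main(args: Array[String]): Unit = {
    val excludedOn = Set("executor0")
    assert(!isUnschedulable(excludedOn, Set("executor0", "executor1"))) // executor1 can still run it
    assert(isUnschedulable(excludedOn, Set("executor0")))               // executor1 lost: abort path
  }
}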
taskScheduler.resourceOffers(IndexedSeq(new WorkerOffer("executor0", "host0", 1))) sc.listenerBus.waitUntilEmpty(100000) assert(tsm.isZombie) @@ -1223,11 +1224,11 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B val idx = failedTask.index assert(failedTaskSetReason === s""" |Aborting $taskSet because task $idx (partition $idx) - |cannot run anywhere due to node and executor blacklist. + |cannot run anywhere due to node and executor excludeOnFailure. |Most recent failure: - |${tsm.taskSetBlacklistHelperOpt.get.getLatestFailureReason} + |${tsm.taskSetExcludelistHelperOpt.get.getLatestFailureReason} | - |Blacklisting behavior can be configured via spark.blacklist.*. + |ExcludeOnFailure behavior can be configured via spark.excludeOnFailure.*. |""".stripMargin) } @@ -1238,7 +1239,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B // available and not bail on the job val taskScheduler = setupScheduler( - config.BLACKLIST_ENABLED.key -> "true" + config.EXCLUDE_ON_FAILURE_ENABLED.key -> "true" ) val taskSet = FakeTask.createTaskSet(2, (0 until 2).map { _ => Seq(TaskLocation("host0")) }: _*) @@ -1306,7 +1307,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B assert(taskScheduler.getExecutorsAliveOnHost("host1") === Some(Set("executor1", "executor3"))) } - test("scheduler checks for executors that can be expired from blacklist") { + test("scheduler checks for executors that can be expired from excludeOnFailure") { taskScheduler = setupScheduler() taskScheduler.submitTasks(FakeTask.createTaskSet(1, stageId = 0, stageAttemptId = 0)) @@ -1314,7 +1315,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B new WorkerOffer("executor0", "host0", 1) )).flatten - verify(blacklist).applyBlacklistTimeout() + verify(healthTracker).applyExcludeOnFailureTimeout() } test("if an executor is lost then the state for its running tasks is cleaned up (SPARK-18553)") { @@ -1400,7 +1401,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B offers } override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = { - new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt, clock) + new TaskSetManager(this, taskSet, maxTaskFailures, healthTrackerOpt, clock) } } // Need to initialize a DAGScheduler for the taskScheduler to use for callbacks. @@ -1440,7 +1441,7 @@ class TaskSchedulerImplSuite extends SparkFunSuite with LocalSparkContext with B val clock = new ManualClock() val taskScheduler = new TaskSchedulerImpl(sc) { override def createTaskSetManager(taskSet: TaskSet, maxTaskFailures: Int): TaskSetManager = { - new TaskSetManager(this, taskSet, maxTaskFailures, blacklistTrackerOpt, clock) + new TaskSetManager(this, taskSet, maxTaskFailures, healthTrackerOpt, clock) } } // Need to initialize a DAGScheduler for the taskScheduler to use for callbacks. diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSetBlacklistSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSetBlacklistSuite.scala deleted file mode 100644 index ed97a4c206ca3..0000000000000 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSetBlacklistSuite.scala +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.spark.scheduler - -import org.mockito.ArgumentMatchers.isA -import org.mockito.Mockito.{never, verify} -import org.scalatest.BeforeAndAfterEach -import org.scalatestplus.mockito.MockitoSugar - -import org.apache.spark.{SparkConf, SparkFunSuite} -import org.apache.spark.internal.config -import org.apache.spark.util.ManualClock - -class TaskSetBlacklistSuite extends SparkFunSuite with BeforeAndAfterEach with MockitoSugar { - - private var listenerBusMock: LiveListenerBus = _ - - override def beforeEach(): Unit = { - listenerBusMock = mock[LiveListenerBus] - super.beforeEach() - } - - test("Blacklisting tasks, executors, and nodes") { - val conf = new SparkConf().setAppName("test").setMaster("local") - .set(config.BLACKLIST_ENABLED.key, "true") - val clock = new ManualClock - val attemptId = 0 - val taskSetBlacklist = new TaskSetBlacklist( - listenerBusMock, conf, stageId = 0, stageAttemptId = attemptId, clock = clock) - - clock.setTime(0) - // We will mark task 0 & 1 failed on both executor 1 & 2. - // We should blacklist all executors on that host, for all tasks for the stage. Note the API - // will return false for isExecutorBacklistedForTaskSet even when the node is blacklisted, so - // the executor is implicitly blacklisted (this makes sense with how the scheduler uses the - // blacklist) - - // First, mark task 0 as failed on exec1. - // task 0 should be blacklisted on exec1, and nowhere else - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "exec1", index = 0, failureReason = "testing") - for { - executor <- (1 to 4).map(_.toString) - index <- 0 until 10 - } { - val shouldBeBlacklisted = (executor == "exec1" && index == 0) - assert(taskSetBlacklist.isExecutorBlacklistedForTask(executor, index) === shouldBeBlacklisted) - } - - assert(!taskSetBlacklist.isExecutorBlacklistedForTaskSet("exec1")) - verify(listenerBusMock, never()) - .post(isA(classOf[SparkListenerExecutorBlacklistedForStage])) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()) - .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) - - // Mark task 1 failed on exec1 -- this pushes the executor into the blacklist - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "exec1", index = 1, failureReason = "testing") - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("exec1")) - verify(listenerBusMock).post( - SparkListenerExecutorBlacklistedForStage(0, "exec1", 2, 0, attemptId)) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()) - .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) - - // Mark one task as failed on exec2 -- not enough for any further blacklisting yet. 
- taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "exec2", index = 0, failureReason = "testing") - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("exec1")) - - assert(!taskSetBlacklist.isExecutorBlacklistedForTaskSet("exec2")) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()) - .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) - - // Mark another task as failed on exec2 -- now we blacklist exec2, which also leads to - // blacklisting the entire node. - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "exec2", index = 1, failureReason = "testing") - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("exec1")) - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("exec2")) - verify(listenerBusMock).post( - SparkListenerExecutorBlacklistedForStage(0, "exec2", 2, 0, attemptId)) - - assert(taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock).post( - SparkListenerNodeBlacklistedForStage(0, "hostA", 2, 0, attemptId)) - - // Make sure the blacklist has the correct per-task && per-executor responses, over a wider - // range of inputs. - for { - executor <- (1 to 4).map(e => s"exec$e") - index <- 0 until 10 - } { - withClue(s"exec = $executor; index = $index") { - val badExec = (executor == "exec1" || executor == "exec2") - val badIndex = (index == 0 || index == 1) - assert( - // this ignores whether the executor is blacklisted entirely for the taskset -- that is - // intentional, it keeps it fast and is sufficient for usage in the scheduler. - taskSetBlacklist.isExecutorBlacklistedForTask(executor, index) === (badExec && badIndex)) - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet(executor) === badExec) - if (badExec) { - verify(listenerBusMock).post( - SparkListenerExecutorBlacklistedForStage(0, executor, 2, 0, attemptId)) - } - } - } - assert(taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - val execToFailures = taskSetBlacklist.execToFailures - assert(execToFailures.keySet === Set("exec1", "exec2")) - - Seq("exec1", "exec2").foreach { exec => - assert( - execToFailures(exec).taskToFailureCountAndFailureTime === Map( - 0 -> ((1, 0)), - 1 -> ((1, 0)) - ) - ) - } - } - - test("multiple attempts for the same task count once") { - // Make sure that for blacklisting tasks, the node counts task attempts, not executors. But for - // stage-level blacklisting, we count unique tasks. The reason for this difference is, with - // task-attempt blacklisting, we want to make it easy to configure so that you ensure a node - // is blacklisted before the taskset is completely aborted because of spark.task.maxFailures. - // But with stage-blacklisting, we want to make sure we're not just counting one bad task - // that has failed many times. 
- - val conf = new SparkConf().setMaster("local").setAppName("test") - .set(config.MAX_TASK_ATTEMPTS_PER_EXECUTOR, 2) - .set(config.MAX_TASK_ATTEMPTS_PER_NODE, 3) - .set(config.MAX_FAILURES_PER_EXEC_STAGE, 2) - .set(config.MAX_FAILED_EXEC_PER_NODE_STAGE, 3) - val clock = new ManualClock - - val attemptId = 0 - val taskSetBlacklist = new TaskSetBlacklist( - listenerBusMock, conf, stageId = 0, stageAttemptId = attemptId, clock = clock) - - var time = 0 - clock.setTime(time) - // Fail a task twice on hostA, exec:1 - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 0, failureReason = "testing") - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 0, failureReason = "testing") - assert(taskSetBlacklist.isExecutorBlacklistedForTask("1", 0)) - assert(!taskSetBlacklist.isNodeBlacklistedForTask("hostA", 0)) - - assert(!taskSetBlacklist.isExecutorBlacklistedForTaskSet("1")) - verify(listenerBusMock, never()).post( - SparkListenerExecutorBlacklistedForStage(time, "1", 2, 0, attemptId)) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()).post( - SparkListenerNodeBlacklistedForStage(time, "hostA", 2, 0, attemptId)) - - // Fail the same task once more on hostA, exec:2 - time += 1 - clock.setTime(time) - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "2", index = 0, failureReason = "testing") - assert(taskSetBlacklist.isNodeBlacklistedForTask("hostA", 0)) - - assert(!taskSetBlacklist.isExecutorBlacklistedForTaskSet("2")) - verify(listenerBusMock, never()).post( - SparkListenerExecutorBlacklistedForStage(time, "2", 2, 0, attemptId)) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()).post( - SparkListenerNodeBlacklistedForStage(time, "hostA", 2, 0, attemptId)) - - // Fail another task on hostA, exec:1. Now that executor has failures on two different tasks, - // so its blacklisted - time += 1 - clock.setTime(time) - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 1, failureReason = "testing") - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("1")) - verify(listenerBusMock) - .post(SparkListenerExecutorBlacklistedForStage(time, "1", 2, 0, attemptId)) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()) - .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) - - // Fail a third task on hostA, exec:2, so that exec is blacklisted for the whole task set - time += 1 - clock.setTime(time) - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "2", index = 2, failureReason = "testing") - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("2")) - verify(listenerBusMock) - .post(SparkListenerExecutorBlacklistedForStage(time, "2", 2, 0, attemptId)) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()) - .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) - - // Fail a fourth & fifth task on hostA, exec:3. Now we've got three executors that are - // blacklisted for the taskset, so blacklist the whole node. 
- time += 1 - clock.setTime(time) - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "3", index = 3, failureReason = "testing") - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "3", index = 4, failureReason = "testing") - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("3")) - verify(listenerBusMock) - .post(SparkListenerExecutorBlacklistedForStage(time, "3", 2, 0, attemptId)) - - assert(taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock).post( - SparkListenerNodeBlacklistedForStage(time, "hostA", 3, 0, attemptId)) - } - - test("only blacklist nodes for the task set when all the blacklisted executors are all on " + - "same host") { - // we blacklist executors on two different hosts within one taskSet -- make sure that doesn't - // lead to any node blacklisting - val conf = new SparkConf().setAppName("test").setMaster("local") - .set(config.BLACKLIST_ENABLED.key, "true") - val clock = new ManualClock - - val attemptId = 0 - val taskSetBlacklist = new TaskSetBlacklist( - listenerBusMock, conf, stageId = 0, stageAttemptId = attemptId, clock = clock) - var time = 0 - clock.setTime(time) - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 0, failureReason = "testing") - taskSetBlacklist.updateBlacklistForFailedTask( - "hostA", exec = "1", index = 1, failureReason = "testing") - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("1")) - verify(listenerBusMock) - .post(SparkListenerExecutorBlacklistedForStage(time, "1", 2, 0, attemptId)) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - verify(listenerBusMock, never()).post( - SparkListenerNodeBlacklistedForStage(time, "hostA", 2, 0, attemptId)) - - time += 1 - clock.setTime(time) - taskSetBlacklist.updateBlacklistForFailedTask( - "hostB", exec = "2", index = 0, failureReason = "testing") - taskSetBlacklist.updateBlacklistForFailedTask( - "hostB", exec = "2", index = 1, failureReason = "testing") - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("1")) - - assert(taskSetBlacklist.isExecutorBlacklistedForTaskSet("2")) - verify(listenerBusMock) - .post(SparkListenerExecutorBlacklistedForStage(time, "2", 2, 0, attemptId)) - - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostA")) - assert(!taskSetBlacklist.isNodeBlacklistedForTaskSet("hostB")) - verify(listenerBusMock, never()) - .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) - } - -} diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSetExcludelistSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSetExcludelistSuite.scala new file mode 100644 index 0000000000000..d20768d7cd12b --- /dev/null +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSetExcludelistSuite.scala @@ -0,0 +1,310 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.spark.scheduler + +import org.mockito.ArgumentMatchers.isA +import org.mockito.Mockito.{never, verify} +import org.scalatest.BeforeAndAfterEach +import org.scalatestplus.mockito.MockitoSugar + +import org.apache.spark.{SparkConf, SparkFunSuite} +import org.apache.spark.internal.config +import org.apache.spark.util.ManualClock + +class TaskSetExcludelistSuite extends SparkFunSuite with BeforeAndAfterEach with MockitoSugar { + + private var listenerBusMock: LiveListenerBus = _ + + override def beforeEach(): Unit = { + listenerBusMock = mock[LiveListenerBus] + super.beforeEach() + } + + test("Excluding tasks, executors, and nodes") { + val conf = new SparkConf().setAppName("test").setMaster("local") + .set(config.EXCLUDE_ON_FAILURE_ENABLED.key, "true") + val clock = new ManualClock + val attemptId = 0 + val taskSetExcludelist = new TaskSetExcludelist( + listenerBusMock, conf, stageId = 0, stageAttemptId = attemptId, clock = clock) + + clock.setTime(0) + // We will mark task 0 & 1 failed on both executor 1 & 2. + // We should exclude all executors on that host, for all tasks for the stage. Note the API + // will return false for isExecutorExcludedForTaskSet even when the node is excluded, so + // the executor is implicitly excluded (this makes sense with how the scheduler uses the + // excludelist) + + // First, mark task 0 as failed on exec1. + // task 0 should be excluded on exec1, and nowhere else + taskSetExcludelist.updateExcludedForFailedTask( + "hostA", exec = "exec1", index = 0, failureReason = "testing") + for { + executor <- (1 to 4).map(_.toString) + index <- 0 until 10 + } { + val shouldBeExcluded = (executor == "exec1" && index == 0) + assert(taskSetExcludelist.isExecutorExcludedForTask(executor, index) === shouldBeExcluded) + } + + assert(!taskSetExcludelist.isExecutorExcludedForTaskSet("exec1")) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerExecutorExcludedForStage])) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerExecutorBlacklistedForStage])) + + assert(!taskSetExcludelist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeExcludedForStage])) + + // Mark task 1 failed on exec1 -- this pushes the executor into the excludelist + taskSetExcludelist.updateExcludedForFailedTask( + "hostA", exec = "exec1", index = 1, failureReason = "testing") + + assert(taskSetExcludelist.isExecutorExcludedForTaskSet("exec1")) + verify(listenerBusMock).post( + SparkListenerExecutorExcludedForStage(0, "exec1", 2, 0, attemptId)) + verify(listenerBusMock).post( + SparkListenerExecutorBlacklistedForStage(0, "exec1", 2, 0, attemptId)) + + + assert(!taskSetExcludelist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeExcludedForStage])) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) + + // Mark one task as failed on exec2 -- not enough for any further excluding yet.
+ taskSetExcludelist.updateExcludedForFailedTask( + "hostA", exec = "exec2", index = 0, failureReason = "testing") + assert(taskSetExcludelist.isExecutorExcludedForTaskSet("exec1")) + + assert(!taskSetExcludelist.isExecutorExcludedForTaskSet("exec2")) + + assert(!taskSetExcludelist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeExcludedForStage])) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) + + // Mark another task as failed on exec2 -- now we exclude exec2, which also leads to + // excluding the entire node. + taskSetExcludelist.updateExcludedForFailedTask( + "hostA", exec = "exec2", index = 1, failureReason = "testing") + + assert(taskSetExcludelist.isExecutorExcludedForTaskSet("exec1")) + + assert(taskSetExcludelist.isExecutorExcludedForTaskSet("exec2")) + verify(listenerBusMock).post( + SparkListenerExecutorExcludedForStage(0, "exec2", 2, 0, attemptId)) + verify(listenerBusMock).post( + SparkListenerExecutorBlacklistedForStage(0, "exec2", 2, 0, attemptId)) + + assert(taskSetExcludelist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock).post( + SparkListenerNodeExcludedForStage(0, "hostA", 2, 0, attemptId)) + verify(listenerBusMock).post( + SparkListenerNodeBlacklistedForStage(0, "hostA", 2, 0, attemptId)) + + // Make sure the exclude has the correct per-task && per-executor responses, over a wider + // range of inputs. + for { + executor <- (1 to 4).map(e => s"exec$e") + index <- 0 until 10 + } { + withClue(s"exec = $executor; index = $index") { + val badExec = (executor == "exec1" || executor == "exec2") + val badIndex = (index == 0 || index == 1) + assert( + // this ignores whether the executor is excluded entirely for the taskset -- that is + // intentional, it keeps it fast and is sufficient for usage in the scheduler. + taskSetExcludelist.isExecutorExcludedForTask(executor, index) === (badExec && badIndex)) + assert(taskSetExcludelist.isExecutorExcludedForTaskSet(executor) === badExec) + if (badExec) { + verify(listenerBusMock).post( + SparkListenerExecutorExcludedForStage(0, executor, 2, 0, attemptId)) + verify(listenerBusMock).post( + SparkListenerExecutorBlacklistedForStage(0, executor, 2, 0, attemptId)) + } + } + } + assert(taskSetExcludelist.isNodeExcludedForTaskSet("hostA")) + val execToFailures = taskSetExcludelist.execToFailures + assert(execToFailures.keySet === Set("exec1", "exec2")) + + Seq("exec1", "exec2").foreach { exec => + assert( + execToFailures(exec).taskToFailureCountAndFailureTime === Map( + 0 -> ((1, 0)), + 1 -> ((1, 0)) + ) + ) + } + } + + test("multiple attempts for the same task count once") { + // Make sure that for excluding tasks, the node counts task attempts, not executors. But for + // stage-level excluding, we count unique tasks. The reason for this difference is, with + // task-attempt excluding, we want to make it easy to configure so that you ensure a node + // is excluded before the taskset is completely aborted because of spark.task.maxFailures. + // But with stage-excluding, we want to make sure we're not just counting one bad task + // that has failed many times. 
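The comment above distinguishes per-task attempt counting (per executor and per node) from per-stage counting of distinct failed tasks; the test that follows wires those thresholds through internal config constants. For reference, a minimal sketch of the equivalent driver-side settings, assuming the public key names sit under the renamed spark.excludeOnFailure.* namespace (the exact strings are not spelled out in this patch):

```scala
import org.apache.spark.SparkConf

// Sketch only; key names are assumed, values are illustrative.
val conf = new SparkConf()
  .set("spark.excludeOnFailure.enabled", "true")
  // Per-task limits: attempts of one task on one executor / anywhere on one node.
  .set("spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor", "2")
  .set("spark.excludeOnFailure.task.maxTaskAttemptsPerNode", "3")
  // Per-stage limits: distinct failed tasks per executor, and excluded executors per node.
  .set("spark.excludeOnFailure.stage.maxFailedTasksPerExecutor", "2")
  .set("spark.excludeOnFailure.stage.maxFailedExecutorsPerNode", "3")
```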
+ + val conf = new SparkConf().setMaster("local").setAppName("test") + .set(config.MAX_TASK_ATTEMPTS_PER_EXECUTOR, 2) + .set(config.MAX_TASK_ATTEMPTS_PER_NODE, 3) + .set(config.MAX_FAILURES_PER_EXEC_STAGE, 2) + .set(config.MAX_FAILED_EXEC_PER_NODE_STAGE, 3) + val clock = new ManualClock + + val attemptId = 0 + val taskSetExcludlist = new TaskSetExcludelist( + listenerBusMock, conf, stageId = 0, stageAttemptId = attemptId, clock = clock) + + var time = 0 + clock.setTime(time) + // Fail a task twice on hostA, exec:1 + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "1", index = 0, failureReason = "testing") + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "1", index = 0, failureReason = "testing") + assert(taskSetExcludlist.isExecutorExcludedForTask("1", 0)) + assert(!taskSetExcludlist.isNodeExcludedForTask("hostA", 0)) + + assert(!taskSetExcludlist.isExecutorExcludedForTaskSet("1")) + verify(listenerBusMock, never()).post( + SparkListenerExecutorExcludedForStage(time, "1", 2, 0, attemptId)) + + assert(!taskSetExcludlist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()).post( + SparkListenerNodeExcludedForStage(time, "hostA", 2, 0, attemptId)) + + // Fail the same task once more on hostA, exec:2 + time += 1 + clock.setTime(time) + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "2", index = 0, failureReason = "testing") + assert(taskSetExcludlist.isNodeExcludedForTask("hostA", 0)) + + assert(!taskSetExcludlist.isExecutorExcludedForTaskSet("2")) + verify(listenerBusMock, never()).post( + SparkListenerExecutorExcludedForStage(time, "2", 2, 0, attemptId)) + + assert(!taskSetExcludlist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()).post( + SparkListenerNodeExcludedForStage(time, "hostA", 2, 0, attemptId)) + + // Fail another task on hostA, exec:1. Now that executor has failures on two different tasks, + // so its excluded + time += 1 + clock.setTime(time) + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "1", index = 1, failureReason = "testing") + + assert(taskSetExcludlist.isExecutorExcludedForTaskSet("1")) + verify(listenerBusMock) + .post(SparkListenerExecutorExcludedForStage(time, "1", 2, 0, attemptId)) + + assert(!taskSetExcludlist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeExcludedForStage])) + + // Fail a third task on hostA, exec:2, so that exec is excluded for the whole task set + time += 1 + clock.setTime(time) + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "2", index = 2, failureReason = "testing") + + assert(taskSetExcludlist.isExecutorExcludedForTaskSet("2")) + verify(listenerBusMock) + .post(SparkListenerExecutorExcludedForStage(time, "2", 2, 0, attemptId)) + + assert(!taskSetExcludlist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeExcludedForStage])) + + // Fail a fourth & fifth task on hostA, exec:3. Now we've got three executors that are + // excluded for the taskset, so exclude the whole node. 
+ time += 1 + clock.setTime(time) + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "3", index = 3, failureReason = "testing") + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "3", index = 4, failureReason = "testing") + + assert(taskSetExcludlist.isExecutorExcludedForTaskSet("3")) + verify(listenerBusMock) + .post(SparkListenerExecutorExcludedForStage(time, "3", 2, 0, attemptId)) + + assert(taskSetExcludlist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock).post( + SparkListenerNodeExcludedForStage(time, "hostA", 3, 0, attemptId)) + } + + test("only exclude nodes for the task set when all the excluded executors are all on " + + "same host") { + // we exclude executors on two different hosts within one taskSet -- make sure that doesn't + // lead to any node excluding + val conf = new SparkConf().setAppName("test").setMaster("local") + .set(config.EXCLUDE_ON_FAILURE_ENABLED.key, "true") + val clock = new ManualClock + + val attemptId = 0 + val taskSetExcludlist = new TaskSetExcludelist( + listenerBusMock, conf, stageId = 0, stageAttemptId = attemptId, clock = clock) + var time = 0 + clock.setTime(time) + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "1", index = 0, failureReason = "testing") + taskSetExcludlist.updateExcludedForFailedTask( + "hostA", exec = "1", index = 1, failureReason = "testing") + + assert(taskSetExcludlist.isExecutorExcludedForTaskSet("1")) + verify(listenerBusMock) + .post(SparkListenerExecutorExcludedForStage(time, "1", 2, 0, attemptId)) + verify(listenerBusMock) + .post(SparkListenerExecutorBlacklistedForStage(time, "1", 2, 0, attemptId)) + + assert(!taskSetExcludlist.isNodeExcludedForTaskSet("hostA")) + verify(listenerBusMock, never()).post( + SparkListenerNodeExcludedForStage(time, "hostA", 2, 0, attemptId)) + verify(listenerBusMock, never()).post( + SparkListenerNodeBlacklistedForStage(time, "hostA", 2, 0, attemptId)) + + time += 1 + clock.setTime(time) + taskSetExcludlist.updateExcludedForFailedTask( + "hostB", exec = "2", index = 0, failureReason = "testing") + taskSetExcludlist.updateExcludedForFailedTask( + "hostB", exec = "2", index = 1, failureReason = "testing") + assert(taskSetExcludlist.isExecutorExcludedForTaskSet("1")) + + assert(taskSetExcludlist.isExecutorExcludedForTaskSet("2")) + verify(listenerBusMock) + .post(SparkListenerExecutorExcludedForStage(time, "2", 2, 0, attemptId)) + verify(listenerBusMock) + .post(SparkListenerExecutorBlacklistedForStage(time, "2", 2, 0, attemptId)) + + assert(!taskSetExcludlist.isNodeExcludedForTaskSet("hostA")) + assert(!taskSetExcludlist.isNodeExcludedForTaskSet("hostB")) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeExcludedForStage])) + verify(listenerBusMock, never()) + .post(isA(classOf[SparkListenerNodeBlacklistedForStage])) + } + +} diff --git a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala index c389fd2ffa8b1..3bf6cc226c0aa 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala @@ -377,19 +377,19 @@ class TaskSetManagerSuite // offers not accepted due to task set zombies are not delay schedule rejects manager.isZombie = true - val (taskDesciption, delayReject) = manager.resourceOffer("exec2", "host2", ANY) - assert(taskDesciption.isEmpty) + val (taskDescription, delayReject) = manager.resourceOffer("exec2", 
"host2", ANY) + assert(taskDescription.isEmpty) assert(delayReject === false) manager.isZombie = false - // offers not accepted due to blacklisting are not delay schedule rejects + // offers not accepted due to excludelist are not delay schedule rejects val tsmSpy = spy(manager) - val blacklist = mock(classOf[TaskSetBlacklist]) - when(tsmSpy.taskSetBlacklistHelperOpt).thenReturn(Some(blacklist)) - when(blacklist.isNodeBlacklistedForTaskSet(any())).thenReturn(true) - val (blacklistTask, blackListReject) = tsmSpy.resourceOffer("exec2", "host2", ANY) - assert(blacklistTask.isEmpty) - assert(blackListReject === false) + val excludelist = mock(classOf[TaskSetExcludelist]) + when(tsmSpy.taskSetExcludelistHelperOpt).thenReturn(Some(excludelist)) + when(excludelist.isNodeExcludedForTaskSet(any())).thenReturn(true) + val (task, taskReject) = tsmSpy.resourceOffer("exec2", "host2", ANY) + assert(task.isEmpty) + assert(taskReject === false) // After another delay, we can go ahead and launch that task non-locally assert(manager.resourceOffer("exec2", "host2", ANY)._1.get.index === 3) @@ -479,11 +479,11 @@ class TaskSetManagerSuite } } - test("executors should be blacklisted after task failure, in spite of locality preferences") { + test("executors should be excluded after task failure, in spite of locality preferences") { val rescheduleDelay = 300L val conf = new SparkConf(). - set(config.BLACKLIST_ENABLED, true). - set(config.BLACKLIST_TIMEOUT_CONF, rescheduleDelay). + set(config.EXCLUDE_ON_FAILURE_ENABLED, true). + set(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF, rescheduleDelay). // don't wait to jump locality levels in this test set(config.LOCALITY_WAIT.key, "0") @@ -495,11 +495,11 @@ class TaskSetManagerSuite val taskSet = FakeTask.createTaskSet(1, Seq(TaskLocation("host1", "exec1"))) val clock = new ManualClock clock.advance(1) - // We don't directly use the application blacklist, but its presence triggers blacklisting + // We don't directly use the application excludelist, but its presence triggers exclusion // within the taskset. 
val mockListenerBus = mock(classOf[LiveListenerBus]) - val blacklistTrackerOpt = Some(new BlacklistTracker(mockListenerBus, conf, None, clock)) - val manager = new TaskSetManager(sched, taskSet, 4, blacklistTrackerOpt, clock) + val healthTrackerOpt = Some(new HealthTracker(mockListenerBus, conf, None, clock)) + val manager = new TaskSetManager(sched, taskSet, 4, healthTrackerOpt, clock) { val offerResult = manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)._1 @@ -512,7 +512,7 @@ class TaskSetManagerSuite manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost) assert(!sched.taskSetsFailed.contains(taskSet.id)) - // Ensure scheduling on exec1 fails after failure 1 due to blacklist + // Ensure scheduling on exec1 fails after failure 1 due to executor being excluded assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)._1.isEmpty) assert(manager.resourceOffer("exec1", "host1", NODE_LOCAL)._1.isEmpty) assert(manager.resourceOffer("exec1", "host1", RACK_LOCAL)._1.isEmpty) @@ -532,7 +532,7 @@ class TaskSetManagerSuite manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost) assert(!sched.taskSetsFailed.contains(taskSet.id)) - // Ensure scheduling on exec1.1 fails after failure 2 due to blacklist + // Ensure scheduling on exec1.1 fails after failure 2 due to executor being excluded assert(manager.resourceOffer("exec1.1", "host1", NODE_LOCAL)._1.isEmpty) } @@ -548,12 +548,12 @@ class TaskSetManagerSuite manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost) assert(!sched.taskSetsFailed.contains(taskSet.id)) - // Ensure scheduling on exec2 fails after failure 3 due to blacklist + // Ensure scheduling on exec2 fails after failure 3 due to executor being excluded assert(manager.resourceOffer("exec2", "host2", ANY)._1.isEmpty) } - // Despite advancing beyond the time for expiring executors from within the blacklist, - // we *never* expire from *within* the stage blacklist + // Despite advancing beyond the time for expiring executors from within the excludelist, + // we *never* expire from *within* the stage excludelist clock.advance(rescheduleDelay) { @@ -1322,7 +1322,7 @@ class TaskSetManagerSuite test("SPARK-19868: DagScheduler only notified of taskEnd when state is ready") { // dagScheduler.taskEnded() is async, so it may *seem* ok to call it before we've set all - // appropriate state, eg. isZombie. However, this sets up a race that could go the wrong way. + // appropriate state, e.g. isZombie. However, this sets up a race that could go the wrong way. // This is a super-focused regression test which checks the zombie state as soon as // dagScheduler.taskEnded() is called, to ensure we haven't introduced a race. sc = new SparkContext("local", "test") @@ -1358,20 +1358,20 @@ class TaskSetManagerSuite assert(manager3.name === "TaskSet_1.1") } - test("don't update blacklist for shuffle-fetch failures, preemption, denied commits, " + + test("don't update excludelist for shuffle-fetch failures, preemption, denied commits, " + "or killed tasks") { // Setup a taskset, and fail some tasks for a fetch failure, preemption, denied commit, // and killed task. val conf = new SparkConf(). 
- set(config.BLACKLIST_ENABLED, true) + set(config.EXCLUDE_ON_FAILURE_ENABLED, true) sc = new SparkContext("local", "test", conf) sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2")) val taskSet = FakeTask.createTaskSet(4) val tsm = new TaskSetManager(sched, taskSet, 4) - // we need a spy so we can attach our mock blacklist + // we need a spy so we can attach our mock excludelist val tsmSpy = spy(tsm) - val blacklist = mock(classOf[TaskSetBlacklist]) - when(tsmSpy.taskSetBlacklistHelperOpt).thenReturn(Some(blacklist)) + val excludelist = mock(classOf[TaskSetExcludelist]) + when(tsmSpy.taskSetExcludelistHelperOpt).thenReturn(Some(excludelist)) // make some offers to our taskset, to get tasks we will fail val taskDescs = Seq( @@ -1392,23 +1392,23 @@ class TaskSetManagerSuite TaskCommitDenied(0, 2, 0)) tsmSpy.handleFailedTask(taskDescs(3).taskId, TaskState.KILLED, TaskKilled("test")) - // Make sure that the blacklist ignored all of the task failures above, since they aren't + // Make sure that the excludelist ignored all of the task failures above, since they aren't // the fault of the executor where the task was running. - verify(blacklist, never()) - .updateBlacklistForFailedTask(anyString(), anyString(), anyInt(), anyString()) + verify(excludelist, never()) + .updateExcludedForFailedTask(anyString(), anyString(), anyInt(), anyString()) } - test("update application blacklist for shuffle-fetch") { + test("update application healthTracker for shuffle-fetch") { // Setup a taskset, and fail some one task for fetch failure. val conf = new SparkConf() - .set(config.BLACKLIST_ENABLED, true) + .set(config.EXCLUDE_ON_FAILURE_ENABLED, true) .set(config.SHUFFLE_SERVICE_ENABLED, true) - .set(config.BLACKLIST_FETCH_FAILURE_ENABLED, true) + .set(config.EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED, true) sc = new SparkContext("local", "test", conf) sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2")) val taskSet = FakeTask.createTaskSet(4) - val blacklistTracker = new BlacklistTracker(sc, None) - val tsm = new TaskSetManager(sched, taskSet, 4, Some(blacklistTracker)) + val healthTracker = new HealthTracker(sc, None) + val tsm = new TaskSetManager(sched, taskSet, 4, Some(healthTracker)) // make some offers to our taskset, to get tasks we will fail val taskDescs = Seq( @@ -1420,22 +1420,22 @@ class TaskSetManagerSuite } assert(taskDescs.size === 4) - assert(!blacklistTracker.isExecutorBlacklisted(taskDescs(0).executorId)) - assert(!blacklistTracker.isNodeBlacklisted("host1")) + assert(!healthTracker.isExecutorExcluded(taskDescs(0).executorId)) + assert(!healthTracker.isNodeExcluded("host1")) // Fail the task with fetch failure tsm.handleFailedTask(taskDescs(0).taskId, TaskState.FAILED, FetchFailed(BlockManagerId(taskDescs(0).executorId, "host1", 12345), 0, 0L, 0, 0, "ignored")) - assert(blacklistTracker.isNodeBlacklisted("host1")) + assert(healthTracker.isNodeExcluded("host1")) } - test("update blacklist before adding pending task to avoid race condition") { - // When a task fails, it should apply the blacklist policy prior to + test("update healthTracker before adding pending task to avoid race condition") { + // When a task fails, it should apply the excludeOnFailure policy prior to // retrying the task otherwise there's a race condition where run on // the same executor that it was intended to be black listed from. val conf = new SparkConf(). - set(config.BLACKLIST_ENABLED, true) + set(config.EXCLUDE_ON_FAILURE_ENABLED, true) // Create a task with two executors. 
sc = new SparkContext("local", "test", conf) @@ -1448,8 +1448,8 @@ class TaskSetManagerSuite val clock = new ManualClock val mockListenerBus = mock(classOf[LiveListenerBus]) - val blacklistTracker = new BlacklistTracker(mockListenerBus, conf, None, clock) - val taskSetManager = new TaskSetManager(sched, taskSet, 1, Some(blacklistTracker)) + val healthTracker = new HealthTracker(mockListenerBus, conf, None, clock) + val taskSetManager = new TaskSetManager(sched, taskSet, 1, Some(healthTracker)) val taskSetManagerSpy = spy(taskSetManager) val taskDesc = taskSetManagerSpy.resourceOffer(exec, host, TaskLocality.ANY)._1 @@ -1458,8 +1458,8 @@ class TaskSetManagerSuite when(taskSetManagerSpy.addPendingTask(anyInt(), anyBoolean(), anyBoolean())).thenAnswer( (invocationOnMock: InvocationOnMock) => { val task: Int = invocationOnMock.getArgument(0) - assert(taskSetManager.taskSetBlacklistHelperOpt.get. - isExecutorBlacklistedForTask(exec, task)) + assert(taskSetManager.taskSetExcludelistHelperOpt.get. + isExecutorExcludedForTask(exec, task)) } ) @@ -1768,7 +1768,6 @@ class TaskSetManagerSuite } test("TaskSetManager passes task resource along") { - import TestUtils._ sc = new SparkContext("local", "test") sc.conf.set(TASK_GPU_ID.amountConf, "2") diff --git a/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionSuite.scala index 83bb66efdac9e..1c2326db6dc99 100644 --- a/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionSuite.scala +++ b/core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionSuite.scala @@ -19,19 +19,17 @@ package org.apache.spark.scheduler import java.util.concurrent.Semaphore -import scala.concurrent.TimeoutException import scala.concurrent.duration._ -import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkException, SparkFunSuite, - TestUtils} +import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite, TestUtils} import org.apache.spark.internal.config import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend -import org.apache.spark.util.{RpcUtils, SerializableBuffer, ThreadUtils} +import org.apache.spark.util.ThreadUtils class WorkerDecommissionSuite extends SparkFunSuite with LocalSparkContext { override def beforeEach(): Unit = { - val conf = new SparkConf().setAppName("test").setMaster("local") + val conf = new SparkConf().setAppName("test") .set(config.DECOMMISSION_ENABLED, true) sc = new SparkContext("local-cluster[2, 1, 1024]", "test", conf) @@ -78,7 +76,10 @@ class WorkerDecommissionSuite extends SparkFunSuite with LocalSparkContext { val execs = sched.getExecutorIds() // Make the executors decommission, finish, exit, and not be replaced. 
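The call below is updated for the extended decommissionExecutors signature, which now takes triggeredByExecutor alongside adjustTargetNumExecutors. A sketch of the call shape with the intent of each flag spelled out; sched and execs are placeholders for the scheduler backend and executor ids already in scope in this test:

```scala
// Sketch only; `sched` and `execs` stand in for values the test already holds.
val execsAndDecomInfo = execs.map((_, ExecutorDecommissionInfo("", None))).toArray
sched.decommissionExecutors(
  execsAndDecomInfo,
  adjustTargetNumExecutors = true,  // also lower the requested executor count so none are replaced
  triggeredByExecutor = false)      // driver-initiated, not a notification from the executor itself
```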
val execsAndDecomInfo = execs.map((_, ExecutorDecommissionInfo("", None))).toArray - sched.decommissionExecutors(execsAndDecomInfo, adjustTargetNumExecutors = true) + sched.decommissionExecutors( + execsAndDecomInfo, + adjustTargetNumExecutors = true, + triggeredByExecutor = false) val asyncCountResult = ThreadUtils.awaitResult(asyncCount, 20.seconds) assert(asyncCountResult === 10) } diff --git a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerDistributedSuite.scala b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerDistributedSuite.scala index 397fdce8ae6e3..4acb4bbc779c3 100644 --- a/core/src/test/scala/org/apache/spark/serializer/KryoSerializerDistributedSuite.scala +++ b/core/src/test/scala/org/apache/spark/serializer/KryoSerializerDistributedSuite.scala @@ -31,7 +31,7 @@ class KryoSerializerDistributedSuite extends SparkFunSuite with LocalSparkContex .set(config.SERIALIZER, "org.apache.spark.serializer.KryoSerializer") .set(config.Kryo.KRYO_USER_REGISTRATORS, Seq(classOf[AppJarRegistrator].getName)) .set(config.TASK_MAX_FAILURES, 1) - .set(config.BLACKLIST_ENABLED, false) + .set(config.EXCLUDE_ON_FAILURE_ENABLED, false) val jar = TestUtils.createJarWithClasses(List(AppJarRegistrator.customClassName)) conf.setJars(List(jar.getPath)) diff --git a/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala index 12c40f4462c7c..8f0c4da88feb2 100644 --- a/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala +++ b/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala @@ -58,6 +58,7 @@ class HostLocalShuffleReadingSuite extends SparkFunSuite with Matchers with Loca val conf = new SparkConf() .set(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, true) + import scala.language.existentials val (essStatus, blockStoreClientClass) = if (isESSEnabled) { // LocalSparkCluster will disable the ExternalShuffleService by default. Therefore, // we have to manually setup an server which embedded with ExternalBlockHandler to diff --git a/core/src/test/scala/org/apache/spark/shuffle/ShuffleBlockPusherSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/ShuffleBlockPusherSuite.scala new file mode 100644 index 0000000000000..cc561e6106019 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/shuffle/ShuffleBlockPusherSuite.scala @@ -0,0 +1,355 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.shuffle + +import java.io.File +import java.net.ConnectException +import java.nio.ByteBuffer +import java.util.concurrent.LinkedBlockingQueue + +import scala.collection.mutable.ArrayBuffer + +import org.mockito.{Mock, MockitoAnnotations} +import org.mockito.Answers.RETURNS_SMART_NULLS +import org.mockito.ArgumentMatchers.any +import org.mockito.Mockito._ +import org.mockito.invocation.InvocationOnMock +import org.scalatest.BeforeAndAfterEach + +import org.apache.spark._ +import org.apache.spark.network.buffer.ManagedBuffer +import org.apache.spark.network.shuffle.{BlockFetchingListener, BlockStoreClient} +import org.apache.spark.network.shuffle.ErrorHandler.BlockPushErrorHandler +import org.apache.spark.network.util.TransportConf +import org.apache.spark.serializer.JavaSerializer +import org.apache.spark.shuffle.ShuffleBlockPusher.PushRequest +import org.apache.spark.storage._ + +class ShuffleBlockPusherSuite extends SparkFunSuite with BeforeAndAfterEach { + + @Mock(answer = RETURNS_SMART_NULLS) private var blockManager: BlockManager = _ + @Mock(answer = RETURNS_SMART_NULLS) private var dependency: ShuffleDependency[Int, Int, Int] = _ + @Mock(answer = RETURNS_SMART_NULLS) private var shuffleClient: BlockStoreClient = _ + + private var conf: SparkConf = _ + private var pushedBlocks = new ArrayBuffer[String] + + override def beforeEach(): Unit = { + super.beforeEach() + conf = new SparkConf(loadDefaults = false) + MockitoAnnotations.initMocks(this) + when(dependency.partitioner).thenReturn(new HashPartitioner(8)) + when(dependency.serializer).thenReturn(new JavaSerializer(conf)) + when(dependency.getMergerLocs).thenReturn(Seq(BlockManagerId("test-client", "test-client", 1))) + conf.set("spark.shuffle.push.based.enabled", "true") + conf.set("spark.shuffle.service.enabled", "true") + // Set the env because the shuffler writer gets the shuffle client instance from the env. 
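This new suite drives push-based shuffle entirely through configuration plus a mocked SparkEnv and BlockStoreClient. The knobs it exercises are collected below as a sketch; the key strings are the ones used by these tests, while the values and comments summarize the behavior the individual tests assert:

```scala
import org.apache.spark.SparkConf

// Keys as used by the tests in this suite; values here are illustrative.
val conf = new SparkConf(loadDefaults = false)
  .set("spark.shuffle.push.based.enabled", "true")        // turn push-based shuffle on
  .set("spark.shuffle.service.enabled", "true")           // push-based shuffle relies on the shuffle service
  .set("spark.shuffle.push.maxBlockBatchSize", "1m")      // cap on the total size of one push request
  .set("spark.shuffle.push.maxBlockSizeToPush", "1k")     // blocks larger than this are skipped for push
  .set("spark.reducer.maxBlocksInFlightPerAddress", "2")  // cap on blocks in flight to a single address
```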
+ val mockEnv = mock(classOf[SparkEnv]) + when(mockEnv.conf).thenReturn(conf) + when(mockEnv.blockManager).thenReturn(blockManager) + SparkEnv.set(mockEnv) + when(blockManager.blockStoreClient).thenReturn(shuffleClient) + } + + override def afterEach(): Unit = { + pushedBlocks.clear() + super.afterEach() + } + + private def interceptPushedBlocksForSuccess(): Unit = { + when(shuffleClient.pushBlocks(any(), any(), any(), any(), any())) + .thenAnswer((invocation: InvocationOnMock) => { + val blocks = invocation.getArguments()(2).asInstanceOf[Array[String]] + pushedBlocks ++= blocks + val managedBuffers = invocation.getArguments()(3).asInstanceOf[Array[ManagedBuffer]] + val blockFetchListener = invocation.getArguments()(4).asInstanceOf[BlockFetchingListener] + (blocks, managedBuffers).zipped.foreach((blockId, buffer) => { + blockFetchListener.onBlockFetchSuccess(blockId, buffer) + }) + }) + } + + private def verifyPushRequests( + pushRequests: Seq[PushRequest], + expectedSizes: Seq[Int]): Unit = { + (pushRequests, expectedSizes).zipped.foreach((req, size) => { + assert(req.size == size) + }) + } + + test("A batch of blocks is limited by maxBlocksBatchSize") { + conf.set("spark.shuffle.push.maxBlockBatchSize", "1m") + conf.set("spark.shuffle.push.maxBlockSizeToPush", "2048k") + val blockPusher = new TestShuffleBlockPusher(conf) + val mergerLocs = dependency.getMergerLocs.map(loc => BlockManagerId("", loc.host, loc.port)) + val largeBlockSize = 2 * 1024 * 1024 + val pushRequests = blockPusher.prepareBlockPushRequests(5, 0, 0, + mock(classOf[File]), Array(2, 2, 2, largeBlockSize, largeBlockSize), mergerLocs, + mock(classOf[TransportConf])) + assert(pushRequests.length == 3) + verifyPushRequests(pushRequests, Seq(6, largeBlockSize, largeBlockSize)) + } + + test("Large blocks are excluded in the preparation") { + conf.set("spark.shuffle.push.maxBlockSizeToPush", "1k") + val blockPusher = new TestShuffleBlockPusher(conf) + val mergerLocs = dependency.getMergerLocs.map(loc => BlockManagerId("", loc.host, loc.port)) + val pushRequests = blockPusher.prepareBlockPushRequests(5, 0, 0, + mock(classOf[File]), Array(2, 2, 2, 1028, 1024), mergerLocs, mock(classOf[TransportConf])) + assert(pushRequests.length == 2) + verifyPushRequests(pushRequests, Seq(6, 1024)) + } + + test("Number of blocks in a push request are limited by maxBlocksInFlightPerAddress ") { + conf.set("spark.reducer.maxBlocksInFlightPerAddress", "1") + val blockPusher = new TestShuffleBlockPusher(conf) + val mergerLocs = dependency.getMergerLocs.map(loc => BlockManagerId("", loc.host, loc.port)) + val pushRequests = blockPusher.prepareBlockPushRequests(5, 0, 0, + mock(classOf[File]), Array(2, 2, 2, 2, 2), mergerLocs, mock(classOf[TransportConf])) + assert(pushRequests.length == 5) + verifyPushRequests(pushRequests, Seq(2, 2, 2, 2, 2)) + } + + test("Basic block push") { + interceptPushedBlocksForSuccess() + val blockPusher = new TestShuffleBlockPusher(conf) + blockPusher.initiateBlockPush(mock(classOf[File]), + Array.fill(dependency.partitioner.numPartitions) { 2 }, dependency, 0) + blockPusher.runPendingTasks() + verify(shuffleClient, times(1)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.length == dependency.partitioner.numPartitions) + ShuffleBlockPusher.stop() + } + + test("Large blocks are skipped for push") { + conf.set("spark.shuffle.push.maxBlockSizeToPush", "1k") + interceptPushedBlocksForSuccess() + val pusher = new TestShuffleBlockPusher(conf) + pusher.initiateBlockPush( + mock(classOf[File]), Array(2, 2, 
2, 2, 2, 2, 2, 1100), dependency, 0) + pusher.runPendingTasks() + verify(shuffleClient, times(1)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.length == dependency.partitioner.numPartitions - 1) + ShuffleBlockPusher.stop() + } + + test("Number of blocks in flight per address are limited by maxBlocksInFlightPerAddress") { + conf.set("spark.reducer.maxBlocksInFlightPerAddress", "1") + interceptPushedBlocksForSuccess() + val pusher = new TestShuffleBlockPusher(conf) + pusher.initiateBlockPush( + mock(classOf[File]), Array.fill(dependency.partitioner.numPartitions) { 2 }, dependency, 0) + pusher.runPendingTasks() + verify(shuffleClient, times(8)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.length == dependency.partitioner.numPartitions) + ShuffleBlockPusher.stop() + } + + test("Hit maxBlocksInFlightPerAddress limit so that the blocks are deferred") { + conf.set("spark.reducer.maxBlocksInFlightPerAddress", "2") + var blockPendingResponse : String = null + var listener : BlockFetchingListener = null + when(shuffleClient.pushBlocks(any(), any(), any(), any(), any())) + .thenAnswer((invocation: InvocationOnMock) => { + val blocks = invocation.getArguments()(2).asInstanceOf[Array[String]] + pushedBlocks ++= blocks + val managedBuffers = invocation.getArguments()(3).asInstanceOf[Array[ManagedBuffer]] + val blockFetchListener = invocation.getArguments()(4).asInstanceOf[BlockFetchingListener] + // Expecting 2 blocks + assert(blocks.length == 2) + if (blockPendingResponse == null) { + blockPendingResponse = blocks(1) + listener = blockFetchListener + // Respond with success only for the first block which will cause all the rest of the + // blocks to be deferred + blockFetchListener.onBlockFetchSuccess(blocks(0), managedBuffers(0)) + } else { + (blocks, managedBuffers).zipped.foreach((blockId, buffer) => { + blockFetchListener.onBlockFetchSuccess(blockId, buffer) + }) + } + }) + val pusher = new TestShuffleBlockPusher(conf) + pusher.initiateBlockPush( + mock(classOf[File]), Array.fill(dependency.partitioner.numPartitions) { 2 }, dependency, 0) + pusher.runPendingTasks() + verify(shuffleClient, times(1)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.length == 2) + // this will trigger push of deferred blocks + listener.onBlockFetchSuccess(blockPendingResponse, mock(classOf[ManagedBuffer])) + pusher.runPendingTasks() + verify(shuffleClient, times(4)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.length == 8) + ShuffleBlockPusher.stop() + } + + test("Number of shuffle blocks grouped in a single push request is limited by " + + "maxBlockBatchSize") { + conf.set("spark.shuffle.push.maxBlockBatchSize", "1m") + interceptPushedBlocksForSuccess() + val pusher = new TestShuffleBlockPusher(conf) + pusher.initiateBlockPush(mock(classOf[File]), + Array.fill(dependency.partitioner.numPartitions) { 512 * 1024 }, dependency, 0) + pusher.runPendingTasks() + verify(shuffleClient, times(4)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.length == dependency.partitioner.numPartitions) + ShuffleBlockPusher.stop() + } + + test("Error retries") { + val pusher = new ShuffleBlockPusher(conf) + val errorHandler = pusher.createErrorHandler() + assert( + !errorHandler.shouldRetryError(new RuntimeException( + new IllegalArgumentException(BlockPushErrorHandler.TOO_LATE_MESSAGE_SUFFIX)))) + assert(errorHandler.shouldRetryError(new RuntimeException(new ConnectException()))) + assert( + 
errorHandler.shouldRetryError(new RuntimeException(new IllegalArgumentException( + BlockPushErrorHandler.BLOCK_APPEND_COLLISION_DETECTED_MSG_PREFIX)))) + assert (errorHandler.shouldRetryError(new Throwable())) + } + + test("Error logging") { + val pusher = new ShuffleBlockPusher(conf) + val errorHandler = pusher.createErrorHandler() + assert( + !errorHandler.shouldLogError(new RuntimeException( + new IllegalArgumentException(BlockPushErrorHandler.TOO_LATE_MESSAGE_SUFFIX)))) + assert(!errorHandler.shouldLogError(new RuntimeException( + new IllegalArgumentException( + BlockPushErrorHandler.BLOCK_APPEND_COLLISION_DETECTED_MSG_PREFIX)))) + assert(errorHandler.shouldLogError(new Throwable())) + } + + test("Blocks are continued to push even when a block push fails with collision " + + "exception") { + conf.set("spark.reducer.maxBlocksInFlightPerAddress", "1") + val pusher = new TestShuffleBlockPusher(conf) + var failBlock: Boolean = true + when(shuffleClient.pushBlocks(any(), any(), any(), any(), any())) + .thenAnswer((invocation: InvocationOnMock) => { + val blocks = invocation.getArguments()(2).asInstanceOf[Array[String]] + val blockFetchListener = invocation.getArguments()(4).asInstanceOf[BlockFetchingListener] + blocks.foreach(blockId => { + if (failBlock) { + failBlock = false + // Fail the first block with the collision exception. + blockFetchListener.onBlockFetchFailure(blockId, new RuntimeException( + new IllegalArgumentException( + BlockPushErrorHandler.BLOCK_APPEND_COLLISION_DETECTED_MSG_PREFIX))) + } else { + pushedBlocks += blockId + blockFetchListener.onBlockFetchSuccess(blockId, mock(classOf[ManagedBuffer])) + } + }) + }) + pusher.initiateBlockPush( + mock(classOf[File]), Array.fill(dependency.partitioner.numPartitions) { 2 }, dependency, 0) + pusher.runPendingTasks() + verify(shuffleClient, times(8)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.length == 7) + } + + test("More blocks are not pushed when a block push fails with too late " + + "exception") { + conf.set("spark.reducer.maxBlocksInFlightPerAddress", "1") + val pusher = new TestShuffleBlockPusher(conf) + var failBlock: Boolean = true + when(shuffleClient.pushBlocks(any(), any(), any(), any(), any())) + .thenAnswer((invocation: InvocationOnMock) => { + val blocks = invocation.getArguments()(2).asInstanceOf[Array[String]] + val blockFetchListener = invocation.getArguments()(4).asInstanceOf[BlockFetchingListener] + blocks.foreach(blockId => { + if (failBlock) { + failBlock = false + // Fail the first block with the too late exception. 
+ blockFetchListener.onBlockFetchFailure(blockId, new RuntimeException( + new IllegalArgumentException(BlockPushErrorHandler.TOO_LATE_MESSAGE_SUFFIX))) + } else { + pushedBlocks += blockId + blockFetchListener.onBlockFetchSuccess(blockId, mock(classOf[ManagedBuffer])) + } + }) + }) + pusher.initiateBlockPush( + mock(classOf[File]), Array.fill(dependency.partitioner.numPartitions) { 2 }, dependency, 0) + pusher.runPendingTasks() + verify(shuffleClient, times(1)) + .pushBlocks(any(), any(), any(), any(), any()) + assert(pushedBlocks.isEmpty) + } + + test("Connect exceptions remove all the push requests for that host") { + when(dependency.getMergerLocs).thenReturn( + Seq(BlockManagerId("client1", "client1", 1), BlockManagerId("client2", "client2", 2))) + conf.set("spark.reducer.maxBlocksInFlightPerAddress", "2") + when(shuffleClient.pushBlocks(any(), any(), any(), any(), any())) + .thenAnswer((invocation: InvocationOnMock) => { + val blocks = invocation.getArguments()(2).asInstanceOf[Array[String]] + pushedBlocks ++= blocks + val blockFetchListener = invocation.getArguments()(4).asInstanceOf[BlockFetchingListener] + blocks.foreach(blockId => { + blockFetchListener.onBlockFetchFailure( + blockId, new RuntimeException(new ConnectException())) + }) + }) + val pusher = new TestShuffleBlockPusher(conf) + pusher.initiateBlockPush( + mock(classOf[File]), Array.fill(dependency.partitioner.numPartitions) { 2 }, dependency, 0) + pusher.runPendingTasks() + verify(shuffleClient, times(2)) + .pushBlocks(any(), any(), any(), any(), any()) + // 2 blocks for each merger locations + assert(pushedBlocks.length == 4) + assert(pusher.unreachableBlockMgrs.size == 2) + } + + private class TestShuffleBlockPusher(conf: SparkConf) extends ShuffleBlockPusher(conf) { + private[this] val tasks = new LinkedBlockingQueue[Runnable] + + override protected def submitTask(task: Runnable): Unit = { + tasks.add(task) + } + + def runPendingTasks(): Unit = { + // This ensures that all the submitted tasks - updateStateAndCheckIfPushMore and pushUpToMax + // are run synchronously. 
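The TestShuffleBlockPusher helper defined at the end of this suite replaces the pusher's asynchronous task submission with a queue that is drained on demand, so the callback-driven push flow can be tested deterministically. The same capture-and-drain idea, reduced to a self-contained sketch independent of the Spark classes involved:

```scala
import java.util.concurrent.LinkedBlockingQueue

// Capture submitted work instead of handing it to a thread pool, then drain the queue
// synchronously; tasks scheduled by other tasks are picked up by the same loop.
class DeterministicExecutor {
  private val tasks = new LinkedBlockingQueue[Runnable]

  def submit(task: Runnable): Unit = tasks.add(task)

  def runPendingTasks(): Unit = {
    while (!tasks.isEmpty) {
      tasks.take().run()
    }
  }
}
```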
+ while (!tasks.isEmpty) { + tasks.take().run() + } + } + + override protected def createRequestBuffer( + conf: TransportConf, + dataFile: File, + offset: Long, + length: Long): ManagedBuffer = { + val managedBuffer = mock(classOf[ManagedBuffer]) + val byteBuffer = new Array[Byte](length.toInt) + when(managedBuffer.nioByteBuffer()).thenReturn(ByteBuffer.wrap(byteBuffer)) + managedBuffer + } + } +} diff --git a/core/src/test/scala/org/apache/spark/shuffle/sort/IndexShuffleBlockResolverSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/sort/IndexShuffleBlockResolverSuite.scala index 725a1d90557a2..91260d01eb8b6 100644 --- a/core/src/test/scala/org/apache/spark/shuffle/sort/IndexShuffleBlockResolverSuite.scala +++ b/core/src/test/scala/org/apache/spark/shuffle/sort/IndexShuffleBlockResolverSuite.scala @@ -156,4 +156,9 @@ class IndexShuffleBlockResolverSuite extends SparkFunSuite with BeforeAndAfterEa indexIn2.close() } } + + test("SPARK-33198 getMigrationBlocks should not fail at missing files") { + val resolver = new IndexShuffleBlockResolver(conf, blockManager) + assert(resolver.getMigrationBlocks(ShuffleBlockInfo(Int.MaxValue, Long.MaxValue)).isEmpty) + } } diff --git a/core/src/test/scala/org/apache/spark/status/AppStatusListenerSuite.scala b/core/src/test/scala/org/apache/spark/status/AppStatusListenerSuite.scala index d5829c352be9b..a251c164a79ca 100644 --- a/core/src/test/scala/org/apache/spark/status/AppStatusListenerSuite.scala +++ b/core/src/test/scala/org/apache/spark/status/AppStatusListenerSuite.scala @@ -234,7 +234,7 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { // Send two executor metrics update. Only update one metric to avoid a lot of boilerplate code. // The tasks are distributed among the two executors, so the executor-level metrics should - // hold half of the cummulative value of the metric being updated. + // hold half of the cumulative value of the metric being updated. 
Seq(1L, 2L).foreach { value => s1Tasks.foreach { task => val accum = new AccumulableInfo(1L, Some(InternalAccumulator.MEMORY_BYTES_SPILLED), @@ -256,9 +256,9 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { } } - // Blacklisting executor for stage + // Excluding executor for stage time += 1 - listener.onExecutorBlacklistedForStage(SparkListenerExecutorBlacklistedForStage( + listener.onExecutorExcludedForStage(SparkListenerExecutorExcludedForStage( time = time, executorId = execIds.head, taskFailures = 2, @@ -273,18 +273,21 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { assert(executorStageSummaryWrappers.nonEmpty) executorStageSummaryWrappers.foreach { exec => - // only the first executor is expected to be blacklisted - val expectedBlacklistedFlag = exec.executorId == execIds.head - assert(exec.info.isBlacklistedForStage === expectedBlacklistedFlag) + // only the first executor is expected to be excluded + val expectedExcludedFlag = exec.executorId == execIds.head + assert(exec.info.isBlacklistedForStage === expectedExcludedFlag) + assert(exec.info.isExcludedForStage === expectedExcludedFlag) } check[ExecutorSummaryWrapper](execIds.head) { exec => assert(exec.info.blacklistedInStages === Set(stages.head.stageId)) + assert(exec.info.excludedInStages === Set(stages.head.stageId)) + } - // Blacklisting node for stage + // Excluding node for stage time += 1 - listener.onNodeBlacklistedForStage(SparkListenerNodeBlacklistedForStage( + listener.onNodeExcludedForStage(SparkListenerNodeExcludedForStage( time = time, hostId = "2.example.com", // this is where the second executor is hosted executorFailures = 1, @@ -299,8 +302,10 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { assert(executorStageSummaryWrappersForNode.nonEmpty) executorStageSummaryWrappersForNode.foreach { exec => - // both executor is expected to be blacklisted + // both executor is expected to be excluded assert(exec.info.isBlacklistedForStage) + assert(exec.info.isExcludedForStage) + } // Fail one of the tasks, re-start it. @@ -450,6 +455,7 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { check[ExecutorSummaryWrapper](execIds.head) { exec => assert(exec.info.blacklistedInStages === Set()) + assert(exec.info.excludedInStages === Set()) } // Submit stage 2. @@ -466,9 +472,9 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { assert(stage.info.submissionTime === Some(new Date(stages.last.submissionTime.get))) } - // Blacklisting node for stage + // Excluding node for stage time += 1 - listener.onNodeBlacklistedForStage(SparkListenerNodeBlacklistedForStage( + listener.onNodeExcludedForStage(SparkListenerNodeExcludedForStage( time = time, hostId = "1.example.com", executorFailures = 1, @@ -477,6 +483,7 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { check[ExecutorSummaryWrapper](execIds.head) { exec => assert(exec.info.blacklistedInStages === Set(stages.last.stageId)) + assert(exec.info.excludedInStages === Set(stages.last.stageId)) } // Start and fail all tasks of stage 2. @@ -628,30 +635,34 @@ class AppStatusListenerSuite extends SparkFunSuite with BeforeAndAfter { assert(job.info.numSkippedTasks === s1Tasks.size) } - // Blacklist an executor. + // Exclude an executor. 
time += 1 - listener.onExecutorBlacklisted(SparkListenerExecutorBlacklisted(time, "1", 42)) + listener.onExecutorExcluded(SparkListenerExecutorExcluded(time, "1", 42)) check[ExecutorSummaryWrapper]("1") { exec => assert(exec.info.isBlacklisted) + assert(exec.info.isExcluded) } time += 1 - listener.onExecutorUnblacklisted(SparkListenerExecutorUnblacklisted(time, "1")) + listener.onExecutorUnexcluded(SparkListenerExecutorUnexcluded(time, "1")) check[ExecutorSummaryWrapper]("1") { exec => assert(!exec.info.isBlacklisted) + assert(!exec.info.isExcluded) } - // Blacklist a node. + // Exclude a node. time += 1 - listener.onNodeBlacklisted(SparkListenerNodeBlacklisted(time, "1.example.com", 2)) + listener.onNodeExcluded(SparkListenerNodeExcluded(time, "1.example.com", 2)) check[ExecutorSummaryWrapper]("1") { exec => assert(exec.info.isBlacklisted) + assert(exec.info.isExcluded) } time += 1 - listener.onNodeUnblacklisted(SparkListenerNodeUnblacklisted(time, "1.example.com")) + listener.onNodeUnexcluded(SparkListenerNodeUnexcluded(time, "1.example.com")) check[ExecutorSummaryWrapper]("1") { exec => assert(!exec.info.isBlacklisted) + assert(!exec.info.isExcluded) } // Stop executors. diff --git a/core/src/test/scala/org/apache/spark/status/api/v1/ExecutorSummarySuite.scala b/core/src/test/scala/org/apache/spark/status/api/v1/ExecutorSummarySuite.scala new file mode 100644 index 0000000000000..541a7821a51fb --- /dev/null +++ b/core/src/test/scala/org/apache/spark/status/api/v1/ExecutorSummarySuite.scala @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.spark.status.api.v1 + +import java.util.Date + +import com.fasterxml.jackson.core.`type`.TypeReference +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.module.scala.DefaultScalaModule + +import org.apache.spark.SparkFunSuite + +class ExecutorSummarySuite extends SparkFunSuite { + + test("Check ExecutorSummary serialize and deserialize with empty peakMemoryMetrics") { + val mapper = new ObjectMapper().registerModule(DefaultScalaModule) + val executorSummary = new ExecutorSummary("id", "host:port", true, 1, + 10, 10, 1, 1, 1, + 0, 0, 1, 100, + 1, 100, 100, + 10, false, 20, new Date(1600984336352L), + Option.empty, Option.empty, Map(), Option.empty, Set(), Option.empty, Map(), Map(), 1, + false, Set()) + val expectedJson = "{\"id\":\"id\",\"hostPort\":\"host:port\",\"isActive\":true," + + "\"rddBlocks\":1,\"memoryUsed\":10,\"diskUsed\":10,\"totalCores\":1,\"maxTasks\":1," + + "\"activeTasks\":1,\"failedTasks\":0,\"completedTasks\":0,\"totalTasks\":1," + + "\"totalDuration\":100,\"totalGCTime\":1,\"totalInputBytes\":100," + + "\"totalShuffleRead\":100,\"totalShuffleWrite\":10,\"isBlacklisted\":false," + + "\"maxMemory\":20,\"addTime\":1600984336352,\"removeTime\":null,\"removeReason\":null," + + "\"executorLogs\":{},\"memoryMetrics\":null,\"blacklistedInStages\":[]," + + "\"peakMemoryMetrics\":null,\"attributes\":{},\"resources\":{},\"resourceProfileId\":1," + + "\"isExcluded\":false,\"excludedInStages\":[]}" + val json = mapper.writeValueAsString(executorSummary) + assert(expectedJson.equals(json)) + val deserializeExecutorSummary = mapper.readValue(json, new TypeReference[ExecutorSummary] {}) + assert(deserializeExecutorSummary.peakMemoryMetrics == None) + } + +} diff --git a/core/src/test/scala/org/apache/spark/storage/BlockInfoManagerSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockInfoManagerSuite.scala index 9c0699bc981f8..d2bf385e10796 100644 --- a/core/src/test/scala/org/apache/spark/storage/BlockInfoManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/storage/BlockInfoManagerSuite.scala @@ -19,7 +19,7 @@ package org.apache.spark.storage import java.util.Properties -import scala.concurrent.{Await, ExecutionContext, Future} +import scala.concurrent.{ExecutionContext, Future} import scala.language.implicitConversions import scala.reflect.ClassTag diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionIntegrationSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionIntegrationSuite.scala index 094b893cdda2e..bb685cd353ddc 100644 --- a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionIntegrationSuite.scala +++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionIntegrationSuite.scala @@ -40,6 +40,46 @@ class BlockManagerDecommissionIntegrationSuite extends SparkFunSuite with LocalS val TaskEnded = "TASK_ENDED" val JobEnded = "JOB_ENDED" + Seq(false, true).foreach { isEnabled => + test(s"SPARK-32850: BlockManager decommission should respect the configuration " + + s"(enabled=${isEnabled})") { + val conf = new SparkConf() + .setAppName("test-blockmanager-decommissioner") + .setMaster("local-cluster[2, 1, 1024]") + .set(config.DECOMMISSION_ENABLED, true) + .set(config.STORAGE_DECOMMISSION_ENABLED, isEnabled) + sc = new SparkContext(conf) + TestUtils.waitUntilExecutorsUp(sc, 2, 6000) + val executors = sc.getExecutorIds().toArray + val decommissionListener = new SparkListener { + override def onTaskStart(taskStart: 
SparkListenerTaskStart): Unit = { + // ensure Tasks launched at executors before they're marked as decommissioned by driver + Thread.sleep(3000) + sc.schedulerBackend.asInstanceOf[StandaloneSchedulerBackend] + .decommissionExecutors( + executors.map { id => (id, ExecutorDecommissionInfo("test")) }, + true, + false) + } + } + sc.addSparkListener(decommissionListener) + + val decommissionStatus: Seq[Boolean] = sc.parallelize(1 to 100, 2).mapPartitions { _ => + val startTime = System.currentTimeMillis() + while (SparkEnv.get.blockManager.decommissioner.isEmpty && + // wait at most 6 seconds for BlockManager to start to decommission (if enabled) + System.currentTimeMillis() - startTime < 6000) { + Thread.sleep(300) + } + val blockManagerDecommissionStatus = + if (SparkEnv.get.blockManager.decommissioner.isEmpty) false else true + Iterator.single(blockManagerDecommissionStatus) + }.collect() + assert(decommissionStatus.forall(_ == isEnabled)) + sc.removeSparkListener(decommissionListener) + } + } + testRetry(s"verify that an already running task which is going to cache data succeeds " + s"on a decommissioned executor after task start") { runDecomTest(true, false, TaskStarted) @@ -69,6 +109,8 @@ class BlockManagerDecommissionIntegrationSuite extends SparkFunSuite with LocalS .set(config.STORAGE_DECOMMISSION_ENABLED, true) .set(config.STORAGE_DECOMMISSION_RDD_BLOCKS_ENABLED, persist) .set(config.STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED, shuffle) + // Since we use the bus for testing we don't want to drop any messages + .set(config.LISTENER_BUS_EVENT_QUEUE_CAPACITY, 1000000) // Just replicate blocks quickly during testing, there isn't another // workload we need to worry about. .set(config.STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL, 10L) @@ -137,7 +179,7 @@ class BlockManagerDecommissionIntegrationSuite extends SparkFunSuite with LocalS taskEndEvents.add(taskEnd) } - override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = { + override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = synchronized { blocksUpdated.append(blockUpdated) } diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionUnitSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionUnitSuite.scala index 74ad8bd2bcf9d..b7ac378b4c6cd 100644 --- a/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionUnitSuite.scala +++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionUnitSuite.scala @@ -63,15 +63,20 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers { * a constant Long.MaxValue timestamp. */ private def validateDecommissionTimestamps(conf: SparkConf, bm: BlockManager, - migratableShuffleBlockResolver: MigratableResolver, fail: Boolean = false) = { + fail: Boolean = false, assertDone: Boolean = true) = { // Verify the decommissioning manager timestamps and status val bmDecomManager = new BlockManagerDecommissioner(conf, bm) + validateDecommissionTimestampsOnManager(bmDecomManager, fail, assertDone) + } + + private def validateDecommissionTimestampsOnManager(bmDecomManager: BlockManagerDecommissioner, + fail: Boolean = false, assertDone: Boolean = true, numShuffles: Option[Int] = None) = { var previousTime: Option[Long] = None try { bmDecomManager.start() eventually(timeout(100.second), interval(10.milliseconds)) { val (currentTime, done) = bmDecomManager.lastMigrationInfo() - assert(done) + assert(!assertDone || done) // Make sure the time stamp starts moving forward. 
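// Editor's aside (illustration only): `assert(!assertDone || done)` above is the standard
// encoding of an implication, "if the caller requires completion, migration must be done",
// which lets callers that only care about advancing timestamps pass assertDone = false.
def impliedDone(requireDone: Boolean, done: Boolean): Boolean = !requireDone || done
assert(impliedDone(requireDone = false, done = false)) // completion not required, so this holds
assert(impliedDone(requireDone = true, done = true))   // completion required and observed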
if (!fail) { previousTime match { @@ -85,12 +90,15 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers { // If we expect migration to fail we should get the max value quickly. assert(currentTime === Long.MaxValue) } + numShuffles.foreach { s => + assert(bmDecomManager.numMigratedShuffles.get() === s) + } } if (!fail) { // Wait 5 seconds and assert times keep moving forward. Thread.sleep(5000) val (currentTime, done) = bmDecomManager.lastMigrationInfo() - assert(done && currentTime > previousTime.get) + assert((!assertDone || done) && currentTime > previousTime.get) } } finally { bmDecomManager.stop() @@ -110,7 +118,7 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers { .thenReturn(Seq(BlockManagerId("exec2", "host2", 12345))) // Verify the decom manager handles this correctly - validateDecommissionTimestamps(sparkConf, bm, migratableShuffleBlockResolver) + validateDecommissionTimestamps(sparkConf, bm) } test("block decom manager with no migrations configured") { @@ -128,8 +136,7 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers { .set(config.STORAGE_DECOMMISSION_RDD_BLOCKS_ENABLED, false) .set(config.STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL, 10L) // Verify the decom manager handles this correctly - validateDecommissionTimestamps(badConf, bm, migratableShuffleBlockResolver, - fail = true) + validateDecommissionTimestamps(badConf, bm, fail = true) } test("block decom manager with no peers") { @@ -144,8 +151,7 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers { .thenReturn(Seq()) // Verify the decom manager handles this correctly - validateDecommissionTimestamps(sparkConf, bm, migratableShuffleBlockResolver, - fail = true) + validateDecommissionTimestamps(sparkConf, bm, fail = true) } @@ -161,7 +167,83 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers { .thenReturn(Seq(BlockManagerId("exec2", "host2", 12345))) // Verify the decom manager handles this correctly - validateDecommissionTimestamps(sparkConf, bm, migratableShuffleBlockResolver) + validateDecommissionTimestamps(sparkConf, bm) + } + + test("block decom manager does not re-add removed shuffle files") { + // Set up the mocks so we return one shuffle block + val bm = mock(classOf[BlockManager]) + val migratableShuffleBlockResolver = mock(classOf[MigratableResolver]) + registerShuffleBlocks(migratableShuffleBlockResolver, Set()) + when(bm.migratableResolver).thenReturn(migratableShuffleBlockResolver) + when(bm.getMigratableRDDBlocks()) + .thenReturn(Seq()) + when(bm.getPeers(mc.any())) + .thenReturn(Seq(BlockManagerId("exec2", "host2", 12345))) + val bmDecomManager = new BlockManagerDecommissioner(sparkConf, bm) + bmDecomManager.migratingShuffles += ShuffleBlockInfo(10, 10) + + validateDecommissionTimestampsOnManager(bmDecomManager, fail = false, assertDone = false) + } + + test("block decom manager handles IO failures") { + // Set up the mocks so we return one shuffle block + val bm = mock(classOf[BlockManager]) + val migratableShuffleBlockResolver = mock(classOf[MigratableResolver]) + registerShuffleBlocks(migratableShuffleBlockResolver, Set((1, 1L, 1))) + when(bm.migratableResolver).thenReturn(migratableShuffleBlockResolver) + when(bm.getMigratableRDDBlocks()) + .thenReturn(Seq()) + when(bm.getPeers(mc.any())) + .thenReturn(Seq(BlockManagerId("exec2", "host2", 12345))) + + val blockTransferService = mock(classOf[BlockTransferService]) + // Simulate an ambiguous IO error (e.g. 
block could be gone, connection failed, etc.) + when(blockTransferService.uploadBlockSync( + mc.any(), mc.any(), mc.any(), mc.any(), mc.any(), mc.any(), mc.isNull())).thenThrow( + new java.io.IOException("boop") + ) + + when(bm.blockTransferService).thenReturn(blockTransferService) + + // Verify the decom manager handles this correctly + val bmDecomManager = new BlockManagerDecommissioner(sparkConf, bm) + validateDecommissionTimestampsOnManager(bmDecomManager, fail = false) + } + + test("block decom manager short circuits removed blocks") { + // Set up the mocks so we return one shuffle block + val bm = mock(classOf[BlockManager]) + val migratableShuffleBlockResolver = mock(classOf[MigratableResolver]) + // First call get blocks, then empty list simulating a delete. + when(migratableShuffleBlockResolver.getStoredShuffles()) + .thenReturn(Seq(ShuffleBlockInfo(1, 1))) + .thenReturn(Seq()) + when(migratableShuffleBlockResolver.getMigrationBlocks(mc.any())) + .thenReturn(List( + (ShuffleIndexBlockId(1, 1, 1), mock(classOf[ManagedBuffer])), + (ShuffleDataBlockId(1, 1, 1), mock(classOf[ManagedBuffer])))) + .thenReturn(List()) + + when(bm.migratableResolver).thenReturn(migratableShuffleBlockResolver) + when(bm.getMigratableRDDBlocks()) + .thenReturn(Seq()) + when(bm.getPeers(mc.any())) + .thenReturn(Seq(BlockManagerId("exec2", "host2", 12345))) + + val blockTransferService = mock(classOf[BlockTransferService]) + // Simulate an ambiguous IO error (e.g. block could be gone, connection failed, etc.) + when(blockTransferService.uploadBlockSync( + mc.any(), mc.any(), mc.any(), mc.any(), mc.any(), mc.any(), mc.isNull())).thenThrow( + new java.io.IOException("boop") + ) + + when(bm.blockTransferService).thenReturn(blockTransferService) + + // Verify the decom manager handles this correctly + val bmDecomManager = new BlockManagerDecommissioner(sparkConf, bm) + validateDecommissionTimestampsOnManager(bmDecomManager, fail = false, + numShuffles = Some(1)) } test("test shuffle and cached rdd migration without any error") { @@ -192,7 +274,8 @@ class BlockManagerDecommissionUnitSuite extends SparkFunSuite with Matchers { // We don't check that all blocks are migrated because out mock is always returning an RDD. 
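// Editor's aside (illustration only, hypothetical ShuffleStore trait): the "short circuits
// removed blocks" test above leans on Mockito's consecutive stubbing, where chained thenReturn
// calls make the first poll see a shuffle and every later poll see none, simulating a shuffle
// file that is deleted while migration is in flight.
import org.mockito.Mockito.{mock, when}

trait ShuffleStore { def storedShuffles(): Seq[Int] }

val store = mock(classOf[ShuffleStore])
when(store.storedShuffles())
  .thenReturn(Seq(1))    // first call: one shuffle still present
  .thenReturn(Seq.empty) // subsequent calls: it has been removed
assert(store.storedShuffles() == Seq(1))
assert(store.storedShuffles().isEmpty)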
eventually(timeout(100.second), interval(10.milliseconds)) { - assert(bmDecomManager.shufflesToMigrate.isEmpty == true) + assert(bmDecomManager.shufflesToMigrate.isEmpty === true) + assert(bmDecomManager.numMigratedShuffles.get() === 1) verify(bm, least(1)).replicateBlock( mc.eq(storedBlockId1), mc.any(), mc.any(), mc.eq(Some(3))) verify(blockTransferService, times(2)) diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala index 0b673c580d71f..1e9b48102616f 100644 --- a/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala +++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerReplicationSuite.scala @@ -95,8 +95,6 @@ trait BlockManagerReplicationBehavior extends SparkFunSuite conf.set(MEMORY_STORAGE_FRACTION, 0.999) conf.set(STORAGE_UNROLL_MEMORY_THRESHOLD, 512L) - // to make a replication attempt to inactive store fail fast - conf.set("spark.core.connection.ack.wait.timeout", "1s") // to make cached peers refresh frequently conf.set(STORAGE_CACHED_PEERS_TTL, 10) diff --git a/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala b/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala index 861c16269583a..44b6f1b82e75a 100644 --- a/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala +++ b/core/src/test/scala/org/apache/spark/storage/BlockManagerSuite.scala @@ -57,7 +57,7 @@ import org.apache.spark.scheduler.{LiveListenerBus, MapStatus, SparkListenerBloc import org.apache.spark.scheduler.cluster.{CoarseGrainedClusterMessages, CoarseGrainedSchedulerBackend} import org.apache.spark.security.{CryptoStreamUtils, EncryptionFunSuite} import org.apache.spark.serializer.{JavaSerializer, KryoSerializer, SerializerManager} -import org.apache.spark.shuffle.{ShuffleBlockResolver, ShuffleManager} +import org.apache.spark.shuffle.{MigratableResolver, ShuffleBlockInfo, ShuffleBlockResolver, ShuffleManager} import org.apache.spark.shuffle.sort.SortShuffleManager import org.apache.spark.storage.BlockManagerMessages._ import org.apache.spark.util._ @@ -100,6 +100,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE .set(Kryo.KRYO_SERIALIZER_BUFFER_SIZE.key, "1m") .set(STORAGE_UNROLL_MEMORY_THRESHOLD, 512L) .set(Network.RPC_ASK_TIMEOUT, "5s") + .set(PUSH_BASED_SHUFFLE_ENABLED, true) } private def makeSortShuffleManager(): SortShuffleManager = { @@ -240,7 +241,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE val driverEndpoint = rpcEnv.setupEndpoint(CoarseGrainedSchedulerBackend.ENDPOINT_NAME, new RpcEndpoint { private val executorSet = mutable.HashSet[String]() - override val rpcEnv: RpcEnv = this.rpcEnv + override val rpcEnv: RpcEnv = BlockManagerSuite.this.rpcEnv override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = { case CoarseGrainedClusterMessages.RegisterExecutor(executorId, _, _, _, _, _, _, _) => executorSet += executorId @@ -254,7 +255,7 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE def createAndRegisterBlockManager(timeout: Boolean): BlockManagerId = { val id = if (timeout) "timeout" else "normal" val bmRef = rpcEnv.setupEndpoint(s"bm-$id", new RpcEndpoint { - override val rpcEnv: RpcEnv = this.rpcEnv + override val rpcEnv: RpcEnv = BlockManagerSuite.this.rpcEnv private def reply[T](context: RpcCallContext, response: T): Unit = { if (timeout) { 
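// Editor's aside (illustration only): the `BlockManagerSuite.this.rpcEnv` changes above fix a
// self-reference bug. Inside an anonymous subclass, `this.rpcEnv` resolves to the member being
// defined on that anonymous instance (so the val was initialized with null); qualifying with
// the enclosing class name reads the suite's field instead. Minimal sketch of the same pattern:
class Outer {
  val env: String = "outer-env"

  trait Endpoint { def env: String }

  val endpoint: Endpoint = new Endpoint {
    // `this.env` here would refer to the anonymous instance's own member;
    // `Outer.this.env` unambiguously reads the enclosing class's field.
    override val env: String = Outer.this.env
  }
}
// new Outer().endpoint.env == "outer-env"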
Thread.sleep(conf.getTimeAsMs(Network.RPC_ASK_TIMEOUT.key) + 1000) @@ -1711,12 +1712,12 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE val externalShuffleServicePort = StorageUtils.externalShuffleServicePort(conf) val port = store.blockTransferService.port val rack = Some("rack") - val blockManagerWithTopolgyInfo = BlockManagerId( + val blockManagerWithTopologyInfo = BlockManagerId( store.blockManagerId.executorId, store.blockManagerId.host, store.blockManagerId.port, rack) - store.blockManagerId = blockManagerWithTopolgyInfo + store.blockManagerId = blockManagerWithTopologyInfo val locations = Seq( BlockManagerId("executor4", otherHost, externalShuffleServicePort, rack), BlockManagerId("executor3", otherHost, port, rack), @@ -1974,7 +1975,72 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE } } - class MockBlockTransferService(val maxFailures: Int) extends BlockTransferService { + test("SPARK-32919: Shuffle push merger locations should be bounded with in" + + " spark.shuffle.push.retainedMergerLocations") { + assert(master.getShufflePushMergerLocations(10, Set.empty).isEmpty) + makeBlockManager(100, "execA", + transferService = Some(new MockBlockTransferService(10, "hostA"))) + makeBlockManager(100, "execB", + transferService = Some(new MockBlockTransferService(10, "hostB"))) + makeBlockManager(100, "execC", + transferService = Some(new MockBlockTransferService(10, "hostC"))) + makeBlockManager(100, "execD", + transferService = Some(new MockBlockTransferService(10, "hostD"))) + makeBlockManager(100, "execE", + transferService = Some(new MockBlockTransferService(10, "hostA"))) + assert(master.getShufflePushMergerLocations(10, Set.empty).size == 4) + assert(master.getShufflePushMergerLocations(10, Set.empty).map(_.host).sorted === + Seq("hostC", "hostD", "hostA", "hostB").sorted) + assert(master.getShufflePushMergerLocations(10, Set("hostB")).size == 3) + } + + test("SPARK-32919: Prefer active executor locations for shuffle push mergers") { + makeBlockManager(100, "execA", + transferService = Some(new MockBlockTransferService(10, "hostA"))) + makeBlockManager(100, "execB", + transferService = Some(new MockBlockTransferService(10, "hostB"))) + makeBlockManager(100, "execC", + transferService = Some(new MockBlockTransferService(10, "hostC"))) + makeBlockManager(100, "execD", + transferService = Some(new MockBlockTransferService(10, "hostD"))) + makeBlockManager(100, "execE", + transferService = Some(new MockBlockTransferService(10, "hostA"))) + assert(master.getShufflePushMergerLocations(5, Set.empty).size == 4) + + master.removeExecutor("execA") + master.removeExecutor("execE") + + assert(master.getShufflePushMergerLocations(3, Set.empty).size == 3) + assert(master.getShufflePushMergerLocations(3, Set.empty).map(_.host).sorted === + Seq("hostC", "hostB", "hostD").sorted) + assert(master.getShufflePushMergerLocations(4, Set.empty).map(_.host).sorted === + Seq("hostB", "hostA", "hostC", "hostD").sorted) + } + + test("SPARK-33387 Support ordered shuffle block migration") { + val blocks: Seq[ShuffleBlockInfo] = Seq( + ShuffleBlockInfo(1, 0L), + ShuffleBlockInfo(0, 1L), + ShuffleBlockInfo(0, 0L), + ShuffleBlockInfo(1, 1L)) + val sortedBlocks = blocks.sortBy(b => (b.shuffleId, b.mapId)) + + val resolver = mock(classOf[MigratableResolver]) + when(resolver.getStoredShuffles).thenReturn(blocks) + + val bm = mock(classOf[BlockManager]) + when(bm.migratableResolver).thenReturn(resolver) + 
when(bm.getPeers(mc.any())).thenReturn(Seq.empty) + + val decomManager = new BlockManagerDecommissioner(conf, bm) + decomManager.refreshOffloadingShuffleBlocks() + + assert(sortedBlocks.sameElements(decomManager.shufflesToMigrate.asScala.map(_._1))) + } + + class MockBlockTransferService( + val maxFailures: Int, + override val hostName: String = "MockBlockTransferServiceHost") extends BlockTransferService { var numCalls = 0 var tempFileManager: DownloadFileManager = null @@ -1992,8 +2058,6 @@ class BlockManagerSuite extends SparkFunSuite with Matchers with BeforeAndAfterE override def close(): Unit = {} - override def hostName: String = { "MockBlockTransferServiceHost" } - override def port: Int = { 63332 } override def uploadBlock( diff --git a/core/src/test/scala/org/apache/spark/storage/FallbackStorageSuite.scala b/core/src/test/scala/org/apache/spark/storage/FallbackStorageSuite.scala new file mode 100644 index 0000000000000..c07edb65efb53 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/storage/FallbackStorageSuite.scala @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.spark.storage + +import java.io.{DataOutputStream, FileOutputStream, IOException} +import java.nio.file.Files + +import scala.concurrent.duration._ + +import org.mockito.{ArgumentMatchers => mc} +import org.mockito.Mockito.{mock, times, verify, when} +import org.scalatest.concurrent.Eventually.{eventually, interval, timeout} + +import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite, TestUtils} +import org.apache.spark.LocalSparkContext.withSpark +import org.apache.spark.internal.config._ +import org.apache.spark.launcher.SparkLauncher.{EXECUTOR_MEMORY, SPARK_MASTER} +import org.apache.spark.network.BlockTransferService +import org.apache.spark.network.buffer.ManagedBuffer +import org.apache.spark.scheduler.ExecutorDecommissionInfo +import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend +import org.apache.spark.shuffle.{IndexShuffleBlockResolver, ShuffleBlockInfo} +import org.apache.spark.shuffle.IndexShuffleBlockResolver.NOOP_REDUCE_ID +import org.apache.spark.util.Utils.tryWithResource + +class FallbackStorageSuite extends SparkFunSuite with LocalSparkContext { + + def getSparkConf(initialExecutor: Int = 1, minExecutor: Int = 1): SparkConf = { + new SparkConf(false) + .setAppName(getClass.getName) + .set(SPARK_MASTER, s"local-cluster[$initialExecutor,1,1024]") + .set(EXECUTOR_MEMORY, "1g") + .set(UI.UI_ENABLED, false) + .set(DYN_ALLOCATION_ENABLED, true) + .set(DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED, true) + .set(DYN_ALLOCATION_INITIAL_EXECUTORS, initialExecutor) + .set(DYN_ALLOCATION_MIN_EXECUTORS, minExecutor) + .set(DECOMMISSION_ENABLED, true) + .set(STORAGE_DECOMMISSION_ENABLED, true) + .set(STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED, true) + .set(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH, + Files.createTempDirectory("tmp").toFile.getAbsolutePath + "/") + } + + test("fallback storage APIs - copy/exists") { + val conf = new SparkConf(false) + .set("spark.app.id", "testId") + .set(SHUFFLE_COMPRESS, false) + .set(STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED, true) + .set(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH, + Files.createTempDirectory("tmp").toFile.getAbsolutePath + "/") + val fallbackStorage = new FallbackStorage(conf) + val bmm = new BlockManagerMaster(new NoopRpcEndpointRef(conf), null, conf, false) + + val bm = mock(classOf[BlockManager]) + val dbm = new DiskBlockManager(conf, false) + when(bm.diskBlockManager).thenReturn(dbm) + when(bm.master).thenReturn(bmm) + val resolver = new IndexShuffleBlockResolver(conf, bm) + when(bm.migratableResolver).thenReturn(resolver) + + resolver.getIndexFile(1, 1L).createNewFile() + resolver.getDataFile(1, 1L).createNewFile() + + val indexFile = resolver.getIndexFile(1, 2L) + tryWithResource(new FileOutputStream(indexFile)) { fos => + tryWithResource(new DataOutputStream(fos)) { dos => + dos.writeLong(0) + dos.writeLong(4) + } + } + + val dataFile = resolver.getDataFile(1, 2L) + tryWithResource(new FileOutputStream(dataFile)) { fos => + tryWithResource(new DataOutputStream(fos)) { dos => + dos.writeLong(0) + } + } + + fallbackStorage.copy(ShuffleBlockInfo(1, 1L), bm) + fallbackStorage.copy(ShuffleBlockInfo(1, 2L), bm) + + assert(fallbackStorage.exists(1, ShuffleIndexBlockId(1, 1L, NOOP_REDUCE_ID).name)) + assert(fallbackStorage.exists(1, ShuffleDataBlockId(1, 1L, NOOP_REDUCE_ID).name)) + assert(fallbackStorage.exists(1, ShuffleIndexBlockId(1, 2L, NOOP_REDUCE_ID).name)) + assert(fallbackStorage.exists(1, ShuffleDataBlockId(1, 2L, NOOP_REDUCE_ID).name)) + + // The files for 
shuffle 1 and map 1 are empty intentionally. + intercept[java.io.EOFException] { + FallbackStorage.read(conf, ShuffleBlockId(1, 1L, 0)) + } + FallbackStorage.read(conf, ShuffleBlockId(1, 2L, 0)) + } + + test("migrate shuffle data to fallback storage") { + val conf = new SparkConf(false) + .set("spark.app.id", "testId") + .set(STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED, true) + .set(STORAGE_DECOMMISSION_FALLBACK_STORAGE_PATH, + Files.createTempDirectory("tmp").toFile.getAbsolutePath + "/") + + val ids = Set((1, 1L, 1)) + val bm = mock(classOf[BlockManager]) + val dbm = new DiskBlockManager(conf, false) + when(bm.diskBlockManager).thenReturn(dbm) + val indexShuffleBlockResolver = new IndexShuffleBlockResolver(conf, bm) + val indexFile = indexShuffleBlockResolver.getIndexFile(1, 1L) + val dataFile = indexShuffleBlockResolver.getDataFile(1, 1L) + indexFile.createNewFile() + dataFile.createNewFile() + + val resolver = mock(classOf[IndexShuffleBlockResolver]) + when(resolver.getStoredShuffles()) + .thenReturn(ids.map(triple => ShuffleBlockInfo(triple._1, triple._2)).toSeq) + ids.foreach { case (shuffleId: Int, mapId: Long, reduceId: Int) => + when(resolver.getMigrationBlocks(mc.any())) + .thenReturn(List( + (ShuffleIndexBlockId(shuffleId, mapId, reduceId), mock(classOf[ManagedBuffer])), + (ShuffleDataBlockId(shuffleId, mapId, reduceId), mock(classOf[ManagedBuffer])))) + when(resolver.getIndexFile(shuffleId, mapId)).thenReturn(indexFile) + when(resolver.getDataFile(shuffleId, mapId)).thenReturn(dataFile) + } + + when(bm.getPeers(mc.any())) + .thenReturn(Seq(FallbackStorage.FALLBACK_BLOCK_MANAGER_ID)) + val bmm = new BlockManagerMaster(new NoopRpcEndpointRef(conf), null, conf, false) + when(bm.master).thenReturn(bmm) + val blockTransferService = mock(classOf[BlockTransferService]) + when(blockTransferService.uploadBlockSync(mc.any(), mc.any(), mc.any(), mc.any(), mc.any(), + mc.any(), mc.any())).thenThrow(new IOException) + when(bm.blockTransferService).thenReturn(blockTransferService) + when(bm.migratableResolver).thenReturn(resolver) + when(bm.getMigratableRDDBlocks()).thenReturn(Seq()) + + val decommissioner = new BlockManagerDecommissioner(conf, bm) + + try { + decommissioner.start() + val fallbackStorage = new FallbackStorage(conf) + eventually(timeout(10.second), interval(1.seconds)) { + // uploadBlockSync is not used + verify(blockTransferService, times(1)) + .uploadBlockSync(mc.any(), mc.any(), mc.any(), mc.any(), mc.any(), mc.any(), mc.any()) + + Seq("shuffle_1_1_0.index", "shuffle_1_1_0.data").foreach { filename => + assert(fallbackStorage.exists(shuffleId = 1, filename)) + } + } + } finally { + decommissioner.stop() + } + } + + test("Upload from all decommissioned executors") { + sc = new SparkContext(getSparkConf(2, 2)) + withSpark(sc) { sc => + TestUtils.waitUntilExecutorsUp(sc, 2, 60000) + val rdd1 = sc.parallelize(1 to 10, 10) + val rdd2 = rdd1.map(x => (x % 2, 1)) + val rdd3 = rdd2.reduceByKey(_ + _) + assert(rdd3.count() === 2) + + // Decommission all + val sched = sc.schedulerBackend.asInstanceOf[StandaloneSchedulerBackend] + sc.getExecutorIds().foreach { + sched.decommissionExecutor(_, ExecutorDecommissionInfo(""), false) + } + + val files = Seq("shuffle_0_0_0.index", "shuffle_0_0_0.data") + val fallbackStorage = new FallbackStorage(sc.getConf) + // Uploading is not started yet. 
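// Editor's aside (illustration only): getSparkConf in this suite enables fallback storage via
// config constants; the sketch below spells out the equivalent user-facing keys. The literal
// key strings and the HDFS path are assumptions to verify against the Spark version in use.
import org.apache.spark.SparkConf

val fallbackConf = new SparkConf()
  .set("spark.decommission.enabled", "true")
  .set("spark.storage.decommission.enabled", "true")
  .set("spark.storage.decommission.shuffleBlocks.enabled", "true")
  // Shuffle files of decommissioned executors are copied here as a last resort,
  // so replacement executors can still serve them.
  .set("spark.storage.decommission.fallbackStorage.path", "hdfs://namenode/spark-fallback/")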
+ files.foreach { file => assert(!fallbackStorage.exists(0, file)) } + + // Uploading is completed on decommissioned executors + eventually(timeout(20.seconds), interval(1.seconds)) { + files.foreach { file => assert(fallbackStorage.exists(0, file)) } + } + + // All executors are still alive. + assert(sc.getExecutorIds().size == 2) + } + } + + test("Upload multi stages") { + sc = new SparkContext(getSparkConf()) + withSpark(sc) { sc => + TestUtils.waitUntilExecutorsUp(sc, 1, 60000) + val rdd1 = sc.parallelize(1 to 10, 2) + val rdd2 = rdd1.map(x => (x % 2, 1)) + val rdd3 = rdd2.reduceByKey(_ + _) + val rdd4 = rdd3.sortByKey() + assert(rdd4.count() === 2) + + val shuffle0_files = Seq( + "shuffle_0_0_0.index", "shuffle_0_0_0.data", + "shuffle_0_1_0.index", "shuffle_0_1_0.data") + val shuffle1_files = Seq( + "shuffle_1_4_0.index", "shuffle_1_4_0.data", + "shuffle_1_5_0.index", "shuffle_1_5_0.data") + val fallbackStorage = new FallbackStorage(sc.getConf) + shuffle0_files.foreach { file => assert(!fallbackStorage.exists(0, file)) } + shuffle1_files.foreach { file => assert(!fallbackStorage.exists(1, file)) } + + // Decommission all + val sched = sc.schedulerBackend.asInstanceOf[StandaloneSchedulerBackend] + sc.getExecutorIds().foreach { + sched.decommissionExecutor(_, ExecutorDecommissionInfo(""), false) + } + + eventually(timeout(10.seconds), interval(1.seconds)) { + shuffle0_files.foreach { file => assert(fallbackStorage.exists(0, file)) } + shuffle1_files.foreach { file => assert(fallbackStorage.exists(1, file)) } + } + } + } + + Seq("lz4", "lzf", "snappy", "zstd").foreach { codec => + test(s"$codec - Newly added executors should access old data from remote storage") { + sc = new SparkContext(getSparkConf(2, 0).set(IO_COMPRESSION_CODEC, codec)) + withSpark(sc) { sc => + TestUtils.waitUntilExecutorsUp(sc, 2, 60000) + val rdd1 = sc.parallelize(1 to 10, 2) + val rdd2 = rdd1.map(x => (x % 2, 1)) + val rdd3 = rdd2.reduceByKey(_ + _) + assert(rdd3.collect() === Array((0, 5), (1, 5))) + + // Decommission all + val sched = sc.schedulerBackend.asInstanceOf[StandaloneSchedulerBackend] + sc.getExecutorIds().foreach { + sched.decommissionExecutor(_, ExecutorDecommissionInfo(""), false) + } + + // Make it sure that fallback storage are ready + val fallbackStorage = new FallbackStorage(sc.getConf) + eventually(timeout(10.seconds), interval(1.seconds)) { + Seq( + "shuffle_0_0_0.index", "shuffle_0_0_0.data", + "shuffle_0_1_0.index", "shuffle_0_1_0.data").foreach { file => + assert(fallbackStorage.exists(0, file)) + } + } + + // Since the data is safe, force to shrink down to zero executor + sc.getExecutorIds().foreach { id => + sched.killExecutor(id) + } + eventually(timeout(20.seconds), interval(1.seconds)) { + assert(sc.getExecutorIds().isEmpty) + } + + // Dynamic allocation will start new executors + assert(rdd3.collect() === Array((0, 5), (1, 5))) + assert(rdd3.sortByKey().count() == 2) + assert(sc.getExecutorIds().nonEmpty) + } + } + } +} diff --git a/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala b/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala index 48e0d218c0e5c..d02d7f862df80 100644 --- a/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala +++ b/core/src/test/scala/org/apache/spark/ui/StagePageSuite.scala @@ -17,7 +17,6 @@ package org.apache.spark.ui -import java.util.Locale import javax.servlet.http.HttpServletRequest import scala.xml.Node diff --git a/core/src/test/scala/org/apache/spark/ui/UISuite.scala b/core/src/test/scala/org/apache/spark/ui/UISuite.scala 
index 56026eaa0072b..c7e1dfe71d563 100644 --- a/core/src/test/scala/org/apache/spark/ui/UISuite.scala +++ b/core/src/test/scala/org/apache/spark/ui/UISuite.scala @@ -216,6 +216,15 @@ class UISuite extends SparkFunSuite { assert(rewrittenURI === null) } + test("SPARK-33611: Avoid encoding twice on the query parameter of proxy rewrittenURI") { + val prefix = "/worker-id" + val target = "http://localhost:8081" + val path = "/worker-id/json" + val rewrittenURI = + JettyUtils.createProxyURI(prefix, target, path, "order%5B0%5D%5Bcolumn%5D=0") + assert(rewrittenURI.toString === "http://localhost:8081/json?order%5B0%5D%5Bcolumn%5D=0") + } + test("verify rewriting location header for reverse proxy") { val clientRequest = mock(classOf[HttpServletRequest]) var headerValue = "http://localhost:4040/jobs" diff --git a/core/src/test/scala/org/apache/spark/util/DependencyUtils.scala b/core/src/test/scala/org/apache/spark/util/DependencyUtils.scala new file mode 100644 index 0000000000000..d181d4d8ce669 --- /dev/null +++ b/core/src/test/scala/org/apache/spark/util/DependencyUtils.scala @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.util + +import java.net.URI + +import org.apache.spark.SparkFunSuite + +class DependencyUtilsSuite extends SparkFunSuite { + + test("SPARK-33084: Add jar support Ivy URI -- test invalid ivy uri") { + val e1 = intercept[IllegalArgumentException] { + DependencyUtils.resolveMavenDependencies(URI.create("ivy://")) + }.getMessage + assert(e1.contains("Expected authority at index 6: ivy://")) + + val e2 = intercept[IllegalArgumentException] { + DependencyUtils.resolveMavenDependencies(URI.create("ivy://org.apache.hive:hive-contrib")) + }.getMessage + assert(e2.contains("Invalid Ivy URI authority in uri ivy://org.apache.hive:hive-contrib:" + + " Expected 'org:module:version', found org.apache.hive:hive-contrib.")) + + val e3 = intercept[IllegalArgumentException] { + DependencyUtils.resolveMavenDependencies( + URI.create("ivy://org.apache.hive:hive-contrib:2.3.7?foo=")) + }.getMessage + assert(e3.contains("Invalid query string in Ivy URI" + + " ivy://org.apache.hive:hive-contrib:2.3.7?foo=:")) + + val e4 = intercept[IllegalArgumentException] { + DependencyUtils.resolveMavenDependencies( + URI.create("ivy://org.apache.hive:hive-contrib:2.3.7?bar=&baz=foo")) + }.getMessage + assert(e4.contains("Invalid query string in Ivy URI" + + " ivy://org.apache.hive:hive-contrib:2.3.7?bar=&baz=foo: bar=&baz=foo")) + + val e5 = intercept[IllegalArgumentException] { + DependencyUtils.resolveMavenDependencies( + URI.create("ivy://org.apache.hive:hive-contrib:2.3.7?exclude=org.pentaho")) + }.getMessage + assert(e5.contains("Invalid exclude string in Ivy URI" + + " ivy://org.apache.hive:hive-contrib:2.3.7?exclude=org.pentaho:" + + " expected 'org:module,org:module,..', found org.pentaho")) + } +} diff --git a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala index 2ae51f425dcb5..7640c17166222 100644 --- a/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/JsonProtocolSuite.scala @@ -94,12 +94,18 @@ class JsonProtocolSuite extends SparkFunSuite { val executorAdded = SparkListenerExecutorAdded(executorAddedTime, "exec1", new ExecutorInfo("Hostee.awesome.com", 11, logUrlMap, attributes, resources.toMap, 4)) val executorRemoved = SparkListenerExecutorRemoved(executorRemovedTime, "exec2", "test reason") - val executorBlacklisted = SparkListenerExecutorBlacklisted(executorBlacklistedTime, "exec1", 22) + val executorBlacklisted = SparkListenerExecutorBlacklisted(executorExcludedTime, "exec1", 22) val executorUnblacklisted = - SparkListenerExecutorUnblacklisted(executorUnblacklistedTime, "exec1") - val nodeBlacklisted = SparkListenerNodeBlacklisted(nodeBlacklistedTime, "node1", 33) + SparkListenerExecutorUnblacklisted(executorUnexcludedTime, "exec1") + val nodeBlacklisted = SparkListenerNodeBlacklisted(nodeExcludedTime, "node1", 33) + val executorExcluded = SparkListenerExecutorExcluded(executorExcludedTime, "exec1", 22) + val executorUnexcluded = + SparkListenerExecutorUnexcluded(executorUnexcludedTime, "exec1") + val nodeExcluded = SparkListenerNodeExcluded(nodeExcludedTime, "node1", 33) val nodeUnblacklisted = - SparkListenerNodeUnblacklisted(nodeUnblacklistedTime, "node1") + SparkListenerNodeUnblacklisted(nodeUnexcludedTime, "node1") + val nodeUnexcluded = + SparkListenerNodeUnexcluded(nodeUnexcludedTime, "node1") val executorMetricsUpdate = { // Use custom accum ID for determinism val accumUpdates = @@ -147,8 +153,12 @@ class 
JsonProtocolSuite extends SparkFunSuite { testEvent(executorRemoved, executorRemovedJsonString) testEvent(executorBlacklisted, executorBlacklistedJsonString) testEvent(executorUnblacklisted, executorUnblacklistedJsonString) + testEvent(executorExcluded, executorExcludedJsonString) + testEvent(executorUnexcluded, executorUnexcludedJsonString) testEvent(nodeBlacklisted, nodeBlacklistedJsonString) testEvent(nodeUnblacklisted, nodeUnblacklistedJsonString) + testEvent(nodeExcluded, nodeExcludedJsonString) + testEvent(nodeUnexcluded, nodeUnexcludedJsonString) testEvent(executorMetricsUpdate, executorMetricsUpdateJsonString) testEvent(blockUpdated, blockUpdatedJsonString) testEvent(stageExecutorMetrics, stageExecutorMetricsJsonString) @@ -496,9 +506,9 @@ class JsonProtocolSuite extends SparkFunSuite { val oldExecutorMetricsJson = JsonProtocol.executorMetricsToJson(executorMetrics) .removeField( _._1 == "MappedPoolMemory") - val exepectedExecutorMetrics = new ExecutorMetrics(Array(12L, 23L, 45L, 67L, + val expectedExecutorMetrics = new ExecutorMetrics(Array(12L, 23L, 45L, 67L, 78L, 89L, 90L, 123L, 456L, 0L, 40L, 20L, 20L, 10L, 20L, 10L)) - assertEquals(exepectedExecutorMetrics, + assertEquals(expectedExecutorMetrics, JsonProtocol.executorMetricsFromJson(oldExecutorMetricsJson)) } @@ -598,10 +608,10 @@ private[spark] object JsonProtocolSuite extends Assertions { private val jobCompletionTime = 1421191296660L private val executorAddedTime = 1421458410000L private val executorRemovedTime = 1421458922000L - private val executorBlacklistedTime = 1421458932000L - private val executorUnblacklistedTime = 1421458942000L - private val nodeBlacklistedTime = 1421458952000L - private val nodeUnblacklistedTime = 1421458962000L + private val executorExcludedTime = 1421458932000L + private val executorUnexcludedTime = 1421458942000L + private val nodeExcludedTime = 1421458952000L + private val nodeUnexcludedTime = 1421458962000L private def testEvent(event: SparkListenerEvent, jsonString: String): Unit = { val actualJsonString = compact(render(JsonProtocol.sparkEventToJson(event))) @@ -968,8 +978,8 @@ private[spark] object JsonProtocolSuite extends Assertions { private val stackTrace = { Array[StackTraceElement]( new StackTraceElement("Apollo", "Venus", "Mercury", 42), - new StackTraceElement("Afollo", "Vemus", "Mercurry", 420), - new StackTraceElement("Ayollo", "Vesus", "Blackberry", 4200) + new StackTraceElement("Afollo", "Vemus", "Mercurry", 420), /* odd spellings intentional */ + new StackTraceElement("Ayollo", "Vesus", "Blackberry", 4200) /* odd spellings intentional */ ) } @@ -2415,36 +2425,70 @@ private[spark] object JsonProtocolSuite extends Assertions { s""" |{ | "Event" : "org.apache.spark.scheduler.SparkListenerExecutorBlacklisted", - | "time" : ${executorBlacklistedTime}, + | "time" : ${executorExcludedTime}, | "executorId" : "exec1", | "taskFailures" : 22 |} """.stripMargin + private val executorExcludedJsonString = + s""" + |{ + | "Event" : "org.apache.spark.scheduler.SparkListenerExecutorExcluded", + | "time" : ${executorExcludedTime}, + | "executorId" : "exec1", + | "taskFailures" : 22 + |} + """.stripMargin private val executorUnblacklistedJsonString = s""" |{ | "Event" : "org.apache.spark.scheduler.SparkListenerExecutorUnblacklisted", - | "time" : ${executorUnblacklistedTime}, + | "time" : ${executorUnexcludedTime}, | "executorId" : "exec1" |} """.stripMargin + private val executorUnexcludedJsonString = + s""" + |{ + | "Event" : "org.apache.spark.scheduler.SparkListenerExecutorUnexcluded", + | 
"time" : ${executorUnexcludedTime}, + | "executorId" : "exec1" + |} + """.stripMargin private val nodeBlacklistedJsonString = s""" |{ | "Event" : "org.apache.spark.scheduler.SparkListenerNodeBlacklisted", - | "time" : ${nodeBlacklistedTime}, + | "time" : ${nodeExcludedTime}, | "hostId" : "node1", | "executorFailures" : 33 |} """.stripMargin + private val nodeExcludedJsonString = + s""" + |{ + | "Event" : "org.apache.spark.scheduler.SparkListenerNodeExcluded", + | "time" : ${nodeExcludedTime}, + | "hostId" : "node1", + | "executorFailures" : 33 + |} + """.stripMargin private val nodeUnblacklistedJsonString = s""" |{ | "Event" : "org.apache.spark.scheduler.SparkListenerNodeUnblacklisted", - | "time" : ${nodeUnblacklistedTime}, + | "time" : ${nodeUnexcludedTime}, | "hostId" : "node1" |} """.stripMargin + private val nodeUnexcludedJsonString = + s""" + |{ + | "Event" : "org.apache.spark.scheduler.SparkListenerNodeUnexcluded", + | "time" : ${nodeUnexcludedTime}, + | "hostId" : "node1" + |} + """.stripMargin private val resourceProfileJsonString = """ |{ diff --git a/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala b/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala index 6183ba9faa6b4..d669f2c655abb 100644 --- a/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/SizeEstimatorSuite.scala @@ -94,7 +94,7 @@ class SizeEstimatorSuite override def beforeEach(): Unit = { super.beforeEach() // Set the arch to 64-bit and compressedOops to true so that SizeEstimator - // provides identical results accross all systems in these tests. + // provides identical results across all systems in these tests. reinitializeSizeEstimator("amd64", "true") } diff --git a/core/src/test/scala/org/apache/spark/util/SparkUncaughtExceptionHandlerSuite.scala b/core/src/test/scala/org/apache/spark/util/SparkUncaughtExceptionHandlerSuite.scala index 90741a6bde7f0..9e23b25493dfe 100644 --- a/core/src/test/scala/org/apache/spark/util/SparkUncaughtExceptionHandlerSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/SparkUncaughtExceptionHandlerSuite.scala @@ -80,7 +80,7 @@ object ThrowableThrower { // a thread that uses SparkUncaughtExceptionHandler and throws a Throwable by name class ThrowerThread(name: String, exitOnUncaughtException: Boolean) extends Thread { - override def run() { + override def run(): Unit = { Thread.setDefaultUncaughtExceptionHandler( new SparkUncaughtExceptionHandler(exitOnUncaughtException)) throw ThrowableTypes.getThrowableByName(name) diff --git a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala index 7ec7c5afca1df..8fb408041ca9d 100644 --- a/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/UtilsSuite.scala @@ -18,8 +18,7 @@ package org.apache.spark.util import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataOutput, DataOutputStream, File, - FileOutputStream, InputStream, PrintStream, SequenceInputStream} -import java.lang.{Double => JDouble, Float => JFloat} + FileOutputStream, PrintStream, SequenceInputStream} import java.lang.reflect.Field import java.net.{BindException, ServerSocket, URI} import java.nio.{ByteBuffer, ByteOrder} @@ -42,6 +41,7 @@ import org.apache.hadoop.fs.Path import org.apache.spark.{SparkConf, SparkException, SparkFunSuite, TaskContext} import org.apache.spark.internal.Logging import org.apache.spark.internal.config._ +import 
org.apache.spark.internal.config.Tests.IS_TESTING import org.apache.spark.network.util.ByteUnit import org.apache.spark.scheduler.SparkListener import org.apache.spark.util.io.ChunkedByteBufferInputStream @@ -1406,6 +1406,44 @@ class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging { assert(hostnamePort._1.equals("localhost")) assert(hostnamePort._2 === 0) } + + test("executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is false") { + val executorOffHeapMemory = Utils.executorOffHeapMemorySizeAsMb(new SparkConf()) + assert(executorOffHeapMemory == 0) + } + + test("executorOffHeapMemorySizeAsMb when MEMORY_OFFHEAP_ENABLED is true") { + val offHeapMemoryInMB = 50 + val offHeapMemory: Long = offHeapMemoryInMB * 1024 * 1024 + val sparkConf = new SparkConf() + .set(MEMORY_OFFHEAP_ENABLED, true) + .set(MEMORY_OFFHEAP_SIZE, offHeapMemory) + val executorOffHeapMemory = Utils.executorOffHeapMemorySizeAsMb(sparkConf) + assert(executorOffHeapMemory == offHeapMemoryInMB) + } + + test("executorMemoryOverhead when MEMORY_OFFHEAP_ENABLED is true, " + + "but MEMORY_OFFHEAP_SIZE not config scene") { + val sparkConf = new SparkConf() + .set(MEMORY_OFFHEAP_ENABLED, true) + val expected = + s"${MEMORY_OFFHEAP_SIZE.key} must be > 0 when ${MEMORY_OFFHEAP_ENABLED.key} == true" + val message = intercept[IllegalArgumentException] { + Utils.executorOffHeapMemorySizeAsMb(sparkConf) + }.getMessage + assert(message.contains(expected)) + } + + test("isPushBasedShuffleEnabled when both PUSH_BASED_SHUFFLE_ENABLED" + + " and SHUFFLE_SERVICE_ENABLED are true") { + val conf = new SparkConf() + assert(Utils.isPushBasedShuffleEnabled(conf) === false) + conf.set(PUSH_BASED_SHUFFLE_ENABLED, true) + conf.set(IS_TESTING, false) + assert(Utils.isPushBasedShuffleEnabled(conf) === false) + conf.set(SHUFFLE_SERVICE_ENABLED, true) + assert(Utils.isPushBasedShuffleEnabled(conf) === true) + } } private class SimpleExtension diff --git a/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala index 83595ba22aa57..81a145906d33c 100644 --- a/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala +++ b/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala @@ -220,13 +220,13 @@ class ExternalAppendOnlyMapSuite extends SparkFunSuite testSimpleSpilling() } - test("spilling with compression") { + private def testSimpleSpillingForAllCodecs(encrypt: Boolean) { // Keep track of which compression codec we're using to report in test failure messages var lastCompressionCodec: Option[String] = None try { allCompressionCodecs.foreach { c => lastCompressionCodec = Some(c) - testSimpleSpilling(Some(c)) + testSimpleSpilling(Some(c), encrypt) } } catch { // Include compression codec used in test failure message @@ -241,8 +241,12 @@ class ExternalAppendOnlyMapSuite extends SparkFunSuite } } + test("spilling with compression") { + testSimpleSpillingForAllCodecs(encrypt = false) + } + test("spilling with compression and encryption") { - testSimpleSpilling(Some(CompressionCodec.DEFAULT_COMPRESSION_CODEC), encrypt = true) + testSimpleSpillingForAllCodecs(encrypt = true) } /** diff --git a/dev/.rat-excludes b/dev/.rat-excludes index df1dd51a7c519..167cf224f92c2 100644 --- a/dev/.rat-excludes +++ b/dev/.rat-excludes @@ -25,7 +25,7 @@ bootstrap.bundle.min.js bootstrap.bundle.min.js.map bootstrap.min.css bootstrap.min.css.map -jquery-3.4.1.min.js 
+jquery-3.5.1.min.js d3.min.js dagre-d3.min.js graphlib-dot.min.js @@ -42,11 +42,11 @@ jquery.dataTables.1.10.20.min.js jquery.mustache.js jsonFormatter.min.css jsonFormatter.min.js -.*avsc -.*txt -.*json -.*data -.*log +.*\.avsc +.*\.txt +.*\.json +.*\.data +.*\.log pyspark-coverage-site/* cloudpickle/* join.py @@ -98,17 +98,17 @@ local-1430917381535_2 DESCRIPTION NAMESPACE test_support/* -.*Rd +.*\.Rd help/* html/* INDEX .lintr gen-java.* -.*avpr -.*parquet +.*\.avpr +.*\.parquet spark-deps-.* -.*csv -.*tsv +.*\.csv +.*\.tsv .*\.sql .Rbuildignore META-INF/* @@ -123,4 +123,14 @@ SessionHandler.java GangliaReporter.java application_1578436911597_0052 config.properties +local-1596020211915 app-20200706201101-0003 +py.typed +_metadata +_SUCCESS +part-00000 +.*\.res +flights_tiny.txt.1 +over1k +over10k +exported_table/* diff --git a/dev/appveyor-guide.md b/dev/appveyor-guide.md index a8c0c1ef23ac3..c68b5de9e61d0 100644 --- a/dev/appveyor-guide.md +++ b/dev/appveyor-guide.md @@ -33,22 +33,22 @@ Currently, SparkR on Windows is being tested with [AppVeyor](https://ci.appveyor 2016-09-04 11 07 58 -- Click "Github". +- Click "GitHub". 2016-09-04 11 08 10 -#### After signing up, go to profile to link Github and AppVeyor. +#### After signing up, go to profile to link GitHub and AppVeyor. - Click your account and then click "Profile". 2016-09-04 11 09 43 -- Enable the link with GitHub via clicking "Link Github account". +- Enable the link with GitHub via clicking "Link GitHub account". 2016-09-04 11 09 52 -- Click "Authorize application" in Github site. +- Click "Authorize application" in GitHub site. 2016-09-04 11 10 05 @@ -63,11 +63,11 @@ Currently, SparkR on Windows is being tested with [AppVeyor](https://ci.appveyor 2016-08-30 12 16 35 -- Since we will use Github here, click the "GITHUB" button and then click "Authorize Github" so that AppVeyor can access the Github logs (e.g. commits). +- Since we will use GitHub here, click the "GITHUB" button and then click "Authorize GitHub" so that AppVeyor can access the GitHub logs (e.g. commits). 2016-09-04 11 10 22 -- Click "Authorize application" from Github (the above step will pop up this page). +- Click "Authorize application" from GitHub (the above step will pop up this page). 2016-09-04 11 10 27 diff --git a/dev/appveyor-install-dependencies.ps1 b/dev/appveyor-install-dependencies.ps1 index e344a7fc23191..fb4cc22de35f4 100644 --- a/dev/appveyor-install-dependencies.ps1 +++ b/dev/appveyor-install-dependencies.ps1 @@ -19,7 +19,7 @@ $CRAN = "https://cloud.r-project.org" Function InstallR { if ( -not(Test-Path Env:\R_ARCH) ) { - $arch = "i386" + $arch = "x64" } Else { $arch = $env:R_ARCH @@ -68,7 +68,7 @@ Function InstallRtools { $gccPath = $env:GCC_PATH } $env:PATH = $RtoolsDrive + '\Rtools40\bin;' + $RtoolsDrive + '\Rtools40\mingw64\bin;' + $RtoolsDrive + '\Rtools40\' + $gccPath + '\bin;' + $env:PATH - $env:BINPREF=$RtoolsDrive + '/Rtools40/mingw64/bin/' + $env:BINPREF=$RtoolsDrive + '/Rtools40/mingw$(WIN)/bin/' } # create tools directory outside of Spark directory diff --git a/dev/check-license b/dev/check-license index 0cc17ffe55c67..bd255954d6db4 100755 --- a/dev/check-license +++ b/dev/check-license @@ -67,7 +67,7 @@ mkdir -p "$FWDIR"/lib exit 1 } -mkdir target +mkdir -p target $java_cmd -jar "$rat_jar" -E "$FWDIR"/dev/.rat-excludes -d "$FWDIR" > target/rat-results.txt if [ $? 
-ne 0 ]; then diff --git a/dev/create-release/known_translations b/dev/create-release/known_translations index ff41cccde0140..64bd9ada1bf61 100644 --- a/dev/create-release/known_translations +++ b/dev/create-release/known_translations @@ -1,5 +1,5 @@ # This is a mapping of names to be translated through translate-contributors.py -# The format expected on each line should be: - +# The format expected on each line should be: - 012huang - Weiyi Huang 07ARB - Ankit Raj Boudh 10110346 - Xian Liu diff --git a/dev/create-release/release-build.sh b/dev/create-release/release-build.sh index d948ee4bee08d..d2953a86afafd 100755 --- a/dev/create-release/release-build.sh +++ b/dev/create-release/release-build.sh @@ -182,8 +182,7 @@ if [[ "$1" == "package" ]]; then tar cvzf spark-$SPARK_VERSION.tgz --exclude spark-$SPARK_VERSION/.git spark-$SPARK_VERSION echo $GPG_PASSPHRASE | $GPG --passphrase-fd 0 --armour --output spark-$SPARK_VERSION.tgz.asc \ --detach-sig spark-$SPARK_VERSION.tgz - echo $GPG_PASSPHRASE | $GPG --passphrase-fd 0 --print-md \ - SHA512 spark-$SPARK_VERSION.tgz > spark-$SPARK_VERSION.tgz.sha512 + shasum -a 512 spark-$SPARK_VERSION.tgz > spark-$SPARK_VERSION.tgz.sha512 rm -rf spark-$SPARK_VERSION ZINC_PORT=3035 @@ -275,6 +274,9 @@ if [[ "$1" == "package" ]]; then # In dry run mode, only build the first one. The keys in BINARY_PKGS_ARGS are used as the # list of packages to be built, so it's ok for things to be missing in BINARY_PKGS_EXTRA. + # NOTE: Don't forget to update the valid combinations of distributions at + # 'python/pyspark/install.py' and 'python/docs/source/getting_started/install.rst' + # if you're changing them. declare -A BINARY_PKGS_ARGS BINARY_PKGS_ARGS["hadoop3.2"]="-Phadoop-3.2 $HIVE_PROFILES" if ! is_dry_run; then @@ -282,7 +284,6 @@ if [[ "$1" == "package" ]]; then if [[ $SPARK_VERSION < "3.0." ]]; then BINARY_PKGS_ARGS["hadoop2.6"]="-Phadoop-2.6 $HIVE_PROFILES" else - BINARY_PKGS_ARGS["hadoop2.7-hive1.2"]="-Phadoop-2.7 -Phive-1.2 $HIVE_PROFILES" BINARY_PKGS_ARGS["hadoop2.7"]="-Phadoop-2.7 $HIVE_PROFILES" fi fi @@ -451,7 +452,7 @@ if [[ "$1" == "publish-release" ]]; then if ! is_dry_run; then nexus_upload=$NEXUS_ROOT/deployByRepositoryId/$staged_repo_id - echo "Uplading files to $nexus_upload" + echo "Uploading files to $nexus_upload" for file in $(find . -type f) do # strip leading ./ diff --git a/dev/create-release/releaseutils.py b/dev/create-release/releaseutils.py index cc7ad931198a2..a0e9695d58361 100755 --- a/dev/create-release/releaseutils.py +++ b/dev/create-release/releaseutils.py @@ -110,7 +110,7 @@ def __str__(self): # Under the hood, this runs a `git log` on that tag and parses the fields # from the command output to construct a list of Commit objects. Note that # because certain fields reside in the commit description and cannot be parsed -# through the Github API itself, we need to do some intelligent regex parsing +# through the GitHub API itself, we need to do some intelligent regex parsing # to extract those fields. # # This is written using Git 1.8.5. 
@@ -140,7 +140,7 @@ def get_commits(tag): sys.exit("Unexpected format in commit: %s" % commit_digest) [_hash, author, title] = commit_digest.split(field_end_marker) # The PR number and github username is in the commit message - # itself and cannot be accessed through any Github API + # itself and cannot be accessed through any GitHub API pr_number = None match = re.search("Closes #([0-9]+) from ([^/\\s]+)/", commit_body) if match: @@ -252,7 +252,7 @@ def nice_join(str_list): return ", ".join(str_list[:-1]) + ", and " + str_list[-1] -# Return the full name of the specified user on Github +# Return the full name of the specified user on GitHub # If the user doesn't exist, return None def get_github_name(author, github_client): if github_client: diff --git a/dev/create-release/spark-rm/Dockerfile b/dev/create-release/spark-rm/Dockerfile index 4e007a5eeb93a..8735d1fd23ce2 100644 --- a/dev/create-release/spark-rm/Dockerfile +++ b/dev/create-release/spark-rm/Dockerfile @@ -15,16 +15,20 @@ # limitations under the License. # -# Image for building Spark releases. Based on Ubuntu 18.04. +# Image for building Spark releases. Based on Ubuntu 20.04. # # Includes: # * Java 8 # * Ivy -# * Python (2.7.15/3.6.7) -# * R-base/R-base-dev (4.0.2) -# * Ruby 2.3 build utilities +# * Python (3.8.5) +# * R-base/R-base-dev (4.0.3) +# * Ruby (2.7.0) +# +# You can test it as below: +# cd dev/create-release/spark-rm +# docker build -t spark-rm --build-arg UID=$UID . -FROM ubuntu:18.04 +FROM ubuntu:20.04 # For apt to be noninteractive ENV DEBIAN_FRONTEND noninteractive @@ -36,8 +40,8 @@ ARG APT_INSTALL="apt-get install --no-install-recommends -y" # TODO(SPARK-32407): Sphinx 3.1+ does not correctly index nested classes. # See also https://github.com/sphinx-doc/sphinx/issues/7551. # We should use the latest Sphinx version once this is fixed. -ARG PIP_PKGS="sphinx==3.0.4 mkdocs==1.0.4 numpy==1.18.1 pydata_sphinx_theme==0.3.1 ipython==7.16.1 nbsphinx==0.7.1" -ARG GEM_PKGS="jekyll:4.0.0 jekyll-redirect-from:0.16.0 rouge:3.15.0" +ARG PIP_PKGS="sphinx==3.0.4 mkdocs==1.1.2 numpy==1.19.4 pydata_sphinx_theme==0.4.1 ipython==7.19.0 nbsphinx==0.8.0 numpydoc==1.1.0" +ARG GEM_PKGS="jekyll:4.2.0 jekyll-redirect-from:0.16.0 rouge:3.26.0" # Install extra needed repos and refresh. # - CRAN repo @@ -46,7 +50,7 @@ ARG GEM_PKGS="jekyll:4.0.0 jekyll-redirect-from:0.16.0 rouge:3.15.0" # This is all in a single "RUN" command so that if anything changes, "apt update" is run to fetch # the most current package versions (instead of potentially using old versions cached by docker). RUN apt-get clean && apt-get update && $APT_INSTALL gnupg ca-certificates && \ - echo 'deb https://cloud.r-project.org/bin/linux/ubuntu bionic-cran40/' >> /etc/apt/sources.list && \ + echo 'deb https://cloud.r-project.org/bin/linux/ubuntu focal-cran40/' >> /etc/apt/sources.list && \ gpg --keyserver keyserver.ubuntu.com --recv-key E298A3A825C0D65DFD57CBB651716619E084DAB9 && \ gpg -a --export E084DAB9 | apt-key add - && \ apt-get clean && \ @@ -54,7 +58,6 @@ RUN apt-get clean && apt-get update && $APT_INSTALL gnupg ca-certificates && \ apt-get clean && \ apt-get update && \ $APT_INSTALL software-properties-common && \ - apt-add-repository -y ppa:brightbox/ruby-ng && \ apt-get update && \ # Install openjdk 8. 
$APT_INSTALL openjdk-8-jdk && \ @@ -62,26 +65,23 @@ RUN apt-get clean && apt-get update && $APT_INSTALL gnupg ca-certificates && \ # Install build / source control tools $APT_INSTALL curl wget git maven ivy subversion make gcc lsof libffi-dev \ pandoc pandoc-citeproc libssl-dev libcurl4-openssl-dev libxml2-dev && \ - curl -sL https://deb.nodesource.com/setup_11.x | bash && \ + curl -sL https://deb.nodesource.com/setup_12.x | bash && \ $APT_INSTALL nodejs && \ # Install needed python packages. Use pip for installing packages (for consistency). - $APT_INSTALL libpython3-dev python3-pip python3-setuptools && \ + $APT_INSTALL python3-pip python3-setuptools && \ # qpdf is required for CRAN checks to pass. $APT_INSTALL qpdf jq && \ - # Change default python version to python3. - update-alternatives --install /usr/bin/python python /usr/bin/python2.7 1 && \ - update-alternatives --install /usr/bin/python python /usr/bin/python3.6 2 && \ - update-alternatives --set python /usr/bin/python3.6 && \ pip3 install $PIP_PKGS && \ # Install R packages and dependencies used when building. # R depends on pandoc*, libssl (which are installed above). # Note that PySpark doc generation also needs pandoc due to nbsphinx $APT_INSTALL r-base r-base-dev && \ + $APT_INSTALL libcurl4-openssl-dev libgit2-dev libssl-dev libxml2-dev && \ $APT_INSTALL texlive-latex-base texlive texlive-fonts-extra texinfo qpdf && \ Rscript -e "install.packages(c('curl', 'xml2', 'httr', 'devtools', 'testthat', 'knitr', 'rmarkdown', 'roxygen2', 'e1071', 'survival'), repos='https://cloud.r-project.org/')" && \ Rscript -e "devtools::install_github('jimhester/lintr')" && \ # Install tools needed to build the documentation. - $APT_INSTALL ruby2.5 ruby2.5-dev && \ + $APT_INSTALL ruby2.7 ruby2.7-dev && \ gem install --no-document $GEM_PKGS WORKDIR /opt/spark-rm/output diff --git a/dev/create-release/translate-contributors.py b/dev/create-release/translate-contributors.py index 8340266527fc6..be5611ce65a7d 100755 --- a/dev/create-release/translate-contributors.py +++ b/dev/create-release/translate-contributors.py @@ -17,7 +17,7 @@ # This script translates invalid authors in the contributors list generated # by generate-contributors.py. When the script encounters an author name that -# is considered invalid, it searches Github and JIRA in an attempt to search +# is considered invalid, it searches GitHub and JIRA in an attempt to search # for replacements. This tool runs in two modes: # # (1) Interactive mode: For each invalid author name, this script presents @@ -68,7 +68,7 @@ if INTERACTIVE_MODE: print("Running in interactive mode. To disable this, provide the --non-interactive flag.") -# Setup Github and JIRA clients +# Setup GitHub and JIRA clients jira_options = {"server": JIRA_API_BASE} jira_client = JIRA(options=jira_options, basic_auth=(JIRA_USERNAME, JIRA_PASSWORD)) github_client = Github(GITHUB_API_TOKEN) @@ -89,11 +89,11 @@ # Generate candidates for the given author. This should only be called if the given author # name does not represent a full name as this operation is somewhat expensive. Under the -# hood, it makes several calls to the Github and JIRA API servers to find the candidates. +# hood, it makes several calls to the GitHub and JIRA API servers to find the candidates. # # This returns a list of (candidate name, source) 2-tuples. E.g. 
# [ -# (NOT_FOUND, "No full name found for Github user andrewor14"), +# (NOT_FOUND, "No full name found for GitHub user andrewor14"), # ("Andrew Or", "Full name of JIRA user andrewor14"), # ("Andrew Orso", "Full name of SPARK-1444 assignee andrewor14"), # ("Andrew Ordall", "Full name of SPARK-1663 assignee andrewor14"), @@ -104,12 +104,12 @@ def generate_candidates(author, issues): candidates = [] - # First check for full name of Github user + # First check for full name of GitHub user github_name = get_github_name(author, github_client) if github_name: - candidates.append((github_name, "Full name of Github user %s" % author)) + candidates.append((github_name, "Full name of GitHub user %s" % author)) else: - candidates.append((NOT_FOUND, "No full name found for Github user %s" % author)) + candidates.append((NOT_FOUND, "No full name found for GitHub user %s" % author)) # Then do the same for JIRA user jira_name = get_jira_name(author, jira_client) if jira_name: @@ -151,7 +151,7 @@ def generate_candidates(author, issues): candidates[i] = (candidate, source) return candidates -# Translate each invalid author by searching for possible candidates from Github and JIRA +# Translate each invalid author by searching for possible candidates from GitHub and JIRA # In interactive mode, this script presents the user with a list of choices and have the user # select from this list. Additionally, the user may also choose to enter a custom name. # In non-interactive mode, this script picks the first valid author name from the candidates @@ -180,12 +180,12 @@ def generate_candidates(author, issues): issues = temp_author.split("/")[1:] candidates = generate_candidates(author, issues) # Print out potential replacement candidates along with the sources, e.g. - # [X] No full name found for Github user andrewor14 + # [X] No full name found for GitHub user andrewor14 # [X] No assignee found for SPARK-1763 # [0] Andrew Or - Full name of JIRA user andrewor14 # [1] Andrew Orso - Full name of SPARK-1444 assignee andrewor14 # [2] Andrew Ordall - Full name of SPARK-1663 assignee andrewor14 - # [3] andrewor14 - Raw Github username + # [3] andrewor14 - Raw GitHub username # [4] Custom candidate_names = [] bad_prompts = [] # Prompts that can't actually be selected; print these first. 
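To make the candidate handling above easier to follow, here is a hedged sketch of the non-interactive path: walk the `(candidate name, source)` pairs and keep the first valid full name, falling back to the raw login. `is_valid_author` and the sample data are illustrative stand-ins, not the script's actual helpers:

```python
NOT_FOUND = "Not found"

# Candidate list shaped like generate_candidates() output: (name, source) pairs.
candidates = [
    (NOT_FOUND, "No full name found for GitHub user andrewor14"),
    ("Andrew Or", "Full name of JIRA user andrewor14"),
    ("Andrew Orso", "Full name of SPARK-1444 assignee andrewor14"),
]


def is_valid_author(name: str) -> bool:
    # Stand-in heuristic: treat anything with a space as a real "First Last" name.
    # The real script has its own validity check; this is only for illustration.
    return name != NOT_FOUND and " " in name


def pick_non_interactive(author: str, candidates) -> str:
    """Mimic non-interactive mode: the first valid candidate wins, else keep the raw login."""
    for name, source in candidates:
        if is_valid_author(name):
            return name
    return author


print(pick_non_interactive("andrewor14", candidates))  # -> "Andrew Or"
```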
@@ -207,7 +207,7 @@ def generate_candidates(author, issues): print(p) # In interactive mode, additionally provide "custom" option and await user response if INTERACTIVE_MODE: - print(" [%d] %s - Raw Github username" % (raw_index, author)) + print(" [%d] %s - Raw GitHub username" % (raw_index, author)) print(" [%d] Custom" % custom_index) response = raw_input(" Your choice: ") last_index = custom_index diff --git a/dev/deps/spark-deps-hadoop-2.7-hive-1.2 b/dev/deps/spark-deps-hadoop-2.7-hive-1.2 deleted file mode 100644 index 900ee6d18d06d..0000000000000 --- a/dev/deps/spark-deps-hadoop-2.7-hive-1.2 +++ /dev/null @@ -1,212 +0,0 @@ -JLargeArrays/1.5//JLargeArrays-1.5.jar -JTransforms/3.1//JTransforms-3.1.jar -JavaEWAH/0.3.2//JavaEWAH-0.3.2.jar -RoaringBitmap/0.9.0//RoaringBitmap-0.9.0.jar -ST4/4.0.4//ST4-4.0.4.jar -activation/1.1.1//activation-1.1.1.jar -aircompressor/0.10//aircompressor-0.10.jar -algebra_2.12/2.0.0-M2//algebra_2.12-2.0.0-M2.jar -antlr-runtime/3.4//antlr-runtime-3.4.jar -antlr/2.7.7//antlr-2.7.7.jar -antlr4-runtime/4.7.1//antlr4-runtime-4.7.1.jar -aopalliance-repackaged/2.6.1//aopalliance-repackaged-2.6.1.jar -aopalliance/1.0//aopalliance-1.0.jar -apache-log4j-extras/1.2.17//apache-log4j-extras-1.2.17.jar -apacheds-i18n/2.0.0-M15//apacheds-i18n-2.0.0-M15.jar -apacheds-kerberos-codec/2.0.0-M15//apacheds-kerberos-codec-2.0.0-M15.jar -api-asn1-api/1.0.0-M20//api-asn1-api-1.0.0-M20.jar -api-util/1.0.0-M20//api-util-1.0.0-M20.jar -arpack_combined_all/0.1//arpack_combined_all-0.1.jar -arrow-format/1.0.1//arrow-format-1.0.1.jar -arrow-memory-core/1.0.1//arrow-memory-core-1.0.1.jar -arrow-memory-netty/1.0.1//arrow-memory-netty-1.0.1.jar -arrow-vector/1.0.1//arrow-vector-1.0.1.jar -audience-annotations/0.5.0//audience-annotations-0.5.0.jar -automaton/1.11-8//automaton-1.11-8.jar -avro-ipc/1.8.2//avro-ipc-1.8.2.jar -avro-mapred/1.8.2/hadoop2/avro-mapred-1.8.2-hadoop2.jar -avro/1.8.2//avro-1.8.2.jar -bonecp/0.8.0.RELEASE//bonecp-0.8.0.RELEASE.jar -breeze-macros_2.12/1.0//breeze-macros_2.12-1.0.jar -breeze_2.12/1.0//breeze_2.12-1.0.jar -cats-kernel_2.12/2.0.0-M4//cats-kernel_2.12-2.0.0-M4.jar -chill-java/0.9.5//chill-java-0.9.5.jar -chill_2.12/0.9.5//chill_2.12-0.9.5.jar -commons-beanutils/1.9.4//commons-beanutils-1.9.4.jar -commons-cli/1.2//commons-cli-1.2.jar -commons-codec/1.10//commons-codec-1.10.jar -commons-collections/3.2.2//commons-collections-3.2.2.jar -commons-compiler/3.0.16//commons-compiler-3.0.16.jar -commons-compress/1.8.1//commons-compress-1.8.1.jar -commons-configuration/1.6//commons-configuration-1.6.jar -commons-crypto/1.0.0//commons-crypto-1.0.0.jar -commons-dbcp/1.4//commons-dbcp-1.4.jar -commons-digester/1.8//commons-digester-1.8.jar -commons-httpclient/3.1//commons-httpclient-3.1.jar -commons-io/2.4//commons-io-2.4.jar -commons-lang/2.6//commons-lang-2.6.jar -commons-lang3/3.10//commons-lang3-3.10.jar -commons-logging/1.1.3//commons-logging-1.1.3.jar -commons-math3/3.4.1//commons-math3-3.4.1.jar -commons-net/3.1//commons-net-3.1.jar -commons-pool/1.5.4//commons-pool-1.5.4.jar -commons-text/1.6//commons-text-1.6.jar -compress-lzf/1.0.3//compress-lzf-1.0.3.jar -core/1.1.2//core-1.1.2.jar -curator-client/2.7.1//curator-client-2.7.1.jar -curator-framework/2.7.1//curator-framework-2.7.1.jar -curator-recipes/2.7.1//curator-recipes-2.7.1.jar -datanucleus-api-jdo/3.2.6//datanucleus-api-jdo-3.2.6.jar -datanucleus-core/3.2.10//datanucleus-core-3.2.10.jar -datanucleus-rdbms/3.2.9//datanucleus-rdbms-3.2.9.jar -derby/10.12.1.1//derby-10.12.1.1.jar 
-flatbuffers-java/1.9.0//flatbuffers-java-1.9.0.jar -generex/1.0.2//generex-1.0.2.jar -gson/2.2.4//gson-2.2.4.jar -guava/14.0.1//guava-14.0.1.jar -guice-servlet/3.0//guice-servlet-3.0.jar -guice/3.0//guice-3.0.jar -hadoop-annotations/2.7.4//hadoop-annotations-2.7.4.jar -hadoop-auth/2.7.4//hadoop-auth-2.7.4.jar -hadoop-client/2.7.4//hadoop-client-2.7.4.jar -hadoop-common/2.7.4//hadoop-common-2.7.4.jar -hadoop-hdfs/2.7.4//hadoop-hdfs-2.7.4.jar -hadoop-mapreduce-client-app/2.7.4//hadoop-mapreduce-client-app-2.7.4.jar -hadoop-mapreduce-client-common/2.7.4//hadoop-mapreduce-client-common-2.7.4.jar -hadoop-mapreduce-client-core/2.7.4//hadoop-mapreduce-client-core-2.7.4.jar -hadoop-mapreduce-client-jobclient/2.7.4//hadoop-mapreduce-client-jobclient-2.7.4.jar -hadoop-mapreduce-client-shuffle/2.7.4//hadoop-mapreduce-client-shuffle-2.7.4.jar -hadoop-yarn-api/2.7.4//hadoop-yarn-api-2.7.4.jar -hadoop-yarn-client/2.7.4//hadoop-yarn-client-2.7.4.jar -hadoop-yarn-common/2.7.4//hadoop-yarn-common-2.7.4.jar -hadoop-yarn-server-common/2.7.4//hadoop-yarn-server-common-2.7.4.jar -hadoop-yarn-server-web-proxy/2.7.4//hadoop-yarn-server-web-proxy-2.7.4.jar -hk2-api/2.6.1//hk2-api-2.6.1.jar -hk2-locator/2.6.1//hk2-locator-2.6.1.jar -hk2-utils/2.6.1//hk2-utils-2.6.1.jar -htrace-core/3.1.0-incubating//htrace-core-3.1.0-incubating.jar -httpclient/4.5.6//httpclient-4.5.6.jar -httpcore/4.4.12//httpcore-4.4.12.jar -istack-commons-runtime/3.0.8//istack-commons-runtime-3.0.8.jar -ivy/2.4.0//ivy-2.4.0.jar -jackson-annotations/2.10.0//jackson-annotations-2.10.0.jar -jackson-core-asl/1.9.13//jackson-core-asl-1.9.13.jar -jackson-core/2.10.0//jackson-core-2.10.0.jar -jackson-databind/2.10.0//jackson-databind-2.10.0.jar -jackson-dataformat-yaml/2.10.0//jackson-dataformat-yaml-2.10.0.jar -jackson-datatype-jsr310/2.10.3//jackson-datatype-jsr310-2.10.3.jar -jackson-jaxrs/1.9.13//jackson-jaxrs-1.9.13.jar -jackson-mapper-asl/1.9.13//jackson-mapper-asl-1.9.13.jar -jackson-module-jaxb-annotations/2.10.0//jackson-module-jaxb-annotations-2.10.0.jar -jackson-module-paranamer/2.10.0//jackson-module-paranamer-2.10.0.jar -jackson-module-scala_2.12/2.10.0//jackson-module-scala_2.12-2.10.0.jar -jackson-xc/1.9.13//jackson-xc-1.9.13.jar -jakarta.activation-api/1.2.1//jakarta.activation-api-1.2.1.jar -jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar -jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar -jakarta.validation-api/2.0.2//jakarta.validation-api-2.0.2.jar -jakarta.ws.rs-api/2.1.6//jakarta.ws.rs-api-2.1.6.jar -jakarta.xml.bind-api/2.3.2//jakarta.xml.bind-api-2.3.2.jar -janino/3.0.16//janino-3.0.16.jar -javassist/3.25.0-GA//javassist-3.25.0-GA.jar -javax.inject/1//javax.inject-1.jar -javax.servlet-api/3.1.0//javax.servlet-api-3.1.0.jar -javolution/5.5.1//javolution-5.5.1.jar -jaxb-api/2.2.2//jaxb-api-2.2.2.jar -jaxb-runtime/2.3.2//jaxb-runtime-2.3.2.jar -jcl-over-slf4j/1.7.30//jcl-over-slf4j-1.7.30.jar -jdo-api/3.0.1//jdo-api-3.0.1.jar -jersey-client/2.30//jersey-client-2.30.jar -jersey-common/2.30//jersey-common-2.30.jar -jersey-container-servlet-core/2.30//jersey-container-servlet-core-2.30.jar -jersey-container-servlet/2.30//jersey-container-servlet-2.30.jar -jersey-hk2/2.30//jersey-hk2-2.30.jar -jersey-media-jaxb/2.30//jersey-media-jaxb-2.30.jar -jersey-server/2.30//jersey-server-2.30.jar -jetty-sslengine/6.1.26//jetty-sslengine-6.1.26.jar -jetty-util/6.1.26//jetty-util-6.1.26.jar -jetty/6.1.26//jetty-6.1.26.jar -jline/2.14.6//jline-2.14.6.jar -joda-time/2.10.5//joda-time-2.10.5.jar -jodd-core/3.5.2//jodd-core-3.5.2.jar 
-jpam/1.1//jpam-1.1.jar -json4s-ast_2.12/3.7.0-M5//json4s-ast_2.12-3.7.0-M5.jar -json4s-core_2.12/3.7.0-M5//json4s-core_2.12-3.7.0-M5.jar -json4s-jackson_2.12/3.7.0-M5//json4s-jackson_2.12-3.7.0-M5.jar -json4s-scalap_2.12/3.7.0-M5//json4s-scalap_2.12-3.7.0-M5.jar -jsp-api/2.1//jsp-api-2.1.jar -jsr305/3.0.0//jsr305-3.0.0.jar -jta/1.1//jta-1.1.jar -jul-to-slf4j/1.7.30//jul-to-slf4j-1.7.30.jar -kryo-shaded/4.0.2//kryo-shaded-4.0.2.jar -kubernetes-client/4.9.2//kubernetes-client-4.9.2.jar -kubernetes-model-common/4.9.2//kubernetes-model-common-4.9.2.jar -kubernetes-model/4.9.2//kubernetes-model-4.9.2.jar -leveldbjni-all/1.8//leveldbjni-all-1.8.jar -libfb303/0.9.3//libfb303-0.9.3.jar -libthrift/0.12.0//libthrift-0.12.0.jar -log4j/1.2.17//log4j-1.2.17.jar -logging-interceptor/3.12.6//logging-interceptor-3.12.6.jar -lz4-java/1.7.1//lz4-java-1.7.1.jar -machinist_2.12/0.6.8//machinist_2.12-0.6.8.jar -macro-compat_2.12/1.1.1//macro-compat_2.12-1.1.1.jar -mesos/1.4.0/shaded-protobuf/mesos-1.4.0-shaded-protobuf.jar -metrics-core/4.1.1//metrics-core-4.1.1.jar -metrics-graphite/4.1.1//metrics-graphite-4.1.1.jar -metrics-jmx/4.1.1//metrics-jmx-4.1.1.jar -metrics-json/4.1.1//metrics-json-4.1.1.jar -metrics-jvm/4.1.1//metrics-jvm-4.1.1.jar -minlog/1.3.0//minlog-1.3.0.jar -netty-all/4.1.51.Final//netty-all-4.1.51.Final.jar -objenesis/2.6//objenesis-2.6.jar -okhttp/3.12.6//okhttp-3.12.6.jar -okio/1.14.0//okio-1.14.0.jar -opencsv/2.3//opencsv-2.3.jar -orc-core/1.5.10/nohive/orc-core-1.5.10-nohive.jar -orc-mapreduce/1.5.10/nohive/orc-mapreduce-1.5.10-nohive.jar -orc-shims/1.5.10//orc-shims-1.5.10.jar -oro/2.0.8//oro-2.0.8.jar -osgi-resource-locator/1.0.3//osgi-resource-locator-1.0.3.jar -paranamer/2.8//paranamer-2.8.jar -parquet-column/1.10.1//parquet-column-1.10.1.jar -parquet-common/1.10.1//parquet-common-1.10.1.jar -parquet-encoding/1.10.1//parquet-encoding-1.10.1.jar -parquet-format/2.4.0//parquet-format-2.4.0.jar -parquet-hadoop-bundle/1.6.0//parquet-hadoop-bundle-1.6.0.jar -parquet-hadoop/1.10.1//parquet-hadoop-1.10.1.jar -parquet-jackson/1.10.1//parquet-jackson-1.10.1.jar -protobuf-java/2.5.0//protobuf-java-2.5.0.jar -py4j/0.10.9//py4j-0.10.9.jar -pyrolite/4.30//pyrolite-4.30.jar -scala-collection-compat_2.12/2.1.1//scala-collection-compat_2.12-2.1.1.jar -scala-compiler/2.12.10//scala-compiler-2.12.10.jar -scala-library/2.12.10//scala-library-2.12.10.jar -scala-parser-combinators_2.12/1.1.2//scala-parser-combinators_2.12-1.1.2.jar -scala-reflect/2.12.10//scala-reflect-2.12.10.jar -scala-xml_2.12/1.2.0//scala-xml_2.12-1.2.0.jar -shapeless_2.12/2.3.3//shapeless_2.12-2.3.3.jar -shims/0.9.0//shims-0.9.0.jar -slf4j-api/1.7.30//slf4j-api-1.7.30.jar -slf4j-log4j12/1.7.30//slf4j-log4j12-1.7.30.jar -snakeyaml/1.24//snakeyaml-1.24.jar -snappy-java/1.1.7.5//snappy-java-1.1.7.5.jar -snappy/0.2//snappy-0.2.jar -spire-macros_2.12/0.17.0-M1//spire-macros_2.12-0.17.0-M1.jar -spire-platform_2.12/0.17.0-M1//spire-platform_2.12-0.17.0-M1.jar -spire-util_2.12/0.17.0-M1//spire-util_2.12-0.17.0-M1.jar -spire_2.12/0.17.0-M1//spire_2.12-0.17.0-M1.jar -stax-api/1.0-2//stax-api-1.0-2.jar -stax-api/1.0.1//stax-api-1.0.1.jar -stream/2.9.6//stream-2.9.6.jar -stringtemplate/3.2.1//stringtemplate-3.2.1.jar -super-csv/2.2.0//super-csv-2.2.0.jar -threeten-extra/1.5.0//threeten-extra-1.5.0.jar -univocity-parsers/2.9.0//univocity-parsers-2.9.0.jar -xbean-asm7-shaded/4.15//xbean-asm7-shaded-4.15.jar -xercesImpl/2.12.0//xercesImpl-2.12.0.jar -xml-apis/1.4.01//xml-apis-1.4.01.jar -xmlenc/0.52//xmlenc-0.52.jar -xz/1.5//xz-1.5.jar 
-zjsonpatch/0.3.0//zjsonpatch-0.3.0.jar -zookeeper/3.4.14//zookeeper-3.4.14.jar -zstd-jni/1.4.5-4//zstd-jni-1.4.5-4.jar diff --git a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 b/dev/deps/spark-deps-hadoop-2.7-hive-2.3 index 9e167256236c0..8d8ef2e972098 100644 --- a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 +++ b/dev/deps/spark-deps-hadoop-2.7-hive-2.3 @@ -4,10 +4,11 @@ JTransforms/3.1//JTransforms-3.1.jar RoaringBitmap/0.9.0//RoaringBitmap-0.9.0.jar ST4/4.0.4//ST4-4.0.4.jar activation/1.1.1//activation-1.1.1.jar -aircompressor/0.10//aircompressor-0.10.jar +aircompressor/0.16//aircompressor-0.16.jar algebra_2.12/2.0.0-M2//algebra_2.12-2.0.0-M2.jar +annotations/17.0.0//annotations-17.0.0.jar antlr-runtime/3.5.2//antlr-runtime-3.5.2.jar -antlr4-runtime/4.7.1//antlr4-runtime-4.7.1.jar +antlr4-runtime/4.8-1//antlr4-runtime-4.8-1.jar aopalliance-repackaged/2.6.1//aopalliance-repackaged-2.6.1.jar aopalliance/1.0//aopalliance-1.0.jar apacheds-i18n/2.0.0-M15//apacheds-i18n-2.0.0-M15.jar @@ -15,10 +16,10 @@ apacheds-kerberos-codec/2.0.0-M15//apacheds-kerberos-codec-2.0.0-M15.jar api-asn1-api/1.0.0-M20//api-asn1-api-1.0.0-M20.jar api-util/1.0.0-M20//api-util-1.0.0-M20.jar arpack_combined_all/0.1//arpack_combined_all-0.1.jar -arrow-format/1.0.1//arrow-format-1.0.1.jar -arrow-memory-core/1.0.1//arrow-memory-core-1.0.1.jar -arrow-memory-netty/1.0.1//arrow-memory-netty-1.0.1.jar -arrow-vector/1.0.1//arrow-vector-1.0.1.jar +arrow-format/2.0.0//arrow-format-2.0.0.jar +arrow-memory-core/2.0.0//arrow-memory-core-2.0.0.jar +arrow-memory-netty/2.0.0//arrow-memory-netty-2.0.0.jar +arrow-vector/2.0.0//arrow-vector-2.0.0.jar audience-annotations/0.5.0//audience-annotations-0.5.0.jar automaton/1.11-8//automaton-1.11-8.jar avro-ipc/1.8.2//avro-ipc-1.8.2.jar @@ -32,18 +33,18 @@ chill-java/0.9.5//chill-java-0.9.5.jar chill_2.12/0.9.5//chill_2.12-0.9.5.jar commons-beanutils/1.9.4//commons-beanutils-1.9.4.jar commons-cli/1.2//commons-cli-1.2.jar -commons-codec/1.10//commons-codec-1.10.jar +commons-codec/1.15//commons-codec-1.15.jar commons-collections/3.2.2//commons-collections-3.2.2.jar commons-compiler/3.0.16//commons-compiler-3.0.16.jar -commons-compress/1.8.1//commons-compress-1.8.1.jar +commons-compress/1.20//commons-compress-1.20.jar commons-configuration/1.6//commons-configuration-1.6.jar -commons-crypto/1.0.0//commons-crypto-1.0.0.jar +commons-crypto/1.1.0//commons-crypto-1.1.0.jar commons-dbcp/1.4//commons-dbcp-1.4.jar commons-digester/1.8//commons-digester-1.8.jar commons-httpclient/3.1//commons-httpclient-3.1.jar commons-io/2.4//commons-io-2.4.jar commons-lang/2.6//commons-lang-2.6.jar -commons-lang3/3.10//commons-lang3-3.10.jar +commons-lang3/3.11//commons-lang3-3.11.jar commons-logging/1.1.3//commons-logging-1.1.3.jar commons-math3/3.4.1//commons-math3-3.4.1.jar commons-net/3.1//commons-net-3.1.jar @@ -57,7 +58,7 @@ curator-recipes/2.7.1//curator-recipes-2.7.1.jar datanucleus-api-jdo/4.2.4//datanucleus-api-jdo-4.2.4.jar datanucleus-core/4.1.17//datanucleus-core-4.1.17.jar datanucleus-rdbms/4.1.19//datanucleus-rdbms-4.1.19.jar -derby/10.12.1.1//derby-10.12.1.1.jar +derby/10.14.2.0//derby-10.14.2.0.jar dropwizard-metrics-hadoop-metrics2-reporter/0.1.2//dropwizard-metrics-hadoop-metrics2-reporter-0.1.2.jar flatbuffers-java/1.9.0//flatbuffers-java-1.9.0.jar generex/1.0.2//generex-1.0.2.jar @@ -88,35 +89,37 @@ hive-jdbc/2.3.7//hive-jdbc-2.3.7.jar hive-llap-common/2.3.7//hive-llap-common-2.3.7.jar hive-metastore/2.3.7//hive-metastore-2.3.7.jar hive-serde/2.3.7//hive-serde-2.3.7.jar 
+hive-service-rpc/3.1.2//hive-service-rpc-3.1.2.jar hive-shims-0.23/2.3.7//hive-shims-0.23-2.3.7.jar hive-shims-common/2.3.7//hive-shims-common-2.3.7.jar hive-shims-scheduler/2.3.7//hive-shims-scheduler-2.3.7.jar hive-shims/2.3.7//hive-shims-2.3.7.jar -hive-storage-api/2.7.1//hive-storage-api-2.7.1.jar +hive-storage-api/2.7.2//hive-storage-api-2.7.2.jar hive-vector-code-gen/2.3.7//hive-vector-code-gen-2.3.7.jar hk2-api/2.6.1//hk2-api-2.6.1.jar hk2-locator/2.6.1//hk2-locator-2.6.1.jar hk2-utils/2.6.1//hk2-utils-2.6.1.jar htrace-core/3.1.0-incubating//htrace-core-3.1.0-incubating.jar -httpclient/4.5.6//httpclient-4.5.6.jar +httpclient/4.5.13//httpclient-4.5.13.jar httpcore/4.4.12//httpcore-4.4.12.jar istack-commons-runtime/3.0.8//istack-commons-runtime-3.0.8.jar ivy/2.4.0//ivy-2.4.0.jar -jackson-annotations/2.10.0//jackson-annotations-2.10.0.jar +jackson-annotations/2.11.4//jackson-annotations-2.11.4.jar jackson-core-asl/1.9.13//jackson-core-asl-1.9.13.jar -jackson-core/2.10.0//jackson-core-2.10.0.jar -jackson-databind/2.10.0//jackson-databind-2.10.0.jar -jackson-dataformat-yaml/2.10.0//jackson-dataformat-yaml-2.10.0.jar -jackson-datatype-jsr310/2.10.3//jackson-datatype-jsr310-2.10.3.jar +jackson-core/2.11.4//jackson-core-2.11.4.jar +jackson-databind/2.11.4//jackson-databind-2.11.4.jar +jackson-dataformat-yaml/2.11.4//jackson-dataformat-yaml-2.11.4.jar +jackson-datatype-jsr310/2.11.2//jackson-datatype-jsr310-2.11.2.jar jackson-jaxrs/1.9.13//jackson-jaxrs-1.9.13.jar jackson-mapper-asl/1.9.13//jackson-mapper-asl-1.9.13.jar -jackson-module-jaxb-annotations/2.10.0//jackson-module-jaxb-annotations-2.10.0.jar -jackson-module-paranamer/2.10.0//jackson-module-paranamer-2.10.0.jar -jackson-module-scala_2.12/2.10.0//jackson-module-scala_2.12-2.10.0.jar +jackson-module-jaxb-annotations/2.11.4//jackson-module-jaxb-annotations-2.11.4.jar +jackson-module-paranamer/2.11.4//jackson-module-paranamer-2.11.4.jar +jackson-module-scala_2.12/2.11.4//jackson-module-scala_2.12-2.11.4.jar jackson-xc/1.9.13//jackson-xc-1.9.13.jar jakarta.activation-api/1.2.1//jakarta.activation-api-1.2.1.jar jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar +jakarta.servlet-api/4.0.3//jakarta.servlet-api-4.0.3.jar jakarta.validation-api/2.0.2//jakarta.validation-api-2.0.2.jar jakarta.ws.rs-api/2.1.6//jakarta.ws.rs-api-2.1.6.jar jakarta.xml.bind-api/2.3.2//jakarta.xml.bind-api-2.3.2.jar @@ -124,7 +127,6 @@ janino/3.0.16//janino-3.0.16.jar javassist/3.25.0-GA//javassist-3.25.0-GA.jar javax.inject/1//javax.inject-1.jar javax.jdo/3.2.0-m3//javax.jdo-3.2.0-m3.jar -javax.servlet-api/3.1.0//javax.servlet-api-3.1.0.jar javolution/5.5.1//javolution-5.5.1.jar jaxb-api/2.2.2//jaxb-api-2.2.2.jar jaxb-runtime/2.3.2//jaxb-runtime-2.3.2.jar @@ -154,14 +156,31 @@ jsr305/3.0.0//jsr305-3.0.0.jar jta/1.1//jta-1.1.jar jul-to-slf4j/1.7.30//jul-to-slf4j-1.7.30.jar kryo-shaded/4.0.2//kryo-shaded-4.0.2.jar -kubernetes-client/4.9.2//kubernetes-client-4.9.2.jar -kubernetes-model-common/4.9.2//kubernetes-model-common-4.9.2.jar -kubernetes-model/4.9.2//kubernetes-model-4.9.2.jar +kubernetes-client/4.12.0//kubernetes-client-4.12.0.jar +kubernetes-model-admissionregistration/4.12.0//kubernetes-model-admissionregistration-4.12.0.jar +kubernetes-model-apiextensions/4.12.0//kubernetes-model-apiextensions-4.12.0.jar +kubernetes-model-apps/4.12.0//kubernetes-model-apps-4.12.0.jar +kubernetes-model-autoscaling/4.12.0//kubernetes-model-autoscaling-4.12.0.jar 
+kubernetes-model-batch/4.12.0//kubernetes-model-batch-4.12.0.jar +kubernetes-model-certificates/4.12.0//kubernetes-model-certificates-4.12.0.jar +kubernetes-model-common/4.12.0//kubernetes-model-common-4.12.0.jar +kubernetes-model-coordination/4.12.0//kubernetes-model-coordination-4.12.0.jar +kubernetes-model-core/4.12.0//kubernetes-model-core-4.12.0.jar +kubernetes-model-discovery/4.12.0//kubernetes-model-discovery-4.12.0.jar +kubernetes-model-events/4.12.0//kubernetes-model-events-4.12.0.jar +kubernetes-model-extensions/4.12.0//kubernetes-model-extensions-4.12.0.jar +kubernetes-model-metrics/4.12.0//kubernetes-model-metrics-4.12.0.jar +kubernetes-model-networking/4.12.0//kubernetes-model-networking-4.12.0.jar +kubernetes-model-policy/4.12.0//kubernetes-model-policy-4.12.0.jar +kubernetes-model-rbac/4.12.0//kubernetes-model-rbac-4.12.0.jar +kubernetes-model-scheduling/4.12.0//kubernetes-model-scheduling-4.12.0.jar +kubernetes-model-settings/4.12.0//kubernetes-model-settings-4.12.0.jar +kubernetes-model-storageclass/4.12.0//kubernetes-model-storageclass-4.12.0.jar leveldbjni-all/1.8//leveldbjni-all-1.8.jar libfb303/0.9.3//libfb303-0.9.3.jar libthrift/0.12.0//libthrift-0.12.0.jar log4j/1.2.17//log4j-1.2.17.jar -logging-interceptor/3.12.6//logging-interceptor-3.12.6.jar +logging-interceptor/3.12.12//logging-interceptor-3.12.12.jar lz4-java/1.7.1//lz4-java-1.7.1.jar machinist_2.12/0.6.8//machinist_2.12-0.6.8.jar macro-compat_2.12/1.1.1//macro-compat_2.12-1.1.1.jar @@ -174,12 +193,12 @@ metrics-jvm/4.1.1//metrics-jvm-4.1.1.jar minlog/1.3.0//minlog-1.3.0.jar netty-all/4.1.51.Final//netty-all-4.1.51.Final.jar objenesis/2.6//objenesis-2.6.jar -okhttp/3.12.6//okhttp-3.12.6.jar +okhttp/3.12.12//okhttp-3.12.12.jar okio/1.14.0//okio-1.14.0.jar opencsv/2.3//opencsv-2.3.jar -orc-core/1.5.10//orc-core-1.5.10.jar -orc-mapreduce/1.5.10//orc-mapreduce-1.5.10.jar -orc-shims/1.5.10//orc-shims-1.5.10.jar +orc-core/1.6.6//orc-core-1.6.6.jar +orc-mapreduce/1.6.6//orc-mapreduce-1.6.6.jar +orc-shims/1.6.6//orc-shims-1.6.6.jar oro/2.0.8//oro-2.0.8.jar osgi-resource-locator/1.0.3//osgi-resource-locator-1.0.3.jar paranamer/2.8//paranamer-2.8.jar @@ -190,7 +209,7 @@ parquet-format/2.4.0//parquet-format-2.4.0.jar parquet-hadoop/1.10.1//parquet-hadoop-1.10.1.jar parquet-jackson/1.10.1//parquet-jackson-1.10.1.jar protobuf-java/2.5.0//protobuf-java-2.5.0.jar -py4j/0.10.9//py4j-0.10.9.jar +py4j/0.10.9.1//py4j-0.10.9.1.jar pyrolite/4.30//pyrolite-4.30.jar scala-collection-compat_2.12/2.1.1//scala-collection-compat_2.12-2.1.1.jar scala-compiler/2.12.10//scala-compiler-2.12.10.jar @@ -202,8 +221,8 @@ shapeless_2.12/2.3.3//shapeless_2.12-2.3.3.jar shims/0.9.0//shims-0.9.0.jar slf4j-api/1.7.30//slf4j-api-1.7.30.jar slf4j-log4j12/1.7.30//slf4j-log4j12-1.7.30.jar -snakeyaml/1.24//snakeyaml-1.24.jar -snappy-java/1.1.7.5//snappy-java-1.1.7.5.jar +snakeyaml/1.26//snakeyaml-1.26.jar +snappy-java/1.1.8.2//snappy-java-1.1.8.2.jar spire-macros_2.12/0.17.0-M1//spire-macros_2.12-0.17.0-M1.jar spire-platform_2.12/0.17.0-M1//spire-platform_2.12-0.17.0-M1.jar spire-util_2.12/0.17.0-M1//spire-util_2.12-0.17.0-M1.jar @@ -223,4 +242,4 @@ xmlenc/0.52//xmlenc-0.52.jar xz/1.5//xz-1.5.jar zjsonpatch/0.3.0//zjsonpatch-0.3.0.jar zookeeper/3.4.14//zookeeper-3.4.14.jar -zstd-jni/1.4.5-4//zstd-jni-1.4.5-4.jar +zstd-jni/1.4.8-1//zstd-jni-1.4.8-1.jar diff --git a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 b/dev/deps/spark-deps-hadoop-3.2-hive-2.3 index b44b461014cd7..bf56fc18c0446 100644 --- a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 +++ 
b/dev/deps/spark-deps-hadoop-3.2-hive-2.3 @@ -5,17 +5,18 @@ RoaringBitmap/0.9.0//RoaringBitmap-0.9.0.jar ST4/4.0.4//ST4-4.0.4.jar accessors-smart/1.2//accessors-smart-1.2.jar activation/1.1.1//activation-1.1.1.jar -aircompressor/0.10//aircompressor-0.10.jar +aircompressor/0.16//aircompressor-0.16.jar algebra_2.12/2.0.0-M2//algebra_2.12-2.0.0-M2.jar +annotations/17.0.0//annotations-17.0.0.jar antlr-runtime/3.5.2//antlr-runtime-3.5.2.jar -antlr4-runtime/4.7.1//antlr4-runtime-4.7.1.jar +antlr4-runtime/4.8-1//antlr4-runtime-4.8-1.jar aopalliance-repackaged/2.6.1//aopalliance-repackaged-2.6.1.jar aopalliance/1.0//aopalliance-1.0.jar arpack_combined_all/0.1//arpack_combined_all-0.1.jar -arrow-format/1.0.1//arrow-format-1.0.1.jar -arrow-memory-core/1.0.1//arrow-memory-core-1.0.1.jar -arrow-memory-netty/1.0.1//arrow-memory-netty-1.0.1.jar -arrow-vector/1.0.1//arrow-vector-1.0.1.jar +arrow-format/2.0.0//arrow-format-2.0.0.jar +arrow-memory-core/2.0.0//arrow-memory-core-2.0.0.jar +arrow-memory-netty/2.0.0//arrow-memory-netty-2.0.0.jar +arrow-vector/2.0.0//arrow-vector-2.0.0.jar audience-annotations/0.5.0//audience-annotations-0.5.0.jar automaton/1.11-8//automaton-1.11-8.jar avro-ipc/1.8.2//avro-ipc-1.8.2.jar @@ -29,18 +30,18 @@ chill-java/0.9.5//chill-java-0.9.5.jar chill_2.12/0.9.5//chill_2.12-0.9.5.jar commons-beanutils/1.9.4//commons-beanutils-1.9.4.jar commons-cli/1.2//commons-cli-1.2.jar -commons-codec/1.10//commons-codec-1.10.jar +commons-codec/1.15//commons-codec-1.15.jar commons-collections/3.2.2//commons-collections-3.2.2.jar commons-compiler/3.0.16//commons-compiler-3.0.16.jar -commons-compress/1.8.1//commons-compress-1.8.1.jar +commons-compress/1.20//commons-compress-1.20.jar commons-configuration2/2.1.1//commons-configuration2-2.1.1.jar -commons-crypto/1.0.0//commons-crypto-1.0.0.jar +commons-crypto/1.1.0//commons-crypto-1.1.0.jar commons-daemon/1.0.13//commons-daemon-1.0.13.jar commons-dbcp/1.4//commons-dbcp-1.4.jar commons-httpclient/3.1//commons-httpclient-3.1.jar commons-io/2.5//commons-io-2.5.jar commons-lang/2.6//commons-lang-2.6.jar -commons-lang3/3.10//commons-lang3-3.10.jar +commons-lang3/3.11//commons-lang3-3.11.jar commons-logging/1.1.3//commons-logging-1.1.3.jar commons-math3/3.4.1//commons-math3-3.4.1.jar commons-net/3.1//commons-net-3.1.jar @@ -54,7 +55,7 @@ curator-recipes/2.13.0//curator-recipes-2.13.0.jar datanucleus-api-jdo/4.2.4//datanucleus-api-jdo-4.2.4.jar datanucleus-core/4.1.17//datanucleus-core-4.1.17.jar datanucleus-rdbms/4.1.19//datanucleus-rdbms-4.1.19.jar -derby/10.12.1.1//derby-10.12.1.1.jar +derby/10.14.2.0//derby-10.14.2.0.jar dnsjava/2.1.7//dnsjava-2.1.7.jar dropwizard-metrics-hadoop-metrics2-reporter/0.1.2//dropwizard-metrics-hadoop-metrics2-reporter-0.1.2.jar ehcache/3.3.1//ehcache-3.3.1.jar @@ -87,35 +88,37 @@ hive-jdbc/2.3.7//hive-jdbc-2.3.7.jar hive-llap-common/2.3.7//hive-llap-common-2.3.7.jar hive-metastore/2.3.7//hive-metastore-2.3.7.jar hive-serde/2.3.7//hive-serde-2.3.7.jar +hive-service-rpc/3.1.2//hive-service-rpc-3.1.2.jar hive-shims-0.23/2.3.7//hive-shims-0.23-2.3.7.jar hive-shims-common/2.3.7//hive-shims-common-2.3.7.jar hive-shims-scheduler/2.3.7//hive-shims-scheduler-2.3.7.jar hive-shims/2.3.7//hive-shims-2.3.7.jar -hive-storage-api/2.7.1//hive-storage-api-2.7.1.jar +hive-storage-api/2.7.2//hive-storage-api-2.7.2.jar hive-vector-code-gen/2.3.7//hive-vector-code-gen-2.3.7.jar hk2-api/2.6.1//hk2-api-2.6.1.jar hk2-locator/2.6.1//hk2-locator-2.6.1.jar hk2-utils/2.6.1//hk2-utils-2.6.1.jar 
htrace-core4/4.1.0-incubating//htrace-core4-4.1.0-incubating.jar -httpclient/4.5.6//httpclient-4.5.6.jar +httpclient/4.5.13//httpclient-4.5.13.jar httpcore/4.4.12//httpcore-4.4.12.jar istack-commons-runtime/3.0.8//istack-commons-runtime-3.0.8.jar ivy/2.4.0//ivy-2.4.0.jar -jackson-annotations/2.10.0//jackson-annotations-2.10.0.jar +jackson-annotations/2.11.4//jackson-annotations-2.11.4.jar jackson-core-asl/1.9.13//jackson-core-asl-1.9.13.jar -jackson-core/2.10.0//jackson-core-2.10.0.jar -jackson-databind/2.10.0//jackson-databind-2.10.0.jar -jackson-dataformat-yaml/2.10.0//jackson-dataformat-yaml-2.10.0.jar -jackson-datatype-jsr310/2.10.3//jackson-datatype-jsr310-2.10.3.jar +jackson-core/2.11.4//jackson-core-2.11.4.jar +jackson-databind/2.11.4//jackson-databind-2.11.4.jar +jackson-dataformat-yaml/2.11.4//jackson-dataformat-yaml-2.11.4.jar +jackson-datatype-jsr310/2.11.2//jackson-datatype-jsr310-2.11.2.jar jackson-jaxrs-base/2.9.5//jackson-jaxrs-base-2.9.5.jar jackson-jaxrs-json-provider/2.9.5//jackson-jaxrs-json-provider-2.9.5.jar jackson-mapper-asl/1.9.13//jackson-mapper-asl-1.9.13.jar -jackson-module-jaxb-annotations/2.10.0//jackson-module-jaxb-annotations-2.10.0.jar -jackson-module-paranamer/2.10.0//jackson-module-paranamer-2.10.0.jar -jackson-module-scala_2.12/2.10.0//jackson-module-scala_2.12-2.10.0.jar +jackson-module-jaxb-annotations/2.11.4//jackson-module-jaxb-annotations-2.11.4.jar +jackson-module-paranamer/2.11.4//jackson-module-paranamer-2.11.4.jar +jackson-module-scala_2.12/2.11.4//jackson-module-scala_2.12-2.11.4.jar jakarta.activation-api/1.2.1//jakarta.activation-api-1.2.1.jar jakarta.annotation-api/1.3.5//jakarta.annotation-api-1.3.5.jar jakarta.inject/2.6.1//jakarta.inject-2.6.1.jar +jakarta.servlet-api/4.0.3//jakarta.servlet-api-4.0.3.jar jakarta.validation-api/2.0.2//jakarta.validation-api-2.0.2.jar jakarta.ws.rs-api/2.1.6//jakarta.ws.rs-api-2.1.6.jar jakarta.xml.bind-api/2.3.2//jakarta.xml.bind-api-2.3.2.jar @@ -166,14 +169,31 @@ kerby-pkix/1.0.1//kerby-pkix-1.0.1.jar kerby-util/1.0.1//kerby-util-1.0.1.jar kerby-xdr/1.0.1//kerby-xdr-1.0.1.jar kryo-shaded/4.0.2//kryo-shaded-4.0.2.jar -kubernetes-client/4.9.2//kubernetes-client-4.9.2.jar -kubernetes-model-common/4.9.2//kubernetes-model-common-4.9.2.jar -kubernetes-model/4.9.2//kubernetes-model-4.9.2.jar +kubernetes-client/4.12.0//kubernetes-client-4.12.0.jar +kubernetes-model-admissionregistration/4.12.0//kubernetes-model-admissionregistration-4.12.0.jar +kubernetes-model-apiextensions/4.12.0//kubernetes-model-apiextensions-4.12.0.jar +kubernetes-model-apps/4.12.0//kubernetes-model-apps-4.12.0.jar +kubernetes-model-autoscaling/4.12.0//kubernetes-model-autoscaling-4.12.0.jar +kubernetes-model-batch/4.12.0//kubernetes-model-batch-4.12.0.jar +kubernetes-model-certificates/4.12.0//kubernetes-model-certificates-4.12.0.jar +kubernetes-model-common/4.12.0//kubernetes-model-common-4.12.0.jar +kubernetes-model-coordination/4.12.0//kubernetes-model-coordination-4.12.0.jar +kubernetes-model-core/4.12.0//kubernetes-model-core-4.12.0.jar +kubernetes-model-discovery/4.12.0//kubernetes-model-discovery-4.12.0.jar +kubernetes-model-events/4.12.0//kubernetes-model-events-4.12.0.jar +kubernetes-model-extensions/4.12.0//kubernetes-model-extensions-4.12.0.jar +kubernetes-model-metrics/4.12.0//kubernetes-model-metrics-4.12.0.jar +kubernetes-model-networking/4.12.0//kubernetes-model-networking-4.12.0.jar +kubernetes-model-policy/4.12.0//kubernetes-model-policy-4.12.0.jar +kubernetes-model-rbac/4.12.0//kubernetes-model-rbac-4.12.0.jar 
+kubernetes-model-scheduling/4.12.0//kubernetes-model-scheduling-4.12.0.jar +kubernetes-model-settings/4.12.0//kubernetes-model-settings-4.12.0.jar +kubernetes-model-storageclass/4.12.0//kubernetes-model-storageclass-4.12.0.jar leveldbjni-all/1.8//leveldbjni-all-1.8.jar libfb303/0.9.3//libfb303-0.9.3.jar libthrift/0.12.0//libthrift-0.12.0.jar log4j/1.2.17//log4j-1.2.17.jar -logging-interceptor/3.12.6//logging-interceptor-3.12.6.jar +logging-interceptor/3.12.12//logging-interceptor-3.12.12.jar lz4-java/1.7.1//lz4-java-1.7.1.jar machinist_2.12/0.6.8//machinist_2.12-0.6.8.jar macro-compat_2.12/1.1.1//macro-compat_2.12-1.1.1.jar @@ -188,12 +208,12 @@ netty-all/4.1.51.Final//netty-all-4.1.51.Final.jar nimbus-jose-jwt/4.41.1//nimbus-jose-jwt-4.41.1.jar objenesis/2.6//objenesis-2.6.jar okhttp/2.7.5//okhttp-2.7.5.jar -okhttp/3.12.6//okhttp-3.12.6.jar +okhttp/3.12.12//okhttp-3.12.12.jar okio/1.14.0//okio-1.14.0.jar opencsv/2.3//opencsv-2.3.jar -orc-core/1.5.10//orc-core-1.5.10.jar -orc-mapreduce/1.5.10//orc-mapreduce-1.5.10.jar -orc-shims/1.5.10//orc-shims-1.5.10.jar +orc-core/1.6.6//orc-core-1.6.6.jar +orc-mapreduce/1.6.6//orc-mapreduce-1.6.6.jar +orc-shims/1.6.6//orc-shims-1.6.6.jar oro/2.0.8//oro-2.0.8.jar osgi-resource-locator/1.0.3//osgi-resource-locator-1.0.3.jar paranamer/2.8//paranamer-2.8.jar @@ -204,7 +224,7 @@ parquet-format/2.4.0//parquet-format-2.4.0.jar parquet-hadoop/1.10.1//parquet-hadoop-1.10.1.jar parquet-jackson/1.10.1//parquet-jackson-1.10.1.jar protobuf-java/2.5.0//protobuf-java-2.5.0.jar -py4j/0.10.9//py4j-0.10.9.jar +py4j/0.10.9.1//py4j-0.10.9.1.jar pyrolite/4.30//pyrolite-4.30.jar re2j/1.1//re2j-1.1.jar scala-collection-compat_2.12/2.1.1//scala-collection-compat_2.12-2.1.1.jar @@ -217,8 +237,8 @@ shapeless_2.12/2.3.3//shapeless_2.12-2.3.3.jar shims/0.9.0//shims-0.9.0.jar slf4j-api/1.7.30//slf4j-api-1.7.30.jar slf4j-log4j12/1.7.30//slf4j-log4j12-1.7.30.jar -snakeyaml/1.24//snakeyaml-1.24.jar -snappy-java/1.1.7.5//snappy-java-1.1.7.5.jar +snakeyaml/1.26//snakeyaml-1.26.jar +snappy-java/1.1.8.2//snappy-java-1.1.8.2.jar spire-macros_2.12/0.17.0-M1//spire-macros_2.12-0.17.0-M1.jar spire-platform_2.12/0.17.0-M1//spire-platform_2.12-0.17.0-M1.jar spire-util_2.12/0.17.0-M1//spire-util_2.12-0.17.0-M1.jar @@ -237,4 +257,4 @@ xbean-asm7-shaded/4.15//xbean-asm7-shaded-4.15.jar xz/1.5//xz-1.5.jar zjsonpatch/0.3.0//zjsonpatch-0.3.0.jar zookeeper/3.4.14//zookeeper-3.4.14.jar -zstd-jni/1.4.5-4//zstd-jni-1.4.5-4.jar +zstd-jni/1.4.8-1//zstd-jni-1.4.8-1.jar diff --git a/dev/github_jira_sync.py b/dev/github_jira_sync.py index 9bcebaa22ab86..27451bba905dd 100755 --- a/dev/github_jira_sync.py +++ b/dev/github_jira_sync.py @@ -16,7 +16,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -# Utility for updating JIRA's with information about Github pull requests +# Utility for updating JIRA's with information about GitHub pull requests import json import os @@ -142,9 +142,9 @@ def reset_pr_labels(pr_num, jira_components): jira_prs = get_jira_prs() previous_max = get_max_pr() -print("Retrieved %s JIRA PR's from Github" % len(jira_prs)) +print("Retrieved %s JIRA PR's from GitHub" % len(jira_prs)) jira_prs = [(k, v) for k, v in jira_prs if int(v['number']) > previous_max] -print("%s PR's remain after excluding visted ones" % len(jira_prs)) +print("%s PR's remain after excluding visited ones" % len(jira_prs)) num_updates = 0 considered = [] @@ -157,7 +157,7 @@ def reset_pr_labels(pr_num, jira_components): considered = considered + [pr_num] url = pr['html_url'] - title = "[Github] Pull Request #%s (%s)" % (pr['number'], pr['user']['login']) + title = "[GitHub] Pull Request #%s (%s)" % (pr['number'], pr['user']['login']) try: page = get_json(get_url(JIRA_API_BASE + "/rest/api/2/issue/" + issue + "/remotelink")) existing_links = map(lambda l: l['object']['url'], page) @@ -174,7 +174,7 @@ def reset_pr_labels(pr_num, jira_components): destination = {"title": title, "url": url, "icon": icon} # For all possible fields see: # https://developer.atlassian.com/display/JIRADEV/Fields+in+Remote+Issue+Links - # application = {"name": "Github pull requests", "type": "org.apache.spark.jira.github"} + # application = {"name": "GitHub pull requests", "type": "org.apache.spark.jira.github"} jira_client.add_remote_link(issue, destination) comment = "User '%s' has created a pull request for this issue:" % pr['user']['login'] diff --git a/dev/lint-python b/dev/lint-python index 21949e5d8e4e7..2c244e0c0b297 100755 --- a/dev/lint-python +++ b/dev/lint-python @@ -18,7 +18,7 @@ # define test binaries + versions FLAKE8_BUILD="flake8" MINIMUM_FLAKE8="3.5.0" - +MYPY_BUILD="mypy" PYCODESTYLE_BUILD="pycodestyle" MINIMUM_PYCODESTYLE="2.6.0" @@ -122,6 +122,31 @@ function pycodestyle_test { fi } +function mypy_test { + local MYPY_REPORT= + local MYPY_STATUS= + + # TODO(SPARK-32797): Install mypy on the Jenkins CI workers + if ! hash "$MYPY_BUILD" 2> /dev/null; then + echo "The $MYPY_BUILD command was not found. Skipping for now." + return + fi + + echo "starting $MYPY_BUILD test..." + MYPY_REPORT=$( ($MYPY_BUILD --config-file python/mypy.ini python/pyspark) 2>&1) + MYPY_STATUS=$? + + if [ "$MYPY_STATUS" -ne 0 ]; then + echo "mypy checks failed:" + echo "$MYPY_REPORT" + echo "$MYPY_STATUS" + exit "$MYPY_STATUS" + else + echo "mypy checks passed." + echo + fi +} + function flake8_test { local FLAKE8_VERSION= local EXPECTED_FLAKE8= @@ -212,6 +237,14 @@ function sphinx_test { return fi + # TODO(SPARK-33242): Install numpydoc in Jenkins machines + PYTHON_HAS_NUMPYDOC=$("$PYTHON_EXECUTABLE" -c 'import importlib.util; print(importlib.util.find_spec("numpydoc") is not None)') + if [[ "$PYTHON_HAS_NUMPYDOC" == "False" ]]; then + echo "$PYTHON_EXECUTABLE does not have numpydoc installed. Skipping Sphinx build for now." + echo + return + fi + echo "starting $SPHINX_BUILD tests..." pushd python/docs &> /dev/null make clean &> /dev/null @@ -246,6 +279,7 @@ PYTHON_SOURCE="$(find . 
-name "*.py")" compile_python_test "$PYTHON_SOURCE" pycodestyle_test "$PYTHON_SOURCE" flake8_test +mypy_test sphinx_test echo diff --git a/dev/mima b/dev/mima index f324c5c00a45c..d214bb96e09a3 100755 --- a/dev/mima +++ b/dev/mima @@ -25,8 +25,8 @@ FWDIR="$(cd "`dirname "$0"`"/..; pwd)" cd "$FWDIR" SPARK_PROFILES=${1:-"-Pmesos -Pkubernetes -Pyarn -Pspark-ganglia-lgpl -Pkinesis-asl -Phive-thriftserver -Phive"} -TOOLS_CLASSPATH="$(build/sbt -DcopyDependencies=false "export tools/fullClasspath" | tail -n1)" -OLD_DEPS_CLASSPATH="$(build/sbt -DcopyDependencies=false $SPARK_PROFILES "export oldDeps/fullClasspath" | tail -n1)" +TOOLS_CLASSPATH="$(build/sbt -DcopyDependencies=false "export tools/fullClasspath" | grep jar | tail -n1)" +OLD_DEPS_CLASSPATH="$(build/sbt -DcopyDependencies=false $SPARK_PROFILES "export oldDeps/fullClasspath" | grep jar | tail -n1)" rm -f .generated-mima* diff --git a/dev/requirements.txt b/dev/requirements.txt index b11f24fdbd4b2..c1546c8b8d4d3 100644 --- a/dev/requirements.txt +++ b/dev/requirements.txt @@ -6,3 +6,4 @@ sphinx pydata_sphinx_theme ipython nbsphinx +numpydoc diff --git a/dev/run-tests-jenkins b/dev/run-tests-jenkins index c3adc696a5122..c155d4ea3f076 100755 --- a/dev/run-tests-jenkins +++ b/dev/run-tests-jenkins @@ -26,6 +26,7 @@ FWDIR="$( cd "$( dirname "$0" )/.." && pwd )" cd "$FWDIR" export PATH=/home/anaconda/envs/py36/bin:$PATH +export LANG="en_US.UTF-8" PYTHON_VERSION_CHECK=$(python3 -c 'import sys; print(sys.version_info < (3, 6, 0))') if [[ "$PYTHON_VERSION_CHECK" == "True" ]]; then diff --git a/dev/run-tests-jenkins.py b/dev/run-tests-jenkins.py index 4ff5b327e3325..4309a74773e89 100755 --- a/dev/run-tests-jenkins.py +++ b/dev/run-tests-jenkins.py @@ -38,7 +38,7 @@ def print_err(msg): def post_message_to_github(msg, ghprb_pull_id): - print("Attempting to post to Github...") + print("Attempting to post to GitHub...") api_url = os.getenv("GITHUB_API_BASE", "https://api.github.com/repos/apache/spark") url = api_url + "/issues/" + ghprb_pull_id + "/comments" @@ -57,12 +57,12 @@ def post_message_to_github(msg, ghprb_pull_id): if response.getcode() == 201: print(" > Post successful.") except HTTPError as http_e: - print_err("Failed to post message to Github.") + print_err("Failed to post message to GitHub.") print_err(" > http_code: %s" % http_e.code) print_err(" > api_response: %s" % http_e.read()) print_err(" > data: %s" % posted_message) except URLError as url_e: - print_err("Failed to post message to Github.") + print_err("Failed to post message to GitHub.") print_err(" > urllib_status: %s" % url_e.reason[1]) print_err(" > data: %s" % posted_message) @@ -89,7 +89,7 @@ def run_pr_checks(pr_tests, ghprb_actual_commit, sha1): """ Executes a set of pull request checks to ease development and report issues with various components such as style, linting, dependencies, compatibilities, etc. - @return a list of messages to post back to Github + @return a list of messages to post back to GitHub """ # Ensure we save off the current HEAD to revert to current_pr_head = run_cmd(['git', 'rev-parse', 'HEAD'], return_output=True).strip() @@ -109,7 +109,7 @@ def run_tests(tests_timeout): """ Runs the `dev/run-tests` script and responds with the correct error message under the various failure scenarios. 
- @return a tuple containing the test result code and the result note to post to Github + @return a tuple containing the test result code and the result note to post to GitHub """ test_result_code = subprocess.Popen(['timeout', @@ -175,8 +175,6 @@ def main(): if "test-hadoop3.2" in ghprb_pull_title: os.environ["AMPLAB_JENKINS_BUILD_PROFILE"] = "hadoop3.2" # Switch the Hive profile based on the PR title: - if "test-hive1.2" in ghprb_pull_title: - os.environ["AMPLAB_JENKINS_BUILD_HIVE_PROFILE"] = "hive1.2" if "test-hive2.3" in ghprb_pull_title: os.environ["AMPLAB_JENKINS_BUILD_HIVE_PROFILE"] = "hive2.3" @@ -200,16 +198,16 @@ def main(): # To write a PR test: # * the file must reside within the dev/tests directory # * be an executable bash script - # * accept three arguments on the command line, the first being the Github PR long commit - # hash, the second the Github SHA1 hash, and the final the current PR hash + # * accept three arguments on the command line, the first being the GitHub PR long commit + # hash, the second the GitHub SHA1 hash, and the final the current PR hash # * and, lastly, return string output to be included in the pr message output that will - # be posted to Github + # be posted to GitHub pr_tests = [ "pr_merge_ability", "pr_public_classes" ] - # `bind_message_base` returns a function to generate messages for Github posting + # `bind_message_base` returns a function to generate messages for GitHub posting github_message = functools.partial(pr_message, build_display_name, build_url, diff --git a/dev/run-tests.py b/dev/run-tests.py index 3e118dcbc160d..d9d1ac85d5cd9 100755 --- a/dev/run-tests.py +++ b/dev/run-tests.py @@ -42,7 +42,8 @@ def determine_modules_for_files(filenames): """ Given a list of filenames, return the set of modules that contain those files. If a file is not associated with a more specific submodule, then this method will consider that - file to belong to the 'root' module. GitHub Action and Appveyor files are ignored. + file to belong to the 'root' module. `.github` directory is counted only in GitHub Actions, + and `appveyor.yml` is always ignored because this file is dedicated only to AppVeyor builds. >>> sorted(x.name for x in determine_modules_for_files(["python/pyspark/a.py", "sql/core/foo"])) ['pyspark-core', 'sql'] @@ -55,6 +56,8 @@ def determine_modules_for_files(filenames): for filename in filenames: if filename in ("appveyor.yml",): continue + if ("GITHUB_ACTIONS" not in os.environ) and filename.startswith(".github"): + continue matched_at_least_one_module = False for module in modules.all_modules: if module.contains_file(filename): @@ -325,7 +328,6 @@ def get_hive_profiles(hive_version): """ sbt_maven_hive_profiles = { - "hive1.2": ["-Phive-1.2"], "hive2.3": ["-Phive-2.3"], } @@ -481,6 +483,12 @@ def run_python_tests(test_modules, parallelism, with_coverage=False): if test_modules != [modules.root]: command.append("--modules=%s" % ','.join(m.name for m in test_modules)) command.append("--parallelism=%i" % parallelism) + if "GITHUB_ACTIONS" in os.environ: + # See SPARK-33565. Python 3.8 was temporarily removed as its default Python executables + # to test because of Jenkins environment issue. Once Jenkins has Python 3.8 to test, + # we should remove this change back and add python3.8 into python/run-tests.py script. + command.append("--python-executable=%s" % ','.join( + x for x in ["python3.6", "python3.8", "pypy3"] if which(x))) run_cmd(command) if with_coverage: @@ -513,10 +521,13 @@ def post_python_tests_results(): # 6. 
Commit current HTMLs. run_cmd([ "git", + "-c", + "user.name='Apache Spark Test Account'", + "-c", + "user.email='sparktestacc@gmail.com'", "commit", "-am", - "Coverage report at latest commit in Apache Spark", - '--author="Apache Spark Test Account "']) + "Coverage report at latest commit in Apache Spark"]) # 7. Delete the old branch. run_cmd(["git", "branch", "-D", "gh-pages"]) # 8. Rename the temporary branch to master. @@ -634,9 +645,9 @@ def main(): # /home/jenkins/anaconda2/envs/py36/bin os.environ["PATH"] = "/home/anaconda/envs/py36/bin:" + os.environ.get("PATH") else: - # else we're running locally or Github Actions. + # else we're running locally or GitHub Actions. build_tool = "sbt" - hadoop_version = os.environ.get("HADOOP_PROFILE", "hadoop2.7") + hadoop_version = os.environ.get("HADOOP_PROFILE", "hadoop3.2") hive_version = os.environ.get("HIVE_PROFILE", "hive2.3") if "GITHUB_ACTIONS" in os.environ: test_env = "github_actions" @@ -652,12 +663,12 @@ def main(): included_tags = [] excluded_tags = [] if should_only_test_modules: - # If we're running the tests in Github Actions, attempt to detect and test + # If we're running the tests in GitHub Actions, attempt to detect and test # only the affected modules. if test_env == "github_actions": if os.environ["GITHUB_INPUT_BRANCH"] != "": # Dispatched request - # Note that it assumes Github Actions has already merged + # Note that it assumes GitHub Actions has already merged # the given `GITHUB_INPUT_BRANCH` branch. changed_files = identify_changed_files_from_git_commits( "HEAD", target_branch=os.environ["GITHUB_SHA"]) diff --git a/dev/sparktestsupport/modules.py b/dev/sparktestsupport/modules.py index 3c438e309c22d..87bfbdf64a49f 100644 --- a/dev/sparktestsupport/modules.py +++ b/dev/sparktestsupport/modules.py @@ -31,9 +31,10 @@ class Module(object): files have changed. """ - def __init__(self, name, dependencies, source_file_regexes, build_profile_flags=(), environ={}, - sbt_test_goals=(), python_test_goals=(), excluded_python_implementations=(), - test_tags=(), should_run_r_tests=False, should_run_build_tests=False): + def __init__(self, name, dependencies, source_file_regexes, build_profile_flags=(), + environ=None, sbt_test_goals=(), python_test_goals=(), + excluded_python_implementations=(), test_tags=(), should_run_r_tests=False, + should_run_build_tests=False): """ Define a new module. 
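The `determine_modules_for_files` change in dev/run-tests.py above adds one more early `continue`: files under `.github` only count as changes when the build runs on GitHub Actions, while `appveyor.yml` is always ignored. A stand-alone sketch of just that filter, with the module lookup stubbed out as a simple prefix map for illustration:

```python
import os

# Illustrative stand-in for dev/sparktestsupport/modules.py: path prefix -> module name.
MODULE_PREFIXES = {
    "python/pyspark/": "pyspark-core",
    "sql/": "sql",
}


def modules_for_files(filenames):
    """Return module names for changed files, mirroring the new skip rules."""
    selected = set()
    for filename in filenames:
        if filename == "appveyor.yml":
            continue  # dedicated to AppVeyor builds, never triggers tests
        if "GITHUB_ACTIONS" not in os.environ and filename.startswith(".github"):
            continue  # .github only matters when the build itself runs on GitHub Actions
        matched = [m for prefix, m in MODULE_PREFIXES.items() if filename.startswith(prefix)]
        selected.update(matched or ["root"])
    return sorted(selected)


print(modules_for_files([".github/workflows/build.yml", "python/pyspark/a.py", "sql/core/foo"]))
# Locally: ['pyspark-core', 'sql']; under GITHUB_ACTIONS the .github file also maps to 'root'.
```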
@@ -62,7 +63,7 @@ def __init__(self, name, dependencies, source_file_regexes, build_profile_flags= self.source_file_prefixes = source_file_regexes self.sbt_test_goals = sbt_test_goals self.build_profile_flags = build_profile_flags - self.environ = environ + self.environ = environ or {} self.python_test_goals = python_test_goals self.excluded_python_implementations = excluded_python_implementations self.test_tags = test_tags @@ -386,6 +387,7 @@ def __hash__(self): "pyspark.tests.test_conf", "pyspark.tests.test_context", "pyspark.tests.test_daemon", + "pyspark.tests.test_install_spark", "pyspark.tests.test_join", "pyspark.tests.test_profiler", "pyspark.tests.test_rdd", @@ -563,6 +565,7 @@ def __hash__(self): "pyspark.ml.tests.test_stat", "pyspark.ml.tests.test_training_summary", "pyspark.ml.tests.test_tuning", + "pyspark.ml.tests.test_util", "pyspark.ml.tests.test_wrapper", ], excluded_python_implementations=[ diff --git a/dev/test-dependencies.sh b/dev/test-dependencies.sh index 129b073d75254..e9e9227d239e1 100755 --- a/dev/test-dependencies.sh +++ b/dev/test-dependencies.sh @@ -32,7 +32,6 @@ export LC_ALL=C HADOOP_MODULE_PROFILES="-Phive-thriftserver -Pmesos -Pkubernetes -Pyarn -Phive" MVN="build/mvn" HADOOP_HIVE_PROFILES=( - hadoop-2.7-hive-1.2 hadoop-2.7-hive-2.3 hadoop-3.2-hive-2.3 ) @@ -71,12 +70,9 @@ for HADOOP_HIVE_PROFILE in "${HADOOP_HIVE_PROFILES[@]}"; do if [[ $HADOOP_HIVE_PROFILE == **hadoop-3.2-hive-2.3** ]]; then HADOOP_PROFILE=hadoop-3.2 HIVE_PROFILE=hive-2.3 - elif [[ $HADOOP_HIVE_PROFILE == **hadoop-2.7-hive-2.3** ]]; then - HADOOP_PROFILE=hadoop-2.7 - HIVE_PROFILE=hive-2.3 else HADOOP_PROFILE=hadoop-2.7 - HIVE_PROFILE=hive-1.2 + HIVE_PROFILE=hive-2.3 fi echo "Performing Maven install for $HADOOP_HIVE_PROFILE" $MVN $HADOOP_MODULE_PROFILES -P$HADOOP_PROFILE -P$HIVE_PROFILE jar:jar jar:test-jar install:install clean -q diff --git a/dev/tests/pr_merge_ability.sh b/dev/tests/pr_merge_ability.sh index 25fdbccac4dd8..a32667730f76c 100755 --- a/dev/tests/pr_merge_ability.sh +++ b/dev/tests/pr_merge_ability.sh @@ -22,7 +22,7 @@ # another branch and returning results to be published. More details can be # found at dev/run-tests-jenkins. # -# Arg1: The Github Pull Request Actual Commit +# Arg1: The GitHub Pull Request Actual Commit # known as `ghprbActualCommit` in `run-tests-jenkins` # Arg2: The SHA1 hash # known as `sha1` in `run-tests-jenkins` diff --git a/dev/tests/pr_public_classes.sh b/dev/tests/pr_public_classes.sh index 479d1851fe0b8..ad1ad5e736594 100755 --- a/dev/tests/pr_public_classes.sh +++ b/dev/tests/pr_public_classes.sh @@ -22,7 +22,7 @@ # another branch and returning results to be published. More details can be # found at dev/run-tests-jenkins. 
# -# Arg1: The Github Pull Request Actual Commit +# Arg1: The GitHub Pull Request Actual Commit # known as `ghprbActualCommit` in `run-tests-jenkins` ghprbActualCommit="$1" diff --git a/dev/tox.ini b/dev/tox.ini index c14e6b9446cca..68e875f4c54ed 100644 --- a/dev/tox.ini +++ b/dev/tox.ini @@ -16,9 +16,9 @@ [pycodestyle] ignore=E226,E241,E305,E402,E722,E731,E741,W503,W504 max-line-length=100 -exclude=python/pyspark/cloudpickle/*.py,shared.py,python/docs/source/conf.py,work/*/*.py,python/.eggs/*,dist/*,.git/* +exclude=*/target/*,python/pyspark/cloudpickle/*.py,shared.py,python/docs/source/conf.py,work/*/*.py,python/.eggs/*,dist/*,.git/* [flake8] -select = E901,E999,F821,F822,F823,F401,F405 -exclude = python/pyspark/cloudpickle/*.py,shared.py,python/docs/source/conf.py,work/*/*.py,python/.eggs/*,dist/*,.git/* +select = E901,E999,F821,F822,F823,F401,F405,B006 +exclude = */target/*,python/pyspark/cloudpickle/*.py,shared.py*,python/docs/source/conf.py,work/*/*.py,python/.eggs/*,dist/*,.git/*,python/out,python/pyspark/sql/pandas/functions.pyi,python/pyspark/sql/column.pyi,python/pyspark/worker.pyi,python/pyspark/java_gateway.pyi max-line-length = 100 diff --git a/docs/README.md b/docs/README.md index 09982c1301163..af51dca6180a9 100644 --- a/docs/README.md +++ b/docs/README.md @@ -63,7 +63,7 @@ See also https://github.com/sphinx-doc/sphinx/issues/7551. --> ```sh -$ sudo pip install 'sphinx<3.1.0' mkdocs numpy pydata_sphinx_theme ipython nbsphinx +$ sudo pip install 'sphinx<3.1.0' mkdocs numpy pydata_sphinx_theme ipython nbsphinx numpydoc ``` ## Generating the Documentation HTML diff --git a/docs/_config.yml b/docs/_config.yml index 3be9807f81082..a8d42e483d17d 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -19,10 +19,27 @@ include: # These allow the documentation to be updated with newer releases # of Spark, Scala, and Mesos. -SPARK_VERSION: 3.1.0-SNAPSHOT -SPARK_VERSION_SHORT: 3.1.0 +SPARK_VERSION: 3.2.0-SNAPSHOT +SPARK_VERSION_SHORT: 3.2.0 SCALA_BINARY_VERSION: "2.12" SCALA_VERSION: "2.12.10" MESOS_VERSION: 1.0.0 SPARK_ISSUE_TRACKER_URL: https://issues.apache.org/jira/browse/SPARK SPARK_GITHUB_URL: https://github.com/apache/spark +# Before a new release, we should: +# 1. update the `version` array for the new Spark documentation +# on https://github.com/algolia/docsearch-configs/blob/master/configs/apache_spark.json. +# 2. update the value of `facetFilters.version` in `algoliaOptions` on the new release branch. +# Otherwise, after release, the search results are always based on the latest documentation +# (https://spark.apache.org/docs/latest/) even when visiting the documentation of previous releases. 
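Two of the changes above guard against the same Python pitfall: dev/sparktestsupport/modules.py swaps the mutable default `environ={}` for `environ=None` plus `environ or {}`, and dev/tox.ini enables flake8's B006 check (mutable argument defaults) so the pattern cannot creep back in. A quick illustration of why the shared default dict is dangerous:

```python
def bad_module(name, environ={}):          # one dict object shared by every call
    environ.setdefault("SPARK_TESTING", "1")
    return environ


def good_module(name, environ=None):       # the pattern modules.py now uses
    environ = environ or {}
    environ.setdefault("SPARK_TESTING", "1")
    return environ


a = bad_module("core")
b = bad_module("sql")
print(a is b)                              # True: both "modules" mutate the same default dict

c = good_module("core")
d = good_module("sql")
print(c is d)                              # False: each call gets its own environment dict
```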
+DOCSEARCH_SCRIPT: | + docsearch({ + apiKey: 'b18ca3732c502995563043aa17bc6ecb', + indexName: 'apache_spark', + inputSelector: '#docsearch-input', + enhancedSearchInput: true, + algoliaOptions: { + 'facetFilters': ["version:latest"] + }, + debug: false // Set debug to true if you want to inspect the dropdown + }); diff --git a/docs/_data/menu-sql.yaml b/docs/_data/menu-sql.yaml index 63f6b4a0a204b..cda2a1a5139a1 100644 --- a/docs/_data/menu-sql.yaml +++ b/docs/_data/menu-sql.yaml @@ -51,6 +51,10 @@ url: sql-performance-tuning.html#other-configuration-options - text: Join Strategy Hints for SQL Queries url: sql-performance-tuning.html#join-strategy-hints-for-sql-queries + - text: Coalesce Hints for SQL Queries + url: sql-performance-tuning.html#coalesce-hints-for-sql-queries + - text: Adaptive Query Execution + url: sql-performance-tuning.html#adaptive-query-execution - text: Distributed SQL Engine url: sql-distributed-sql-engine.html subitems: @@ -60,17 +64,6 @@ url: sql-distributed-sql-engine.html#running-the-spark-sql-cli - text: PySpark Usage Guide for Pandas with Apache Arrow url: sql-pyspark-pandas-with-arrow.html - subitems: - - text: Apache Arrow in Spark - url: sql-pyspark-pandas-with-arrow.html#apache-arrow-in-spark - - text: "Enabling for Conversion to/from Pandas" - url: sql-pyspark-pandas-with-arrow.html#enabling-for-conversion-tofrom-pandas - - text: "Pandas UDFs (a.k.a. Vectorized UDFs)" - url: sql-pyspark-pandas-with-arrow.html#pandas-udfs-aka-vectorized-udfs - - text: "Pandas Function APIs" - url: sql-pyspark-pandas-with-arrow.html#pandas-function-apis - - text: Usage Notes - url: sql-pyspark-pandas-with-arrow.html#usage-notes - text: Migration Guide url: sql-migration-old.html - text: SQL Reference @@ -175,6 +168,8 @@ url: sql-ref-syntax-qry-select-hints.html - text: Inline Table url: sql-ref-syntax-qry-select-inline-table.html + - text: File + url: sql-ref-syntax-qry-select-file.html - text: JOIN url: sql-ref-syntax-qry-select-join.html - text: LIKE Predicate diff --git a/docs/_layouts/global.html b/docs/_layouts/global.html index 09f7018262a0b..f10d46763cf76 100755 --- a/docs/_layouts/global.html +++ b/docs/_layouts/global.html @@ -30,6 +30,8 @@ + + {% production %} @@ -82,6 +84,7 @@ MLlib (Machine Learning) GraphX (Graph Processing) SparkR (R on Spark) + PySpark (Python on Spark) @@ -110,7 +113,7 @@ + + @@ -168,10 +175,21 @@

- + + + @@ -44,8 +58,8 @@ private[ui] class StreamingQueryStatisticsPage(parent: StreamingQueryTab) val parameterId = request.getParameter("id") require(parameterId != null && parameterId.nonEmpty, "Missing id parameter") - val query = parent.statusListener.allQueryStatus.find { case q => - q.runId.equals(UUID.fromString(parameterId)) + val query = parent.store.allQueryUIData.find { uiData => + uiData.summary.runId.equals(UUID.fromString(parameterId)) }.getOrElse(throw new IllegalArgumentException(s"Failed to find streaming query $parameterId")) val resources = generateLoadResources(request) @@ -95,37 +109,257 @@ private[ui] class StreamingQueryStatisticsPage(parent: StreamingQueryTab) } - def generateBasicInfo(query: StreamingQueryUIData): Seq[Node] = { - val duration = if (query.isActive) { - SparkUIUtils.formatDurationVerbose(System.currentTimeMillis() - query.startTimestamp) + def generateBasicInfo(uiData: StreamingQueryUIData): Seq[Node] = { + val duration = if (uiData.summary.isActive) { + val durationMs = System.currentTimeMillis() - uiData.summary.startTimestamp + SparkUIUtils.formatDurationVerbose(durationMs) } else { - withNoProgress(query, { - val end = query.lastProgress.timestamp - val start = query.recentProgress.head.timestamp + withNoProgress(uiData, { + val end = uiData.lastProgress.timestamp + val start = uiData.recentProgress.head.timestamp SparkUIUtils.formatDurationVerbose( parseProgressTimestamp(end) - parseProgressTimestamp(start)) }, "-") } - val name = UIUtils.getQueryName(query) - val numBatches = withNoProgress(query, { query.lastProgress.batchId + 1L }, 0) + val name = UIUtils.getQueryName(uiData) + val numBatches = withNoProgress(uiData, { uiData.lastProgress.batchId + 1L }, 0)
Running batches for {duration} since - {SparkUIUtils.formatDate(query.startTimestamp)} + {SparkUIUtils.formatDate(uiData.summary.startTimestamp)} ({numBatches} completed batches)

Name: {name}
-
Id: {query.id}
-
RunId: {query.runId}
+
Id: {uiData.summary.id}
+
RunId: {uiData.summary.runId}

} + def generateWatermark( + query: StreamingQueryUIData, + minBatchTime: Long, + maxBatchTime: Long, + jsCollector: JsCollector): Seq[Node] = { + // This is made sure on caller side but put it here to be defensive + require(query.lastProgress != null) + if (query.lastProgress.eventTime.containsKey("watermark")) { + val watermarkData = query.recentProgress.flatMap { p => + val batchTimestamp = parseProgressTimestamp(p.timestamp) + val watermarkValue = parseProgressTimestamp(p.eventTime.get("watermark")) + if (watermarkValue > 0L) { + // seconds + Some((batchTimestamp, ((batchTimestamp - watermarkValue) / 1000.0))) + } else { + None + } + } + + if (watermarkData.nonEmpty) { + val maxWatermark = watermarkData.maxBy(_._2)._2 + val graphUIDataForWatermark = + new GraphUIData( + "watermark-gap-timeline", + "watermark-gap-histogram", + watermarkData, + minBatchTime, + maxBatchTime, + 0, + maxWatermark, + "seconds") + graphUIDataForWatermark.generateDataJs(jsCollector) + + // scalastyle:off + + +
+
Global Watermark Gap {SparkUIUtils.tooltip("The gap between batch timestamp and global watermark for the batch.", "right")}
+
+ + {graphUIDataForWatermark.generateTimelineHtml(jsCollector)} + {graphUIDataForWatermark.generateHistogramHtml(jsCollector)} + + // scalastyle:on + } else { + Seq.empty[Node] + } + } else { + Seq.empty[Node] + } + } + + def generateAggregatedStateOperators( + query: StreamingQueryUIData, + minBatchTime: Long, + maxBatchTime: Long, + jsCollector: JsCollector): NodeBuffer = { + // This is made sure on caller side but put it here to be defensive + require(query.lastProgress != null) + if (query.lastProgress.stateOperators.nonEmpty) { + val numRowsTotalData = query.recentProgress.map(p => (parseProgressTimestamp(p.timestamp), + p.stateOperators.map(_.numRowsTotal).sum.toDouble)) + val maxNumRowsTotal = numRowsTotalData.maxBy(_._2)._2 + + val numRowsUpdatedData = query.recentProgress.map(p => (parseProgressTimestamp(p.timestamp), + p.stateOperators.map(_.numRowsUpdated).sum.toDouble)) + val maxNumRowsUpdated = numRowsUpdatedData.maxBy(_._2)._2 + + val memoryUsedBytesData = query.recentProgress.map(p => (parseProgressTimestamp(p.timestamp), + p.stateOperators.map(_.memoryUsedBytes).sum.toDouble)) + val maxMemoryUsedBytes = memoryUsedBytesData.maxBy(_._2)._2 + + val numRowsDroppedByWatermarkData = query.recentProgress + .map(p => (parseProgressTimestamp(p.timestamp), + p.stateOperators.map(_.numRowsDroppedByWatermark).sum.toDouble)) + val maxNumRowsDroppedByWatermark = numRowsDroppedByWatermarkData.maxBy(_._2)._2 + + val graphUIDataForNumberTotalRows = + new GraphUIData( + "aggregated-num-total-state-rows-timeline", + "aggregated-num-total-state-rows-histogram", + numRowsTotalData, + minBatchTime, + maxBatchTime, + 0, + maxNumRowsTotal, + "records") + graphUIDataForNumberTotalRows.generateDataJs(jsCollector) + + val graphUIDataForNumberUpdatedRows = + new GraphUIData( + "aggregated-num-updated-state-rows-timeline", + "aggregated-num-updated-state-rows-histogram", + numRowsUpdatedData, + minBatchTime, + maxBatchTime, + 0, + maxNumRowsUpdated, + "records") + graphUIDataForNumberUpdatedRows.generateDataJs(jsCollector) + + val graphUIDataForMemoryUsedBytes = + new GraphUIData( + "aggregated-state-memory-used-bytes-timeline", + "aggregated-state-memory-used-bytes-histogram", + memoryUsedBytesData, + minBatchTime, + maxBatchTime, + 0, + maxMemoryUsedBytes, + "bytes") + graphUIDataForMemoryUsedBytes.generateDataJs(jsCollector) + + val graphUIDataForNumRowsDroppedByWatermark = + new GraphUIData( + "aggregated-num-rows-dropped-by-watermark-timeline", + "aggregated-num-rows-dropped-by-watermark-histogram", + numRowsDroppedByWatermarkData, + minBatchTime, + maxBatchTime, + 0, + maxNumRowsDroppedByWatermark, + "records") + graphUIDataForNumRowsDroppedByWatermark.generateDataJs(jsCollector) + + val result = + // scalastyle:off + + +
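Editor's note: the watermark section above plots, for each progress update, the gap between the batch timestamp and the global watermark in seconds, skipping batches that have not yet produced a positive watermark. A standalone sketch of that computation follows; the timestamp format and helper names are assumptions, not the page's own utilities.

// Hedged sketch of the watermark-gap series: one (batchTimestampMillis, gapSeconds)
// point per progress update that carries a positive watermark.
object WatermarkGapSketch {
  import java.text.SimpleDateFormat
  import java.util.TimeZone

  // Assumption: progress timestamps are ISO-8601 UTC strings such as "2020-07-29T10:56:56.000Z".
  private def parseTs(s: String): Long = {
    val fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")
    fmt.setTimeZone(TimeZone.getTimeZone("UTC"))
    fmt.parse(s).getTime
  }

  /** (batch timestamp in millis, watermark gap in seconds) pairs for plotting. */
  def watermarkGaps(progress: Seq[(String, String)]): Seq[(Long, Double)] =
    progress.flatMap { case (batchTs, watermarkTs) =>
      val batchMillis = parseTs(batchTs)
      val watermarkMillis = parseTs(watermarkTs)
      if (watermarkMillis > 0L) {
        Some((batchMillis, (batchMillis - watermarkMillis) / 1000.0))  // seconds, as in the page
      } else {
        None                                                           // drop batches without a watermark
      }
    }

  def main(args: Array[String]): Unit = {
    val gaps = watermarkGaps(Seq(
      ("2020-07-29T10:56:56.000Z", "2020-07-29T10:56:46.000Z"),
      ("2020-07-29T10:57:01.000Z", "2020-07-29T10:56:51.000Z")))
    gaps.foreach { case (ts, gap) => println(s"$ts -> $gap s") }       // gap = 10.0 s for both points
  }
}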
+
Aggregated Number Of Total State Rows {SparkUIUtils.tooltip("Aggregated number of total state rows.", "right")}
+
+ + {graphUIDataForNumberTotalRows.generateTimelineHtml(jsCollector)} + {graphUIDataForNumberTotalRows.generateHistogramHtml(jsCollector)} + + + +
+
Aggregated Number Of Updated State Rows {SparkUIUtils.tooltip("Aggregated number of updated state rows.", "right")}
+
+ + {graphUIDataForNumberUpdatedRows.generateTimelineHtml(jsCollector)} + {graphUIDataForNumberUpdatedRows.generateHistogramHtml(jsCollector)} + + + +
+
Aggregated State Memory Used In Bytes {SparkUIUtils.tooltip("Aggregated state memory used in bytes.", "right")}
+
+ + {graphUIDataForMemoryUsedBytes.generateTimelineHtml(jsCollector)} + {graphUIDataForMemoryUsedBytes.generateHistogramHtml(jsCollector)} + + + +
+
Aggregated Number Of Rows Dropped By Watermark {SparkUIUtils.tooltip("Accumulates all input rows being dropped in stateful operators by watermark. 'Inputs' are relative to operators.", "right")}
+
+ + {graphUIDataForNumRowsDroppedByWatermark.generateTimelineHtml(jsCollector)} + {graphUIDataForNumRowsDroppedByWatermark.generateHistogramHtml(jsCollector)} + + // scalastyle:on + + if (enabledCustomMetrics.nonEmpty) { + result ++= generateAggregatedCustomMetrics(query, minBatchTime, maxBatchTime, jsCollector) + } + result + } else { + new NodeBuffer() + } + } + + def generateAggregatedCustomMetrics( + query: StreamingQueryUIData, + minBatchTime: Long, + maxBatchTime: Long, + jsCollector: JsCollector): NodeBuffer = { + val result: NodeBuffer = new NodeBuffer + + // This is made sure on caller side but put it here to be defensive + require(query.lastProgress.stateOperators.nonEmpty) + query.lastProgress.stateOperators.head.customMetrics.keySet().asScala + .filter(m => enabledCustomMetrics.contains(m.toLowerCase(Locale.ROOT))).map { metricName => + val data = query.recentProgress.map(p => (parseProgressTimestamp(p.timestamp), + p.stateOperators.map(_.customMetrics.get(metricName).toDouble).sum)) + val max = data.maxBy(_._2)._2 + val metric = supportedCustomMetrics.find(_.name.equalsIgnoreCase(metricName)).get + + val graphUIData = + new GraphUIData( + s"aggregated-$metricName-timeline", + s"aggregated-$metricName-histogram", + data, + minBatchTime, + maxBatchTime, + 0, + max, + "") + graphUIData.generateDataJs(jsCollector) + + result ++= + // scalastyle:off + + +
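Editor's note: generateAggregatedStateOperators above builds four time series by summing a metric (total rows, updated rows, memory used, rows dropped by watermark) across all state operators for each progress update, and uses the series maximum to bound the chart's y-axis. A compact sketch of that aggregation, using hypothetical stand-in types:

// Hedged sketch of the per-batch aggregation behind the state-operator charts.
object StateOperatorSeriesSketch {
  // Stand-in for StateOperatorProgress (illustrative, not the real class).
  final case class OperatorProgress(
      numRowsTotal: Long,
      numRowsUpdated: Long,
      memoryUsedBytes: Long,
      numRowsDroppedByWatermark: Long)

  // Stand-in for one StreamingQueryProgress with a pre-parsed batch timestamp.
  final case class Progress(batchTimestamp: Long, stateOperators: Seq[OperatorProgress])

  /** One (timestamp, summed value) point per batch, plus the max used to scale the graph. */
  def series(
      progress: Seq[Progress],
      metric: OperatorProgress => Long): (Seq[(Long, Double)], Double) = {
    val points = progress.map(p => (p.batchTimestamp, p.stateOperators.map(metric).sum.toDouble))
    (points, if (points.nonEmpty) points.maxBy(_._2)._2 else 0.0)
  }

  def main(args: Array[String]): Unit = {
    val history = Seq(
      Progress(1000L, Seq(OperatorProgress(10, 4, 2048, 0), OperatorProgress(5, 1, 1024, 0))),
      Progress(2000L, Seq(OperatorProgress(12, 2, 2304, 1), OperatorProgress(6, 1, 1024, 0))))
    val (totalRows, maxTotalRows) = series(history, _.numRowsTotal)
    println(totalRows)    // List((1000,15.0), (2000,18.0))
    println(maxTotalRows) // 18.0 -- becomes the y-axis upper bound
  }
}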
+
Aggregated Custom Metric {s"$metricName"} {SparkUIUtils.tooltip(metric.desc, "right")}
+
+ + {graphUIData.generateTimelineHtml(jsCollector)} + {graphUIData.generateHistogramHtml(jsCollector)} + + // scalastyle:on + } + + result + } + def generateStatTable(query: StreamingQueryUIData): Seq[Node] = { val batchToTimestamps = withNoProgress(query, query.recentProgress.map(p => (p.batchId, parseProgressTimestamp(p.timestamp))), @@ -284,6 +518,8 @@ private[ui] class StreamingQueryStatisticsPage(parent: StreamingQueryTab) {graphUIDataForDuration.generateAreaStackHtmlWithData(jsCollector, operationDurationData)} + {generateWatermark(query, minBatchTime, maxBatchTime, jsCollector)} + {generateAggregatedStateOperators(query, minBatchTime, maxBatchTime, jsCollector)} } else { diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListener.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListener.scala index e331083b30024..fdd3754344108 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListener.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListener.scala @@ -20,102 +20,144 @@ package org.apache.spark.sql.streaming.ui import java.util.UUID import java.util.concurrent.ConcurrentHashMap -import scala.collection.JavaConverters._ import scala.collection.mutable +import com.fasterxml.jackson.annotation.JsonIgnore + import org.apache.spark.SparkConf import org.apache.spark.sql.internal.StaticSQLConf import org.apache.spark.sql.streaming.{StreamingQueryListener, StreamingQueryProgress} +import org.apache.spark.sql.streaming.ui.StreamingQueryProgressWrapper._ import org.apache.spark.sql.streaming.ui.UIUtils.parseProgressTimestamp +import org.apache.spark.status.{ElementTrackingStore, KVUtils} +import org.apache.spark.status.KVUtils.KVIndexParam +import org.apache.spark.util.kvstore.KVIndex /** * A customized StreamingQueryListener used in structured streaming UI, which contains all * UI data for both active and inactive query. - * TODO: Add support for history server. */ -private[sql] class StreamingQueryStatusListener(conf: SparkConf) extends StreamingQueryListener { - - /** - * We use runId as the key here instead of id in active query status map, - * because the runId is unique for every started query, even it its a restart. - */ - private[ui] val activeQueryStatus = new ConcurrentHashMap[UUID, StreamingQueryUIData]() - private[ui] val inactiveQueryStatus = new mutable.Queue[StreamingQueryUIData]() +private[sql] class StreamingQueryStatusListener( + conf: SparkConf, + store: ElementTrackingStore) extends StreamingQueryListener { private val streamingProgressRetention = conf.get(StaticSQLConf.STREAMING_UI_RETAINED_PROGRESS_UPDATES) private val inactiveQueryStatusRetention = conf.get(StaticSQLConf.STREAMING_UI_RETAINED_QUERIES) + store.addTrigger(classOf[StreamingQueryData], inactiveQueryStatusRetention) { count => + cleanupInactiveQueries(count) + } + + // Events from the same query run will never be processed concurrently, so it's safe to + // access `progressIds` without any protection. 
+ private val queryToProgress = new ConcurrentHashMap[UUID, mutable.Queue[String]]() + + private def cleanupInactiveQueries(count: Long): Unit = { + val view = store.view(classOf[StreamingQueryData]).index("active").first(false).last(false) + val inactiveQueries = KVUtils.viewToSeq(view, Int.MaxValue)(_ => true) + val numInactiveQueries = inactiveQueries.size + if (numInactiveQueries <= inactiveQueryStatusRetention) { + return + } + val toDelete = inactiveQueries.sortBy(_.endTimestamp.get) + .take(numInactiveQueries - inactiveQueryStatusRetention) + val runIds = toDelete.map { e => + store.delete(e.getClass, e.runId) + e.runId.toString + } + // Delete wrappers in one pass, as deleting them for each summary is slow + store.removeAllByIndexValues(classOf[StreamingQueryProgressWrapper], "runId", runIds) + } + override def onQueryStarted(event: StreamingQueryListener.QueryStartedEvent): Unit = { val startTimestamp = parseProgressTimestamp(event.timestamp) - activeQueryStatus.putIfAbsent(event.runId, - new StreamingQueryUIData(event.name, event.id, event.runId, startTimestamp)) + store.write(new StreamingQueryData( + event.name, + event.id, + event.runId, + isActive = true, + None, + startTimestamp + ), checkTriggers = true) } override def onQueryProgress(event: StreamingQueryListener.QueryProgressEvent): Unit = { - val batchTimestamp = parseProgressTimestamp(event.progress.timestamp) - val queryStatus = activeQueryStatus.getOrDefault( - event.progress.runId, - new StreamingQueryUIData(event.progress.name, event.progress.id, event.progress.runId, - batchTimestamp)) - queryStatus.updateProcess(event.progress, streamingProgressRetention) - } - - override def onQueryTerminated( - event: StreamingQueryListener.QueryTerminatedEvent): Unit = synchronized { - val queryStatus = activeQueryStatus.remove(event.runId) - if (queryStatus != null) { - queryStatus.queryTerminated(event) - inactiveQueryStatus += queryStatus - while (inactiveQueryStatus.length >= inactiveQueryStatusRetention) { - inactiveQueryStatus.dequeue() - } + val runId = event.progress.runId + val batchId = event.progress.batchId + val timestamp = event.progress.timestamp + if (!queryToProgress.containsKey(runId)) { + queryToProgress.put(runId, mutable.Queue.empty[String]) + } + val progressIds = queryToProgress.get(runId) + progressIds.enqueue(getUniqueId(runId, batchId, timestamp)) + store.write(new StreamingQueryProgressWrapper(event.progress)) + while (progressIds.length > streamingProgressRetention) { + val uniqueId = progressIds.dequeue + store.delete(classOf[StreamingQueryProgressWrapper], uniqueId) } } - def allQueryStatus: Seq[StreamingQueryUIData] = synchronized { - activeQueryStatus.values().asScala.toSeq ++ inactiveQueryStatus + override def onQueryTerminated( + event: StreamingQueryListener.QueryTerminatedEvent): Unit = { + val querySummary = store.read(classOf[StreamingQueryData], event.runId) + val curTime = System.currentTimeMillis() + store.write(new StreamingQueryData( + querySummary.name, + querySummary.id, + querySummary.runId, + isActive = false, + querySummary.exception, + querySummary.startTimestamp, + Some(curTime) + ), checkTriggers = true) + queryToProgress.remove(event.runId) } } +private[sql] class StreamingQueryData( + val name: String, + val id: UUID, + @KVIndexParam val runId: UUID, + @KVIndexParam("active") val isActive: Boolean, + val exception: Option[String], + @KVIndexParam("startTimestamp") val startTimestamp: Long, + val endTimestamp: Option[Long] = None) + /** * This class contains all message 
related to UI display, each instance corresponds to a single * [[org.apache.spark.sql.streaming.StreamingQuery]]. */ -private[ui] class StreamingQueryUIData( - val name: String, - val id: UUID, - val runId: UUID, - val startTimestamp: Long) { - - /** Holds the most recent query progress updates. */ - private val progressBuffer = new mutable.Queue[StreamingQueryProgress]() - - private var _isActive = true - private var _exception: Option[String] = None - - def isActive: Boolean = synchronized { _isActive } - - def exception: Option[String] = synchronized { _exception } - - def queryTerminated(event: StreamingQueryListener.QueryTerminatedEvent): Unit = synchronized { - _isActive = false - _exception = event.exception - } - - def updateProcess( - newProgress: StreamingQueryProgress, retentionNum: Int): Unit = progressBuffer.synchronized { - progressBuffer += newProgress - while (progressBuffer.length >= retentionNum) { - progressBuffer.dequeue() +private[sql] case class StreamingQueryUIData( + summary: StreamingQueryData, + recentProgress: Array[StreamingQueryProgress]) { + + def lastProgress: StreamingQueryProgress = { + if (recentProgress.nonEmpty) { + recentProgress.last + } else { + null } } +} - def recentProgress: Array[StreamingQueryProgress] = progressBuffer.synchronized { - progressBuffer.toArray - } +private[sql] class StreamingQueryProgressWrapper(val progress: StreamingQueryProgress) { + @JsonIgnore @KVIndex + private val uniqueId: String = getUniqueId(progress.runId, progress.batchId, progress.timestamp) - def lastProgress: StreamingQueryProgress = progressBuffer.synchronized { - progressBuffer.lastOption.orNull + @JsonIgnore @KVIndex("runId") + private def runIdIndex: String = progress.runId.toString +} + +private[sql] object StreamingQueryProgressWrapper { + /** + * Adding `timestamp` into unique id to support reporting `empty` query progress + * in which no data comes but with the same batchId. 
+ */ + def getUniqueId( + runId: UUID, + batchId: Long, + timestamp: String): String = { + s"${runId}_${batchId}_$timestamp" } } diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryTab.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryTab.scala index bb097ffc06912..65cad8f06cc1c 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryTab.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/StreamingQueryTab.scala @@ -17,10 +17,11 @@ package org.apache.spark.sql.streaming.ui import org.apache.spark.internal.Logging +import org.apache.spark.sql.execution.ui.StreamingQueryStatusStore import org.apache.spark.ui.{SparkUI, SparkUITab} private[sql] class StreamingQueryTab( - val statusListener: StreamingQueryStatusListener, + val store: StreamingQueryStatusStore, sparkUI: SparkUI) extends SparkUITab(sparkUI, "StreamingQuery") with Logging { override val name = "Structured Streaming" diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/UIUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/UIUtils.scala index cdad5ed9942b5..88a110fa9a329 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/UIUtils.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/ui/UIUtils.scala @@ -18,7 +18,6 @@ package org.apache.spark.sql.streaming.ui import java.text.SimpleDateFormat -import java.util.Locale import org.apache.spark.sql.catalyst.util.DateTimeUtils.getTimeZone @@ -47,19 +46,19 @@ private[ui] object UIUtils { } } - def getQueryName(query: StreamingQueryUIData): String = { - if (query.name == null || query.name.isEmpty) { + def getQueryName(uiData: StreamingQueryUIData): String = { + if (uiData.summary.name == null || uiData.summary.name.isEmpty) { "" } else { - query.name + uiData.summary.name } } - def getQueryStatus(query: StreamingQueryUIData): String = { - if (query.isActive) { + def getQueryStatus(uiData: StreamingQueryUIData): String = { + if (uiData.summary.isActive) { "RUNNING" } else { - query.exception.map(_ => "FAILED").getOrElse("FINISHED") + uiData.summary.exception.map(_ => "FAILED").getOrElse("FINISHED") } } diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java index dd3755d3f904e..de88f80eb53b8 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/Java8DatasetAggregatorSuite.java @@ -34,43 +34,43 @@ public class Java8DatasetAggregatorSuite extends JavaDatasetAggregatorSuiteBase @Test public void testTypedAggregationAverage() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.avg(v -> (double)(v._2() * 2))); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 3.0), new Tuple2<>("b", 6.0)), - agged.collectAsList()); + aggregated.collectAsList()); } @SuppressWarnings("deprecation") @Test public void testTypedAggregationCount() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.count(v -> v)); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 2L), new Tuple2<>("b", 1L)), - agged.collectAsList()); + aggregated.collectAsList()); } 
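Editor's note, returning to the StreamingQueryStatusListener changes earlier in this patch: each progress update is stored under the key runId_batchId_timestamp (the timestamp keeps keys distinct when an "empty" batch reuses a batchId), and once a run exceeds the configured retention the oldest entries are evicted first. The sketch below illustrates that bookkeeping with a plain in-memory map standing in for the ElementTrackingStore; it shows the scheme only, not the actual KVStore plumbing.

// Hedged sketch of the progress-retention bookkeeping (single run, in-memory stand-in store).
object ProgressRetentionSketch {
  import java.util.UUID
  import scala.collection.mutable

  // Same key format as the patch's getUniqueId.
  def uniqueId(runId: UUID, batchId: Long, timestamp: String): String =
    s"${runId}_${batchId}_$timestamp"

  final class Store(retention: Int) {
    private val ids = mutable.Queue.empty[String]          // insertion order of progress keys
    private val rows = mutable.Map.empty[String, String]   // key -> serialized progress (illustrative)

    def write(runId: UUID, batchId: Long, timestamp: String, payload: String): Unit = {
      val id = uniqueId(runId, batchId, timestamp)
      ids.enqueue(id)
      rows(id) = payload
      while (ids.length > retention) rows.remove(ids.dequeue()) // evict the oldest entries first
    }

    def size: Int = rows.size
  }

  def main(args: Array[String]): Unit = {
    val runId = UUID.randomUUID()
    val store = new Store(retention = 2)
    store.write(runId, 0L, "2020-07-29T10:56:56.000Z", "p0")
    store.write(runId, 1L, "2020-07-29T10:57:01.000Z", "p1")
    store.write(runId, 1L, "2020-07-29T10:57:06.000Z", "p1-empty") // same batchId, still a new key
    println(store.size) // 2 -- the oldest progress entry was dropped
  }
}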
@SuppressWarnings("deprecation") @Test public void testTypedAggregationSumDouble() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.sum(v -> (double)v._2())); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 3.0), new Tuple2<>("b", 3.0)), - agged.collectAsList()); + aggregated.collectAsList()); } @SuppressWarnings("deprecation") @Test public void testTypedAggregationSumLong() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.sumLong(v -> (long)v._2())); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 3L), new Tuple2<>("b", 3L)), - agged.collectAsList()); + aggregated.collectAsList()); } } diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetAggregatorSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetAggregatorSuite.java index 8a90624f2070b..979b7751fa9a8 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetAggregatorSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetAggregatorSuite.java @@ -38,18 +38,18 @@ public class JavaDatasetAggregatorSuite extends JavaDatasetAggregatorSuiteBase { public void testTypedAggregationAnonClass() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg(new IntSumOf().toColumn()); + Dataset> aggregated = grouped.agg(new IntSumOf().toColumn()); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 3), new Tuple2<>("b", 3)), - agged.collectAsList()); + aggregated.collectAsList()); - Dataset> agged2 = grouped.agg(new IntSumOf().toColumn()) + Dataset> aggregated2 = grouped.agg(new IntSumOf().toColumn()) .as(Encoders.tuple(Encoders.STRING(), Encoders.INT())); Assert.assertEquals( Arrays.asList( new Tuple2<>("a", 3), new Tuple2<>("b", 3)), - agged2.collectAsList()); + aggregated2.collectAsList()); } static class IntSumOf extends Aggregator, Integer, Integer> { @@ -88,43 +88,43 @@ public Encoder outputEncoder() { @Test public void testTypedAggregationAverage() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.avg(value -> value._2() * 2.0)); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 3.0), new Tuple2<>("b", 6.0)), - agged.collectAsList()); + aggregated.collectAsList()); } @SuppressWarnings("deprecation") @Test public void testTypedAggregationCount() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.count(value -> value)); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 2L), new Tuple2<>("b", 1L)), - agged.collectAsList()); + aggregated.collectAsList()); } @SuppressWarnings("deprecation") @Test public void testTypedAggregationSumDouble() { KeyValueGroupedDataset> grouped = generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.sum(value -> (double) value._2())); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 3.0), new Tuple2<>("b", 3.0)), - agged.collectAsList()); + aggregated.collectAsList()); } @SuppressWarnings("deprecation") @Test public void testTypedAggregationSumLong() { KeyValueGroupedDataset> grouped = 
generateGroupedDataset(); - Dataset> agged = grouped.agg( + Dataset> aggregated = grouped.agg( org.apache.spark.sql.expressions.javalang.typed.sumLong(value -> (long) value._2())); Assert.assertEquals( Arrays.asList(new Tuple2<>("a", 3L), new Tuple2<>("b", 3L)), - agged.collectAsList()); + aggregated.collectAsList()); } } diff --git a/sql/core/src/test/resources/META-INF/services/org.apache.spark.sql.jdbc.JdbcConnectionProvider b/sql/core/src/test/resources/META-INF/services/org.apache.spark.sql.jdbc.JdbcConnectionProvider new file mode 100644 index 0000000000000..afb48e1a3511f --- /dev/null +++ b/sql/core/src/test/resources/META-INF/services/org.apache.spark.sql.jdbc.JdbcConnectionProvider @@ -0,0 +1 @@ +org.apache.spark.sql.execution.datasources.jdbc.connection.IntentionallyFaultyConnectionProvider \ No newline at end of file diff --git a/sql/core/src/test/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister b/sql/core/src/test/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister index 914af589384df..dd22970203b3c 100644 --- a/sql/core/src/test/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister +++ b/sql/core/src/test/resources/META-INF/services/org.apache.spark.sql.sources.DataSourceRegister @@ -11,4 +11,5 @@ org.apache.spark.sql.streaming.sources.FakeReadBothModes org.apache.spark.sql.streaming.sources.FakeReadNeitherMode org.apache.spark.sql.streaming.sources.FakeWriteOnly org.apache.spark.sql.streaming.sources.FakeNoWrite +org.apache.spark.sql.streaming.sources.FakeWriteSupportingExternalMetadata org.apache.spark.sql.streaming.sources.FakeWriteSupportProviderV1Fallback diff --git a/sql/core/src/test/resources/SPARK-33084.jar b/sql/core/src/test/resources/SPARK-33084.jar new file mode 100644 index 0000000000000..61e1663ad3a28 Binary files /dev/null and b/sql/core/src/test/resources/SPARK-33084.jar differ diff --git a/sql/core/src/test/resources/hive-site.xml b/sql/core/src/test/resources/hive-site.xml index 17297b3e22a7e..4bf6189b73ca9 100644 --- a/sql/core/src/test/resources/hive-site.xml +++ b/sql/core/src/test/resources/hive-site.xml @@ -23,4 +23,9 @@ true Internal marker for test. 
+ + hadoop.tmp.dir + /tmp/hive_one + default is /tmp/hadoop-${user.name} and will be overridden + diff --git a/sql/core/src/test/resources/spark-events/local-1596020211915 b/sql/core/src/test/resources/spark-events/local-1596020211915 new file mode 100644 index 0000000000000..ff34bbc16ef3a --- /dev/null +++ b/sql/core/src/test/resources/spark-events/local-1596020211915 @@ -0,0 +1,160 @@ +{"Event":"SparkListenerLogStart","Spark Version":"3.1.0-SNAPSHOT"} +{"Event":"SparkListenerResourceProfileAdded","Resource Profile Id":0,"Executor Resource Requests":{"cores":{"Resource Name":"cores","Amount":1,"Discovery Script":"","Vendor":""},"memory":{"Resource Name":"memory","Amount":1024,"Discovery Script":"","Vendor":""}},"Task Resource Requests":{"cpus":{"Resource Name":"cpus","Amount":1.0}}} +{"Event":"SparkListenerExecutorAdded","Timestamp":1596020212090,"Executor ID":"driver","Executor Info":{"Host":"iZbp19vpr16ix621sdw476Z","Total Cores":4,"Log Urls":{},"Attributes":{},"Resources":{},"Resource Profile Id":0}} +{"Event":"SparkListenerBlockManagerAdded","Block Manager ID":{"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Port":39845},"Maximum Memory":384093388,"Timestamp":1596020212109,"Maximum Onheap Memory":384093388,"Maximum Offheap Memory":0} +{"Event":"SparkListenerEnvironmentUpdate","JVM Information":{"Java Home":"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre","Java Version":"1.8.0_252 (Oracle Corporation)","Scala Version":"version 2.12.10"},"Spark Properties":{"spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.driver.port":"46309","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","spark.app.name":"StructuredKafkaWordCount","spark.scheduler.mode":"FIFO","spark.submit.pyFiles":"","spark.executor.id":"driver","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"},"Hadoop Properties":{"yarn.resourcemanager.amlauncher.thread-count":"50","yarn.sharedcache.enabled":"false","fs.s3a.connection.maximum":"15","fs.s3a.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem","yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms":"1000","hadoop.security.kms.client.timeout":"60","hadoop.http.authentication.kerberos.principal":"HTTP/_HOST@LOCALHOST","mapreduce.framework.name":"local","yarn.sharedcache.uploader.server.thread-count":"50","yarn.nodemanager.linux-container-executor.nonsecure-mode.user-pattern":"^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$","tfile.fs.output.buffer.size":"262144","yarn.app.mapreduce.am.job.task.listener.thread-count":"30","hadoop.security.groups.cache.background.reload.threads":"3","yarn.resourcemanager.webapp.cross-origin.enabled":"false","fs.AbstractFileSystem.ftp.impl":"org.apache.hadoop.fs.ftp.FtpFs","fs.s3.block.size":"67108864","hadoop.registry.secure":"false","hadoop.shell.safely.delete.limit.num.files":"100","dfs.bytes-per-checksum":"512","fs.s3.buffer.dir":"${hadoop.tmp.dir}/s3","mapreduce.job.acl-view-job":" 
","mapreduce.jobhistory.loadedjobs.cache.size":"5","mapreduce.input.fileinputformat.split.minsize":"0","yarn.resourcemanager.container.liveness-monitor.interval-ms":"600000","yarn.resourcemanager.client.thread-count":"50","io.seqfile.compress.blocksize":"1000000","yarn.sharedcache.checksum.algo.impl":"org.apache.hadoop.yarn.sharedcache.ChecksumSHA256Impl","yarn.nodemanager.amrmproxy.interceptor-class.pipeline":"org.apache.hadoop.yarn.server.nodemanager.amrmproxy.DefaultRequestInterceptor","yarn.timeline-service.entity-group-fs-store.leveldb-cache-read-cache-size":"10485760","mapreduce.reduce.shuffle.fetch.retry.interval-ms":"1000","mapreduce.task.profile.maps":"0-2","yarn.scheduler.include-port-in-node-name":"false","yarn.nodemanager.admin-env":"MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX","yarn.resourcemanager.node-removal-untracked.timeout-ms":"60000","mapreduce.am.max-attempts":"2","hadoop.security.kms.client.failover.sleep.base.millis":"100","mapreduce.jobhistory.webapp.https.address":"0.0.0.0:19890","yarn.node-labels.fs-store.impl.class":"org.apache.hadoop.yarn.nodelabels.FileSystemNodeLabelsStore","fs.trash.checkpoint.interval":"0","mapreduce.job.map.output.collector.class":"org.apache.hadoop.mapred.MapTask$MapOutputBuffer","yarn.resourcemanager.node-ip-cache.expiry-interval-secs":"-1","hadoop.http.authentication.signature.secret.file":"*********(redacted)","hadoop.jetty.logs.serve.aliases":"true","yarn.timeline-service.handler-thread-count":"10","yarn.resourcemanager.max-completed-applications":"10000","yarn.resourcemanager.system-metrics-publisher.enabled":"false","yarn.sharedcache.webapp.address":"0.0.0.0:8788","yarn.resourcemanager.delegation.token.renew-interval":"*********(redacted)","yarn.sharedcache.nm.uploader.replication.factor":"10","hadoop.security.groups.negative-cache.secs":"30","yarn.app.mapreduce.task.container.log.backups":"0","mapreduce.reduce.skip.proc-count.auto-incr":"true","hadoop.security.group.mapping.ldap.posix.attr.gid.name":"gidNumber","ipc.client.fallback-to-simple-auth-allowed":"false","yarn.client.failover-proxy-provider":"org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider","yarn.timeline-service.http-authentication.simple.anonymous.allowed":"true","ha.health-monitor.check-interval.ms":"1000","yarn.acl.reservation-enable":"false","yarn.resourcemanager.store.class":"org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore","yarn.app.mapreduce.am.hard-kill-timeout-ms":"10000","yarn.nodemanager.container-metrics.enable":"true","yarn.timeline-service.client.fd-clean-interval-secs":"60","yarn.nodemanager.docker-container-executor.exec-name":"/usr/bin/docker","yarn.resourcemanager.nodemanagers.heartbeat-interval-ms":"1000","mapred.child.java.opts":"-Xmx200m","hadoop.common.configuration.version":"0.23.0","yarn.nodemanager.remote-app-log-dir-suffix":"logs","yarn.nodemanager.windows-container.cpu-limit.enabled":"false","yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed":"false","file.blocksize":"67108864","hadoop.registry.zk.retry.ceiling.ms":"60000","yarn.sharedcache.store.in-memory.initial-delay-mins":"10","mapreduce.jobhistory.principal":"jhs/_HOST@REALM.TLD","mapreduce.map.skip.proc-count.auto-incr":"true","mapreduce.task.profile.reduces":"0-2","yarn.timeline-service.webapp.https.address":"${yarn.timeline-service.hostname}:8190","yarn.resourcemanager.scheduler.address":"${yarn.resourcemanager.hostname}:8030","yarn.node-labels.enabled":"false","yarn.resourcemanager.webapp.ui-actions.enabled":"true","mapreduce.task.tim
eout":"600000","yarn.sharedcache.client-server.thread-count":"50","hadoop.security.crypto.cipher.suite":"AES/CTR/NoPadding","yarn.resourcemanager.connect.max-wait.ms":"900000","fs.defaultFS":"file:///","yarn.minicluster.use-rpc":"false","fs.har.impl.disable.cache":"true","io.compression.codec.bzip2.library":"system-native","mapreduce.shuffle.connection-keep-alive.timeout":"5","yarn.resourcemanager.webapp.https.address":"${yarn.resourcemanager.hostname}:8090","mapreduce.jobhistory.address":"0.0.0.0:10020","yarn.resourcemanager.nm-tokens.master-key-rolling-interval-secs":"*********(redacted)","yarn.is.minicluster":"false","yarn.nodemanager.address":"${yarn.nodemanager.hostname}:0","fs.AbstractFileSystem.s3a.impl":"org.apache.hadoop.fs.s3a.S3A","mapreduce.task.combine.progress.records":"10000","yarn.resourcemanager.am.max-attempts":"2","yarn.nodemanager.linux-container-executor.cgroups.hierarchy":"/hadoop-yarn","ipc.server.log.slow.rpc":"false","yarn.resourcemanager.node-labels.provider.fetch-interval-ms":"1800000","yarn.nodemanager.webapp.cross-origin.enabled":"false","yarn.app.mapreduce.am.job.committer.cancel-timeout":"60000","ftp.bytes-per-checksum":"512","yarn.nodemanager.resource.memory-mb":"-1","fs.s3a.fast.upload.active.blocks":"4","mapreduce.jobhistory.joblist.cache.size":"20000","fs.ftp.host":"0.0.0.0","yarn.resourcemanager.fs.state-store.num-retries":"0","yarn.resourcemanager.nodemanager-connect-retries":"10","hadoop.security.kms.client.encrypted.key.cache.low-watermark":"0.3f","yarn.timeline-service.client.max-retries":"30","dfs.ha.fencing.ssh.connect-timeout":"30000","yarn.log-aggregation-enable":"false","mapreduce.reduce.markreset.buffer.percent":"0.0","fs.AbstractFileSystem.viewfs.impl":"org.apache.hadoop.fs.viewfs.ViewFs","mapreduce.task.io.sort.factor":"10","yarn.nodemanager.amrmproxy.client.thread-count":"25","ha.failover-controller.new-active.rpc-timeout.ms":"60000","yarn.nodemanager.container-localizer.java.opts":"-Xmx256m","mapreduce.jobhistory.datestring.cache.size":"200000","mapreduce.job.acl-modify-job":" ","yarn.nodemanager.windows-container.memory-limit.enabled":"false","yarn.timeline-service.webapp.address":"${yarn.timeline-service.hostname}:8188","yarn.app.mapreduce.am.job.committer.commit-window":"10000","yarn.nodemanager.container-manager.thread-count":"20","yarn.minicluster.fixed.ports":"false","yarn.cluster.max-application-priority":"0","yarn.timeline-service.ttl-enable":"true","mapreduce.jobhistory.recovery.store.fs.uri":"${hadoop.tmp.dir}/mapred/history/recoverystore","hadoop.caller.context.signature.max.size":"40","ha.zookeeper.session-timeout.ms":"10000","tfile.io.chunk.size":"1048576","mapreduce.job.speculative.slowtaskthreshold":"1.0","io.serializations":"org.apache.hadoop.io.serializer.WritableSerialization, org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization, 
org.apache.hadoop.io.serializer.avro.AvroReflectSerialization","hadoop.security.kms.client.failover.sleep.max.millis":"2000","hadoop.security.group.mapping.ldap.directory.search.timeout":"10000","fs.swift.impl":"org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem","yarn.nodemanager.local-cache.max-files-per-directory":"8192","hadoop.http.cross-origin.enabled":"false","mapreduce.map.sort.spill.percent":"0.80","yarn.timeline-service.entity-group-fs-store.scan-interval-seconds":"60","yarn.timeline-service.client.best-effort":"false","yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled":"*********(redacted)","hadoop.security.group.mapping.ldap.posix.attr.uid.name":"uidNumber","fs.AbstractFileSystem.swebhdfs.impl":"org.apache.hadoop.fs.SWebHdfs","mapreduce.ifile.readahead":"true","yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms":"300000","hadoop.security.kms.client.encrypted.key.cache.num.refill.threads":"2","yarn.resourcemanager.scheduler.class":"org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler","yarn.app.mapreduce.am.command-opts":"-Xmx1024m","mapreduce.cluster.local.dir":"${hadoop.tmp.dir}/mapred/local","io.mapfile.bloom.error.rate":"0.005","yarn.nodemanager.runtime.linux.allowed-runtimes":"default","yarn.sharedcache.store.class":"org.apache.hadoop.yarn.server.sharedcachemanager.store.InMemorySCMStore","ha.failover-controller.graceful-fence.rpc-timeout.ms":"5000","ftp.replication":"3","hadoop.security.uid.cache.secs":"14400","mapreduce.job.maxtaskfailures.per.tracker":"3","io.skip.checksum.errors":"false","yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts":"3","fs.s3a.connection.timeout":"200000","mapreduce.job.max.split.locations":"10","hadoop.registry.zk.session.timeout.ms":"60000","mapreduce.jvm.system-properties-to-log":"os.name,os.version,java.home,java.runtime.version,java.vendor,java.version,java.vm.name,java.class.path,java.io.tmpdir,user.dir,user.name","yarn.timeline-service.entity-group-fs-store.active-dir":"/tmp/entity-file-history/active","mapreduce.shuffle.transfer.buffer.size":"131072","yarn.timeline-service.client.retry-interval-ms":"1000","yarn.http.policy":"HTTP_ONLY","fs.s3a.socket.send.buffer":"8192","yarn.sharedcache.uploader.server.address":"0.0.0.0:8046","hadoop.http.authentication.token.validity":"*********(redacted)","mapreduce.shuffle.max.connections":"0","yarn.minicluster.yarn.nodemanager.resource.memory-mb":"4096","mapreduce.job.emit-timeline-data":"false","yarn.nodemanager.resource.system-reserved-memory-mb":"-1","hadoop.kerberos.min.seconds.before.relogin":"60","mapreduce.jobhistory.move.thread-count":"3","yarn.resourcemanager.admin.client.thread-count":"1","yarn.dispatcher.drain-events.timeout":"300000","fs.s3a.buffer.dir":"${hadoop.tmp.dir}/s3a","hadoop.ssl.enabled.protocols":"TLSv1,SSLv2Hello,TLSv1.1,TLSv1.2","mapreduce.jobhistory.admin.address":"0.0.0.0:10033","yarn.log-aggregation-status.time-out.ms":"600000","mapreduce.shuffle.port":"13562","yarn.resourcemanager.max-log-aggregation-diagnostics-in-memory":"10","yarn.nodemanager.health-checker.interval-ms":"600000","ftp.blocksize":"67108864","yarn.nodemanager.log-container-debug-info.enabled":"false","yarn.client.max-cached-nodemanagers-proxies":"0","yarn.nodemanager.linux-container-executor.cgroups.delete-delay-ms":"20","yarn.nodemanager.delete.debug-delay-sec":"0","yarn.nodemanager.pmem-check-enabled":"true","yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage":"90.0","mapreduce.app-submission.cross-platform":"f
alse","yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms":"10000","hadoop.security.groups.cache.secs":"300","yarn.resourcemanager.zk-retry-interval-ms":"1000","ipc.maximum.data.length":"67108864","mapreduce.shuffle.max.threads":"0","hadoop.security.authorization":"false","mapreduce.job.complete.cancel.delegation.tokens":"*********(redacted)","fs.s3a.paging.maximum":"5000","nfs.exports.allowed.hosts":"* rw","mapreduce.jobhistory.http.policy":"HTTP_ONLY","yarn.sharedcache.store.in-memory.check-period-mins":"720","s3native.replication":"3","hadoop.security.group.mapping.ldap.ssl":"false","yarn.client.application-client-protocol.poll-interval-ms":"200","ha.zookeeper.parent-znode":"/hadoop-ha","yarn.nodemanager.log-aggregation.policy.class":"org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.AllContainerLogAggregationPolicy","mapreduce.reduce.shuffle.merge.percent":"0.66","hadoop.security.group.mapping.ldap.search.filter.group":"(objectClass=group)","yarn.nodemanager.resourcemanager.minimum.version":"NONE","mapreduce.job.speculative.speculative-cap-running-tasks":"0.1","yarn.admin.acl":"*","yarn.nodemanager.recovery.supervised":"false","yarn.sharedcache.admin.thread-count":"1","yarn.resourcemanager.ha.automatic-failover.enabled":"true","mapreduce.reduce.skip.maxgroups":"0","mapreduce.reduce.shuffle.connect.timeout":"180000","yarn.resourcemanager.address":"${yarn.resourcemanager.hostname}:8032","ipc.client.ping":"true","mapreduce.task.local-fs.write-limit.bytes":"-1","fs.adl.oauth2.access.token.provider.type":"*********(redacted)","mapreduce.shuffle.ssl.file.buffer.size":"65536","yarn.resourcemanager.ha.automatic-failover.embedded":"true","hadoop.ssl.enabled":"false","fs.s3a.multipart.purge":"false","mapreduce.job.end-notification.max.attempts":"5","mapreduce.output.fileoutputformat.compress.codec":"org.apache.hadoop.io.compress.DefaultCodec","yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled":"false","ha.health-monitor.connect-retry-interval.ms":"1000","yarn.nodemanager.keytab":"/etc/krb5.keytab","mapreduce.jobhistory.keytab":"/etc/security/keytab/jhs.service.keytab","fs.s3a.threads.max":"10","mapreduce.reduce.shuffle.input.buffer.percent":"0.70","mapreduce.cluster.temp.dir":"${hadoop.tmp.dir}/mapred/temp","s3.replication":"3","yarn.nodemanager.node-labels.resync-interval-ms":"120000","hadoop.tmp.dir":"/tmp/hadoop-${user.name}","mapreduce.job.maps":"2","mapreduce.job.end-notification.max.retry.interval":"5000","yarn.log-aggregation.retain-check-interval-seconds":"-1","yarn.resourcemanager.resource-tracker.client.thread-count":"50","yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size":"10000","yarn.resourcemanager.ha.automatic-failover.zk-base-path":"/yarn-leader-election","io.seqfile.local.dir":"${hadoop.tmp.dir}/io/local","mapreduce.client.submit.file.replication":"10","mapreduce.jobhistory.minicluster.fixed.ports":"false","fs.s3a.multipart.threshold":"2147483647","mapreduce.jobhistory.done-dir":"${yarn.app.mapreduce.am.staging-dir}/history/done","yarn.resourcemanager.zk-acl":"world:anyone:rwcda","ipc.client.idlethreshold":"4000","yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage":"false","mapreduce.reduce.input.buffer.percent":"0.0","yarn.nodemanager.amrmproxy.enable":"false","fs.ftp.host.port":"21","ipc.ping.interval":"60000","yarn.resourcemanager.history-writer.multi-threaded-dispatcher.pool-size":"10","yarn.resourcemanager.admin.address":"${yarn.resourcemanager.hostname}:8033","file.client-w
rite-packet-size":"65536","ipc.client.kill.max":"10","mapreduce.reduce.speculative":"true","mapreduce.local.clientfactory.class.name":"org.apache.hadoop.mapred.LocalClientFactory","mapreduce.job.reducer.unconditional-preempt.delay.sec":"300","yarn.nodemanager.disk-health-checker.interval-ms":"120000","yarn.nodemanager.log.deletion-threads-count":"4","ipc.client.connection.maxidletime":"10000","mapreduce.task.io.sort.mb":"100","yarn.nodemanager.localizer.client.thread-count":"5","yarn.sharedcache.admin.address":"0.0.0.0:8047","yarn.nodemanager.localizer.cache.cleanup.interval-ms":"600000","hadoop.security.crypto.codec.classes.aes.ctr.nopadding":"org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, org.apache.hadoop.crypto.JceAesCtrCryptoCodec","fs.s3a.connection.ssl.enabled":"true","yarn.nodemanager.process-kill-wait.ms":"2000","mapreduce.job.hdfs-servers":"${fs.defaultFS}","hadoop.workaround.non.threadsafe.getpwuid":"true","fs.df.interval":"60000","fs.s3.sleepTimeSeconds":"10","fs.s3a.multiobjectdelete.enable":"true","yarn.sharedcache.cleaner.resource-sleep-ms":"0","yarn.nodemanager.disk-health-checker.min-healthy-disks":"0.25","hadoop.shell.missing.defaultFs.warning":"false","io.file.buffer.size":"65536","hadoop.security.group.mapping.ldap.search.attr.member":"member","hadoop.security.random.device.file.path":"/dev/urandom","hadoop.security.sensitive-config-keys":"*********(redacted)","hadoop.rpc.socket.factory.class.default":"org.apache.hadoop.net.StandardSocketFactory","yarn.intermediate-data-encryption.enable":"false","yarn.resourcemanager.connect.retry-interval.ms":"30000","yarn.scheduler.minimum-allocation-mb":"1024","yarn.app.mapreduce.am.staging-dir":"/tmp/hadoop-yarn/staging","mapreduce.reduce.shuffle.read.timeout":"180000","hadoop.http.cross-origin.max-age":"1800","fs.s3a.connection.establish.timeout":"5000","mapreduce.job.running.map.limit":"0","yarn.minicluster.control-resource-monitoring":"false","hadoop.ssl.require.client.cert":"false","hadoop.kerberos.kinit.command":"kinit","mapreduce.reduce.log.level":"INFO","hadoop.security.dns.log-slow-lookups.threshold.ms":"1000","mapreduce.job.ubertask.enable":"false","hadoop.caller.context.enabled":"false","yarn.nodemanager.vmem-pmem-ratio":"2.1","hadoop.rpc.protection":"authentication","ha.health-monitor.rpc-timeout.ms":"45000","s3native.stream-buffer-size":"4096","yarn.nodemanager.remote-app-log-dir":"/tmp/logs","yarn.nodemanager.resource.pcores-vcores-multiplier":"1.0","yarn.app.mapreduce.am.containerlauncher.threadpool-initial-size":"10","fs.s3n.multipart.uploads.enabled":"false","hadoop.security.crypto.buffer.size":"8192","yarn.nodemanager.node-labels.provider.fetch-interval-ms":"600000","mapreduce.jobhistory.recovery.store.leveldb.path":"${hadoop.tmp.dir}/mapred/history/recoverystore","yarn.client.failover-retries-on-socket-timeouts":"0","hadoop.security.instrumentation.requires.admin":"false","yarn.nodemanager.delete.thread-count":"4","mapreduce.job.finish-when-all-reducers-done":"false","hadoop.registry.jaas.context":"Client","yarn.timeline-service.leveldb-timeline-store.path":"${hadoop.tmp.dir}/yarn/timeline","s3.blocksize":"67108864","io.map.index.interval":"128","mapreduce.job.counters.max":"120","yarn.timeline-service.store-class":"org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore","mapreduce.jobhistory.move.interval-ms":"180000","yarn.nodemanager.localizer.fetch.thread-count":"4","yarn.resourcemanager.scheduler.client.thread-count":"50","hadoop.ssl.hostname.verifier":"DEFAULT","yarn.timeline-service.leveldb-state-st
ore.path":"${hadoop.tmp.dir}/yarn/timeline","mapreduce.job.classloader":"false","mapreduce.task.profile.map.params":"${mapreduce.task.profile.params}","ipc.client.connect.timeout":"20000","s3.stream-buffer-size":"4096","yarn.nm.liveness-monitor.expiry-interval-ms":"600000","yarn.resourcemanager.reservation-system.planfollower.time-step":"1000","s3native.bytes-per-checksum":"512","mapreduce.jobtracker.address":"local","yarn.nodemanager.recovery.enabled":"false","mapreduce.job.end-notification.retry.interval":"1000","fs.du.interval":"600000","hadoop.security.group.mapping.ldap.read.timeout.ms":"60000","hadoop.security.groups.cache.warn.after.ms":"5000","file.bytes-per-checksum":"512","yarn.node-labels.fs-store.retry-policy-spec":"2000, 500","hadoop.security.groups.cache.background.reload":"false","net.topology.script.number.args":"100","mapreduce.task.merge.progress.records":"10000","yarn.nodemanager.localizer.address":"${yarn.nodemanager.hostname}:8040","yarn.timeline-service.keytab":"/etc/krb5.keytab","mapreduce.reduce.shuffle.fetch.retry.timeout-ms":"30000","yarn.resourcemanager.rm.container-allocation.expiry-interval-ms":"600000","mapreduce.fileoutputcommitter.algorithm.version":"1","yarn.resourcemanager.work-preserving-recovery.enabled":"true","mapreduce.map.skip.maxrecords":"0","yarn.sharedcache.root-dir":"/sharedcache","hadoop.http.authentication.type":"simple","mapreduce.task.userlog.limit.kb":"0","yarn.resourcemanager.scheduler.monitor.enable":"false","fs.s3n.block.size":"67108864","ipc.client.connect.max.retries":"10","hadoop.registry.zk.retry.times":"5","mapreduce.jobtracker.staging.root.dir":"${hadoop.tmp.dir}/mapred/staging","yarn.nodemanager.resource-monitor.interval-ms":"3000","mapreduce.shuffle.listen.queue.size":"128","mapreduce.map.cpu.vcores":"1","yarn.timeline-service.client.fd-retain-secs":"300","hadoop.user.group.static.mapping.overrides":"dr.who=;","mapreduce.jobhistory.recovery.store.class":"org.apache.hadoop.mapreduce.v2.hs.HistoryServerFileSystemStateStoreService","yarn.resourcemanager.fail-fast":"${yarn.fail-fast}","yarn.resourcemanager.proxy-user-privileges.enabled":"false","mapreduce.job.reducer.preempt.delay.sec":"0","hadoop.util.hash.type":"murmur","yarn.app.mapreduce.client.job.max-retries":"0","mapreduce.reduce.shuffle.retry-delay.max.ms":"60000","hadoop.security.group.mapping.ldap.connection.timeout.ms":"60000","mapreduce.task.profile.params":"-agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s","yarn.app.mapreduce.shuffle.log.backups":"0","hadoop.registry.zk.retry.interval.ms":"1000","yarn.nodemanager.linux-container-executor.cgroups.delete-timeout-ms":"1000","fs.AbstractFileSystem.file.impl":"org.apache.hadoop.fs.local.LocalFs","yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds":"-1","mapreduce.jobhistory.cleaner.interval-ms":"86400000","hadoop.registry.zk.quorum":"localhost:2181","mapreduce.output.fileoutputformat.compress":"false","yarn.resourcemanager.am-rm-tokens.master-key-rolling-interval-secs":"*********(redacted)","hadoop.ssl.server.conf":"ssl-server.xml","yarn.sharedcache.cleaner.initial-delay-mins":"10","mapreduce.client.completion.pollinterval":"5000","hadoop.ssl.keystores.factory.class":"org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory","yarn.app.mapreduce.am.resource.cpu-vcores":"1","yarn.timeline-service.enabled":"false","yarn.nodemanager.runtime.linux.docker.capabilities":"CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE","ya
rn.acl.enable":"false","yarn.timeline-service.entity-group-fs-store.done-dir":"/tmp/entity-file-history/done/","mapreduce.task.profile":"false","yarn.resourcemanager.fs.state-store.uri":"${hadoop.tmp.dir}/yarn/system/rmstore","yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user":"nobody","yarn.resourcemanager.configuration.provider-class":"org.apache.hadoop.yarn.LocalConfigurationProvider","yarn.resourcemanager.configuration.file-system-based-store":"/yarn/conf","yarn.nodemanager.resource.percentage-physical-cpu-limit":"100","mapreduce.jobhistory.client.thread-count":"10","tfile.fs.input.buffer.size":"262144","mapreduce.client.progressmonitor.pollinterval":"1000","yarn.nodemanager.log-dirs":"${yarn.log.dir}/userlogs","fs.automatic.close":"true","fs.s3n.multipart.copy.block.size":"5368709120","yarn.nodemanager.hostname":"0.0.0.0","yarn.resourcemanager.zk-timeout-ms":"10000","ftp.stream-buffer-size":"4096","yarn.fail-fast":"false","hadoop.security.group.mapping.ldap.search.filter.user":"(&(objectClass=user)(sAMAccountName={0}))","yarn.timeline-service.address":"${yarn.timeline-service.hostname}:10200","mapreduce.job.ubertask.maxmaps":"9","fs.s3a.threads.keepalivetime":"60","mapreduce.task.files.preserve.failedtasks":"false","yarn.app.mapreduce.client.job.retry-interval":"2000","ha.failover-controller.graceful-fence.connection.retries":"1","yarn.resourcemanager.delegation.token.max-lifetime":"*********(redacted)","yarn.timeline-service.entity-group-fs-store.summary-store":"org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore","mapreduce.reduce.cpu.vcores":"1","fs.client.resolve.remote.symlinks":"true","yarn.nodemanager.webapp.https.address":"0.0.0.0:8044","hadoop.http.cross-origin.allowed-origins":"*","yarn.timeline-service.entity-group-fs-store.retain-seconds":"604800","yarn.resourcemanager.metrics.runtime.buckets":"60,300,1440","yarn.timeline-service.generic-application-history.max-applications":"10000","yarn.nodemanager.local-dirs":"${hadoop.tmp.dir}/nm-local-dir","mapreduce.shuffle.connection-keep-alive.enable":"false","yarn.node-labels.configuration-type":"centralized","fs.s3a.path.style.access":"false","yarn.nodemanager.aux-services.mapreduce_shuffle.class":"org.apache.hadoop.mapred.ShuffleHandler","yarn.sharedcache.store.in-memory.staleness-period-mins":"10080","fs.adl.impl":"org.apache.hadoop.fs.adl.AdlFileSystem","yarn.resourcemanager.nodemanager.minimum.version":"NONE","net.topology.impl":"org.apache.hadoop.net.NetworkTopology","io.map.index.skip":"0","yarn.scheduler.maximum-allocation-vcores":"4","hadoop.http.cross-origin.allowed-headers":"X-Requested-With,Content-Type,Accept,Origin","yarn.nodemanager.log-aggregation.compression-type":"none","yarn.timeline-service.version":"1.0f","yarn.ipc.rpc.class":"org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC","mapreduce.reduce.maxattempts":"4","hadoop.security.dns.log-slow-lookups.enabled":"false","mapreduce.job.committer.setup.cleanup.needed":"true","mapreduce.job.running.reduce.limit":"0","ipc.maximum.response.length":"134217728","mapreduce.job.token.tracking.ids.enabled":"*********(redacted)","hadoop.caller.context.max.size":"128","hadoop.registry.system.acls":"sasl:yarn@, sasl:mapred@, 
sasl:hdfs@","yarn.nodemanager.recovery.dir":"${hadoop.tmp.dir}/yarn-nm-recovery","fs.s3a.fast.upload.buffer":"disk","mapreduce.jobhistory.intermediate-done-dir":"${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate","yarn.app.mapreduce.shuffle.log.separate":"true","fs.s3a.max.total.tasks":"5","fs.s3a.readahead.range":"64K","hadoop.http.authentication.simple.anonymous.allowed":"true","fs.s3a.fast.upload":"false","fs.s3a.attempts.maximum":"20","hadoop.registry.zk.connection.timeout.ms":"15000","yarn.resourcemanager.delegation-token-renewer.thread-count":"*********(redacted)","yarn.nodemanager.health-checker.script.timeout-ms":"1200000","yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size":"10000","mapreduce.map.log.level":"INFO","mapreduce.output.fileoutputformat.compress.type":"RECORD","yarn.resourcemanager.leveldb-state-store.path":"${hadoop.tmp.dir}/yarn/system/rmstore","hadoop.registry.rm.enabled":"false","mapreduce.ifile.readahead.bytes":"4194304","yarn.resourcemanager.fs.state-store.retry-policy-spec":"2000, 500","yarn.sharedcache.app-checker.class":"org.apache.hadoop.yarn.server.sharedcachemanager.RemoteAppChecker","yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users":"true","yarn.nodemanager.resource.detect-hardware-capabilities":"false","mapreduce.cluster.acls.enabled":"false","mapreduce.job.speculative.retry-after-no-speculate":"1000","yarn.resourcemanager.fs.state-store.retry-interval-ms":"1000","file.stream-buffer-size":"4096","mapreduce.map.output.compress.codec":"org.apache.hadoop.io.compress.DefaultCodec","mapreduce.map.speculative":"true","mapreduce.job.speculative.retry-after-speculate":"15000","yarn.nodemanager.linux-container-executor.cgroups.mount":"false","yarn.app.mapreduce.am.container.log.backups":"0","yarn.app.mapreduce.am.log.level":"INFO","mapreduce.job.reduce.slowstart.completedmaps":"0.05","yarn.timeline-service.http-authentication.type":"simple","hadoop.security.group.mapping.ldap.search.attr.group.name":"cn","yarn.timeline-service.client.internal-timers-ttl-secs":"420","fs.s3a.block.size":"32M","yarn.sharedcache.client-server.address":"0.0.0.0:8045","yarn.resourcemanager.hostname":"0.0.0.0","yarn.resourcemanager.delegation.key.update-interval":"86400000","mapreduce.reduce.shuffle.fetch.retry.enabled":"${yarn.nodemanager.recovery.enabled}","mapreduce.map.memory.mb":"1024","mapreduce.task.skip.start.attempts":"2","fs.AbstractFileSystem.hdfs.impl":"org.apache.hadoop.fs.Hdfs","yarn.nodemanager.disk-health-checker.enable":"true","ipc.client.tcpnodelay":"true","ipc.client.rpc-timeout.ms":"0","fs.s3.maxRetries":"4","ipc.client.low-latency":"false","mapreduce.input.lineinputformat.linespermap":"1","ipc.client.connect.max.retries.on.timeouts":"45","yarn.timeline-service.leveldb-timeline-store.read-cache-size":"104857600","fs.AbstractFileSystem.har.impl":"org.apache.hadoop.fs.HarFs","mapreduce.job.split.metainfo.maxsize":"10000000","yarn.am.liveness-monitor.expiry-interval-ms":"600000","yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs":"*********(redacted)","yarn.timeline-service.entity-group-fs-store.app-cache-size":"10","fs.s3a.socket.recv.buffer":"8192","fs.s3n.multipart.uploads.block.size":"67108864","yarn.resourcemanager.resource-tracker.address":"${yarn.resourcemanager.hostname}:8031","yarn.nodemanager.node-labels.provider.fetch-timeout-ms":"1200000","yarn.resourcemanager.leveldb-state-store.compaction-interval-secs":"3600","mapreduce.client.output.filter":"FAILED","hadoop.http.filter.initializers
":"org.apache.hadoop.http.lib.StaticUserWebFilter","mapreduce.reduce.memory.mb":"1024","s3native.client-write-packet-size":"65536","yarn.timeline-service.hostname":"0.0.0.0","file.replication":"1","yarn.nodemanager.container-metrics.unregister-delay-ms":"10000","yarn.nodemanager.container-metrics.period-ms":"-1","yarn.nodemanager.log.retain-seconds":"10800","yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds":"3600","yarn.resourcemanager.keytab":"/etc/krb5.keytab","hadoop.security.group.mapping.providers.combined":"true","mapreduce.reduce.merge.inmem.threshold":"1000","yarn.timeline-service.recovery.enabled":"false","yarn.sharedcache.nm.uploader.thread-count":"20","mapreduce.shuffle.ssl.enabled":"false","yarn.resourcemanager.state-store.max-completed-applications":"${yarn.resourcemanager.max-completed-applications}","mapreduce.job.speculative.minimum-allowed-tasks":"10","yarn.log-aggregation.retain-seconds":"-1","yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb":"0","mapreduce.jobhistory.max-age-ms":"604800000","hadoop.http.cross-origin.allowed-methods":"GET,POST,HEAD","mapreduce.jobhistory.webapp.address":"0.0.0.0:19888","mapreduce.jobtracker.system.dir":"${hadoop.tmp.dir}/mapred/system","yarn.client.nodemanager-connect.max-wait-ms":"180000","yarn.resourcemanager.webapp.address":"${yarn.resourcemanager.hostname}:8088","mapreduce.jobhistory.recovery.enable":"false","mapreduce.reduce.shuffle.parallelcopies":"5","fs.AbstractFileSystem.webhdfs.impl":"org.apache.hadoop.fs.WebHdfs","fs.trash.interval":"0","yarn.app.mapreduce.client.max-retries":"3","hadoop.security.authentication":"simple","mapreduce.task.profile.reduce.params":"${mapreduce.task.profile.params}","yarn.app.mapreduce.am.resource.mb":"1536","mapreduce.input.fileinputformat.list-status.num-threads":"1","yarn.nodemanager.container-executor.class":"org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor","io.mapfile.bloom.size":"1048576","yarn.timeline-service.ttl-ms":"604800000","yarn.nodemanager.resource.cpu-vcores":"-1","mapreduce.job.reduces":"1","fs.s3a.multipart.size":"100M","yarn.scheduler.minimum-allocation-vcores":"1","mapreduce.job.speculative.speculative-cap-total-tasks":"0.01","hadoop.ssl.client.conf":"ssl-client.xml","mapreduce.job.queuename":"default","ha.health-monitor.sleep-after-disconnect.ms":"1000","s3.bytes-per-checksum":"512","yarn.app.mapreduce.shuffle.log.limit.kb":"0","hadoop.security.group.mapping":"org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback","yarn.client.application-client-protocol.poll-timeout-ms":"-1","mapreduce.jobhistory.jhist.format":"json","yarn.resourcemanager.ha.enabled":"false","hadoop.http.staticuser.user":"dr.who","mapreduce.task.exit.timeout.check-interval-ms":"20000","mapreduce.task.exit.timeout":"60000","yarn.nodemanager.linux-container-executor.resources-handler.class":"org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler","mapreduce.reduce.shuffle.memory.limit.percent":"0.25","yarn.resourcemanager.reservation-system.enable":"false","s3.client-write-packet-size":"65536","mapreduce.map.output.compress":"false","ha.zookeeper.acl":"world:anyone:rwcda","ipc.server.max.connections":"0","yarn.scheduler.maximum-allocation-mb":"8192","yarn.resourcemanager.scheduler.monitor.policies":"org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy","yarn.sharedcache.cleaner.period-mins":"1440","yarn.app.mapreduce.am.container.log.limit.kb":"0","s3native.blocksize":"67108864","ipc.cl
ient.connect.retry.interval":"1000","yarn.resourcemanager.zk-state-store.parent-path":"/rmstore","mapreduce.jobhistory.cleaner.enable":"true","yarn.timeline-service.client.fd-flush-interval-secs":"10","hadoop.security.kms.client.encrypted.key.cache.expiry":"43200000","yarn.client.nodemanager-client-async.thread-pool-max-size":"500","mapreduce.map.maxattempts":"4","yarn.nodemanager.sleep-delay-before-sigkill.ms":"250","mapreduce.job.end-notification.retry.attempts":"0","yarn.nodemanager.resource.count-logical-processors-as-cores":"false","yarn.resourcemanager.zk-num-retries":"1000","hadoop.registry.zk.root":"/registry","adl.feature.ownerandgroup.enableupn":"false","mapreduce.job.reduce.shuffle.consumer.plugin.class":"org.apache.hadoop.mapreduce.task.reduce.Shuffle","yarn.resourcemanager.delayed.delegation-token.removal-interval-ms":"*********(redacted)","yarn.nodemanager.localizer.cache.target-size-mb":"10240","ftp.client-write-packet-size":"65536","fs.AbstractFileSystem.adl.impl":"org.apache.hadoop.fs.adl.Adl","yarn.client.failover-retries":"0","fs.s3a.multipart.purge.age":"86400","io.native.lib.available":"true","net.topology.node.switch.mapping.impl":"org.apache.hadoop.net.ScriptBasedMapping","yarn.nodemanager.amrmproxy.address":"0.0.0.0:8048","ipc.server.listen.queue.size":"128","map.sort.class":"org.apache.hadoop.util.QuickSort","fs.viewfs.rename.strategy":"SAME_MOUNTPOINT","hadoop.security.kms.client.authentication.retry-count":"1","fs.permissions.umask-mode":"022","yarn.nodemanager.vmem-check-enabled":"true","yarn.nodemanager.recovery.compaction-interval-secs":"3600","yarn.app.mapreduce.client-am.ipc.max-retries":"3","mapreduce.job.ubertask.maxreduces":"1","hadoop.security.kms.client.encrypted.key.cache.size":"500","hadoop.security.java.secure.random.algorithm":"SHA1PRNG","ha.failover-controller.cli-check.rpc-timeout.ms":"20000","mapreduce.jobhistory.jobname.limit":"50","yarn.client.nodemanager-connect.retry-interval-ms":"10000","yarn.timeline-service.state-store-class":"org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore","yarn.nodemanager.env-whitelist":"JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME","yarn.sharedcache.nested-level":"3","yarn.nodemanager.webapp.address":"${yarn.nodemanager.hostname}:8042","rpc.metrics.quantile.enable":"false","mapreduce.jobhistory.admin.acl":"*","yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size":"10","hadoop.http.authentication.kerberos.keytab":"${user.home}/hadoop.keytab","yarn.resourcemanager.recovery.enabled":"false"},"System Properties":{"java.io.tmpdir":"/tmp","line.separator":"\n","path.separator":":","sun.management.compiler":"HotSpot 64-Bit Tiered Compilers","SPARK_SUBMIT":"true","sun.cpu.endian":"little","java.specification.version":"1.8","java.vm.specification.name":"Java Virtual Machine Specification","java.vendor":"Oracle 
Corporation","java.vm.specification.version":"1.8","user.home":"/root","file.encoding.pkg":"sun.io","sun.nio.ch.bugLevel":"","sun.arch.data.model":"64","sun.boot.library.path":"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/amd64","user.dir":"/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8","java.library.path":"/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib","sun.cpu.isalist":"","os.arch":"amd64","java.vm.version":"25.252-b09","jetty.git.hash":"ab228fde9e55e9164c738d7fa121f8ac5acd51c9","java.endorsed.dirs":"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/endorsed","java.runtime.version":"1.8.0_252-b09","java.vm.info":"mixed mode","java.ext.dirs":"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/ext:/usr/java/packages/lib/ext","java.runtime.name":"OpenJDK Runtime Environment","file.separator":"/","java.class.version":"52.0","java.specification.name":"Java Platform API Specification","sun.boot.class.path":"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/resources.jar:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/rt.jar:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/sunrsasign.jar:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/jsse.jar:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/jce.jar:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/charsets.jar:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/lib/jfr.jar:/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre/classes","file.encoding":"UTF-8","user.timezone":"Asia/Shanghai","java.specification.vendor":"Oracle Corporation","sun.java.launcher":"SUN_STANDARD","os.version":"3.10.0-1127.10.1.el7.x86_64","sun.os.patch.level":"unknown","java.vm.specification.vendor":"Oracle Corporation","user.country":"US","sun.jnu.encoding":"UTF-8","user.language":"en","java.vendor.url":"http://java.oracle.com/","java.awt.printerjob":"sun.print.PSPrinterJob","java.awt.graphicsenv":"sun.awt.X11GraphicsEnvironment","awt.toolkit":"sun.awt.X11.XToolkit","os.name":"Linux","java.vm.vendor":"Oracle Corporation","java.vendor.url.bug":"http://bugreport.sun.com/bugreport/","user.name":"root","java.vm.name":"OpenJDK 64-Bit Server VM","sun.java.command":"org.apache.spark.deploy.SparkSubmit --master local[*] --conf spark.eventLog.dir=/tmp/spark-history --conf spark.eventLog.enabled=true --conf spark.sql.shuffle.partitions=2 --class org.apache.spark.examples.sql.streaming.StructuredKafkaWordCount ./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar 192.168.130.97:9092 subscribe test5","java.home":"/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.252.b09-2.el7_8.x86_64/jre","java.version":"1.8.0_252","sun.io.unicode.encoding":"UnicodeLittle"},"Classpath Entries":{"/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/metrics-graphite-4.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/nimbus-jose-jwt-4.41.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-vector-code-gen-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-jaxrs-1.9.13.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jersey-server-2.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/pyrolite-4.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/conf/":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/json-smart-2.3.jar":"System 
Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/objenesis-2.5.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-auth-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jsp-api-2.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-unsafe_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-codec-1.10.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/protobuf-java-2.5.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/avro-1.8.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/guice-3.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/aopalliance-repackaged-2.6.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/transaction-api-1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spire_2.12-0.17.0-M1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/gson-2.2.4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/datanucleus-rdbms-4.1.19.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-module-paranamer-2.10.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/libfb303-0.9.3.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-cli-1.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-tags_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/scala-library-2.12.10.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/xbean-asm7-shaded-4.15.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jersey-container-servlet-2.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hk2-api-2.6.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jakarta.xml.bind-api-2.3.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/okhttp-2.4.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/derby-10.12.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-core-asl-1.9.13.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-collections-3.2.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/httpcore-4.4.12.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-beanutils-1.9.4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spire-util_2.12-0.17.0-M1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-crypto-1.0.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-launcher_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/stax-api-1.0-2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/json4s-ast_2.12-3.6.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/lz4-java-1.7.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/scala-parser-combinators_2.12-1.1.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/parquet-format-2.4.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/parquet-column-1.10.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-logging-1.1.3.jar":"System 
Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/audience-annotations-0.5.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-jdbc-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-hive-thriftserver_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-cli-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/javolution-5.5.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/JLargeArrays-1.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-yarn-api-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/algebra_2.12-2.0.0-M2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-dbcp-1.4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jakarta.ws.rs-api-2.1.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/threeten-extra-1.5.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-io-2.4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/metrics-json-4.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/libthrift-0.12.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/compress-lzf-1.0.3.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/metrics-jmx-4.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jakarta.inject-2.6.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/stax-api-1.0.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-shims-common-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/curator-recipes-2.7.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/antlr4-runtime-4.7.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/slf4j-api-1.7.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/oro-2.0.8.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/arrow-memory-0.15.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jpam-1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/velocity-1.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/orc-core-1.5.10.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-sql_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-databind-2.10.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-text-1.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jersey-client-2.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/htrace-core4-4.0.1-incubating.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/json-1.8.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-graphx_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/avro-ipc-1.8.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/macro-compat_2.12-1.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jetty-util-6.1.26.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/core-1.1.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-shims-2.3.7.jar":"System 
Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/joda-time-2.10.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/parquet-encoding-1.10.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-llap-common-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-network-common_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/datanucleus-api-jdo-4.2.4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/paranamer-2.8.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-shims-0.23-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/activation-1.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/curator-framework-2.7.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-compress-1.8.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-mapreduce-client-common-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/RoaringBitmap-0.7.45.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/ivy-2.4.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-core-2.10.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-yarn-client-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-httpclient-3.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-yarn_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/parquet-common-1.10.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/zstd-jni-1.4.5-2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jersey-container-servlet-core-2.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/snappy-java-1.1.7.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/shapeless_2.12-2.3.3.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-pool-1.5.4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/json4s-core_2.12-3.6.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/httpclient-4.5.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/api-util-1.0.0-M20.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/aircompressor-0.10.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-repl_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-yarn-common-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/leveldbjni-all-1.8.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jersey-hk2-2.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jta-1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jetty-sslengine-6.1.26.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-net-3.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/datanucleus-core-4.1.17.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-yarn-server-web-proxy-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/breeze_2.12-1.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/orc-mapreduce-1.5.10.jar":"System 
Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jetty-6.1.26.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-core_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/xz-1.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/javax.inject-1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/scala-compiler-2.12.10.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/metrics-jvm-4.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/orc-shims-1.5.10.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jaxb-api-2.2.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jakarta.validation-api-2.0.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spire-macros_2.12-0.17.0-M1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/janino-3.1.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/osgi-resource-locator-1.0.3.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jcl-over-slf4j-1.7.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-mapreduce-client-app-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hk2-utils-2.6.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-sketch_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/JTransforms-3.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/kafka-clients-2.4.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/guice-servlet-3.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/okio-1.4.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-annotations-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-math3-3.4.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/json4s-scalap_2.12-3.6.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/bonecp-0.8.0.RELEASE.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-streaming_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/accessors-smart-1.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/guava-14.0.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/shims-0.7.45.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/ST4-4.0.4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-module-scala_2.12-2.10.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/scala-xml_2.12-1.2.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/chill-java-0.9.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-mapreduce-client-shuffle-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/cats-kernel_2.12-2.0.0-M4.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/stream-2.9.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-configuration-1.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jodd-core-3.5.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/scala-collection-compat_2.12-2.1.1.jar":"System 
Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-pool2-2.6.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jul-to-slf4j-1.7.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/xmlenc-0.52.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/flatbuffers-java-1.9.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-token-provider-kafka-0-10_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/json4s-jackson_2.12-3.6.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-compiler-3.1.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jline-2.14.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/breeze-macros_2.12-1.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/kryo-shaded-4.0.2.jar":"System Classpath","spark://iZbp19vpr16ix621sdw476Z:46309/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar":"Added By User","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-common-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-hive_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jersey-common-2.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/istack-commons-runtime-3.0.8.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/curator-client-2.7.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-xc-1.9.13.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/zookeeper-3.4.14.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/parquet-hadoop-1.10.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jakarta.annotation-api-1.3.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-shims-scheduler-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/univocity-parsers-2.8.3.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-digester-1.8.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-mllib_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/arpack_combined_all-0.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-sql-kafka-0-10_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-annotations-2.10.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hk2-locator-2.6.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-mapreduce-client-core-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/avro-mapred-1.8.2-hadoop2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-yarn-server-common-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/scala-reflect-2.12.10.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/super-csv-2.2.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-mapreduce-client-jobclient-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-client-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-common-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/apacheds-kerberos-codec-2.0.0-M15.jar":"System 
Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-exec-2.3.7-core.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/opencsv-2.3.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/api-asn1-api-1.0.0-M20.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-storage-api-2.7.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spire-platform_2.12-0.17.0-M1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/aopalliance-1.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/HikariCP-2.5.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-metastore-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/minlog-1.3.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/arrow-format-0.15.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jsr305-3.0.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-lang-2.6.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/commons-lang3-3.9.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/javax.jdo-3.2.0-m3.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/apacheds-i18n-2.0.0-M15.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/javassist-3.25.0-GA.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jackson-mapper-asl-1.9.13.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/antlr-runtime-3.5.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/log4j-1.2.17.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-beeline-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/chill_2.12-0.9.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jdo-api-3.0.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-kvstore_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/metrics-core-4.1.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jaxb-runtime-2.3.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-mllib-local_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/py4j-0.10.9.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/javax.servlet-api-3.1.0.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hive-serde-2.3.7.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/hadoop-hdfs-client-2.8.5.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-network-shuffle_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jcip-annotations-1.0-1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/jersey-media-jaxb-2.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/parquet-jackson-1.10.1.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/slf4j-log4j12-1.7.30.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/netty-all-4.1.47.Final.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/dropwizard-metrics-hadoop-metrics2-reporter-0.1.2.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/arrow-vector-0.15.1.jar":"System 
Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/spark-catalyst_2.12-3.1.0-SNAPSHOT.jar":"System Classpath","/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/jars/machinist_2.12-0.6.8.jar":"System Classpath"}} +{"Event":"SparkListenerApplicationStart","App Name":"StructuredKafkaWordCount","App ID":"local-1596020211915","Timestamp":1596020210919,"User":"root"} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryStartedEvent","id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:56:55.947Z"} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":0,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 0","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48276}}, {\"test5\":{\"0\":48279}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, 
input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#142]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = f7faa1e9-69d9-41b4-9d77-919795af2413, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = f7faa1e9-69d9-41b4-9d77-919795af2413, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27fafcca\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27fafcca","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 0, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 0, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#66]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS 
value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":80,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":79,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":76,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":77,"metricType":"timing"},{"name":"peak memory","accumulatorId":75,"metricType":"size"},{"name":"number of output rows","accumulatorId":74,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":78,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":71,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":72,"metricType":"timing"},{"name":"peak memory","accumulatorId":70,"metricType":"size"},{"name":"number of output rows","accumulatorId":69,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":73,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":68,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":20,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":21,"metricType":"nsTiming"},{"name":"records read","accumulatorId":18,"metricType":"sum"},{"name":"local bytes read","accumulatorId":16,"metricType":"size"},{"name":"fetch wait time","accumulatorId":17,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":14,"metricType":"size"},{"name":"local blocks read","accumulatorId":13,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":12,"metricType":"sum"},{"name":"data size","accumulatorId":11,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":15,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":19,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":67,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":64,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":65,"metricType":"timing"},{"name":"peak memory","accumulatorId":63,"metricType":"size"},{"name":"number of output rows","accumulatorId":62,"metricType":"sum"},{"name":"avg hash probe bucket list 
iters","accumulatorId":66,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":61,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":51,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":52,"metricType":"sum"},{"name":"memory used by state","accumulatorId":57,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":59,"metricType":"sum"},{"name":"number of output rows","accumulatorId":50,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":58,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":60,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":56,"metricType":"timing"},{"name":"time to remove","accumulatorId":55,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":53,"metricType":"sum"},{"name":"time to update","accumulatorId":54,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":47,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":48,"metricType":"timing"},{"name":"peak memory","accumulatorId":46,"metricType":"size"},{"name":"number of output rows","accumulatorId":45,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":49,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":44,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020220179} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":1,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 0","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, 
timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48276}}, {\"test5\":{\"0\":48279}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#218]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 64a4779b-846a-4f20-9f5c-899a8dbf68d8, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 64a4779b-846a-4f20-9f5c-899a8dbf68d8, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27fafcca\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27fafcca","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 0, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], 
functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 0, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#66]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":80,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":79,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":76,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":77,"metricType":"timing"},{"name":"peak memory","accumulatorId":75,"metricType":"size"},{"name":"number of output rows","accumulatorId":74,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":78,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":71,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":72,"metricType":"timing"},{"name":"peak memory","accumulatorId":70,"metricType":"size"},{"name":"number of output rows","accumulatorId":69,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":73,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":68,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":20,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":21,"metricType":"nsTiming"},{"name":"records read","accumulatorId":18,"metricType":"sum"},{"name":"local bytes 
read","accumulatorId":16,"metricType":"size"},{"name":"fetch wait time","accumulatorId":17,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":14,"metricType":"size"},{"name":"local blocks read","accumulatorId":13,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":12,"metricType":"sum"},{"name":"data size","accumulatorId":11,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":15,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":19,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":67,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":64,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":65,"metricType":"timing"},{"name":"peak memory","accumulatorId":63,"metricType":"size"},{"name":"number of output rows","accumulatorId":62,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":66,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":61,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":51,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":52,"metricType":"sum"},{"name":"memory used by state","accumulatorId":57,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":59,"metricType":"sum"},{"name":"number of output rows","accumulatorId":50,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":58,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":60,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":56,"metricType":"timing"},{"name":"time to remove","accumulatorId":55,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":53,"metricType":"sum"},{"name":"time to update","accumulatorId":54,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":47,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":48,"metricType":"timing"},{"name":"peak memory","accumulatorId":46,"metricType":"size"},{"name":"number of output rows","accumulatorId":45,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":49,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":44,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020220258} +{"Event":"SparkListenerJobStart","Job ID":0,"Submission Time":1596020221633,"Stage Infos":[{"Stage ID":0,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":6,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"8\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[5],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":3,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"15\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[2],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory 
Size":0,"Disk Size":0},{"RDD ID":1,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"20\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[0],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":2,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"16\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[1],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":5,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"9\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[4],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":4,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"14\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[3],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":0,"Name":"DataSourceRDD","Scope":"{\"id\":\"20\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":1,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":11,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"0\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[10],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk 
Size":0},{"RDD ID":7,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"8\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[6],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":9,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"4\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[8],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":10,"Name":"StateStoreRDD","Scope":"{\"id\":\"3\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[9],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":8,"Name":"StateStoreRDD","Scope":"{\"id\":\"7\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[7],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[0],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage 
IDs":[0,1],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 0","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"0","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"1","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":0,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":6,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"8\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[5],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":3,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"15\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[2],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":1,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"20\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[0],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD 
ID":2,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"16\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[1],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":5,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"9\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[4],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":4,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"14\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[3],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":0,"Name":"DataSourceRDD","Scope":"{\"id\":\"20\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020221656,"Accumulables":[],"Resource Profile 
Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 0","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"0","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"1","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":0,"Stage Attempt ID":0,"Task Info":{"Task ID":0,"Index":0,"Attempt":0,"Launch Time":1596020221738,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":0,"Stage Attempt ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":0,"Index":0,"Attempt":0,"Launch Time":1596020221738,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020222649,"Failed":false,"Killed":false,"Accumulables":[{"ID":21,"Name":"shuffle write time","Update":"9599308","Value":"9599308","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":20,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":19,"Name":"shuffle bytes written","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":11,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":68,"Name":"duration","Update":"296","Value":"296","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":69,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":70,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":72,"Name":"time in aggregation build","Update":"200","Value":"200","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":74,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":75,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":77,"Name":"time in aggregation build","Update":"190","Value":"190","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":79,"Name":"duration","Update":"336","Value":"336","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":80,"Name":"number of output rows","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":125,"Name":"internal.metrics.input.recordsRead","Update":3,"Value":3,"Internal":true,"Count Failed Values":true},{"ID":123,"Name":"internal.metrics.shuffle.write.writeTime","Update":9599308,"Value":9599308,"Internal":true,"Count Failed Values":true},{"ID":122,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":121,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":112,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":109,"Name":"internal.metrics.resultSerializationTime","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":108,"Name":"internal.metrics.jvmGCTime","Update":17,"Value":17,"Internal":true,"Count Failed Values":true},{"ID":107,"Name":"internal.metrics.resultSize","Update":2630,"Value":2630,"Internal":true,"Count Failed Values":true},{"ID":106,"Name":"internal.metrics.executorCpuTime","Update":466139164,"Value":466139164,"Internal":true,"Count Failed Values":true},{"ID":105,"Name":"internal.metrics.executorRunTime","Update":503,"Value":503,"Internal":true,"Count Failed Values":true},{"ID":104,"Name":"internal.metrics.executorDeserializeCpuTime","Update":301869581,"Value":301869581,"Internal":true,"Count Failed Values":true},{"ID":103,"Name":"internal.metrics.executorDeserializeTime","Update":361,"Value":361,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":361,"Executor Deserialize CPU Time":301869581,"Executor Run Time":503,"Executor CPU Time":466139164,"Peak Execution Memory":524288,"Result Size":2630,"JVM GC Time":17,"Result Serialization Time":1,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks 
Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":168,"Shuffle Write Time":9599308,"Shuffle Records Written":1},"Input Metrics":{"Bytes Read":0,"Records Read":3},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":0,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":6,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"8\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[5],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":3,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"15\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[2],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":1,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"20\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[0],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":2,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"16\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[1],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":5,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"9\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[4],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":4,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"14\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[3],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":0,"Name":"DataSourceRDD","Scope":"{\"id\":\"20\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020221656,"Completion Time":1596020222661,"Accumulables":[{"ID":104,"Name":"internal.metrics.executorDeserializeCpuTime","Value":301869581,"Internal":true,"Count Failed Values":true},{"ID":122,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":77,"Name":"time in aggregation build","Value":"190","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":68,"Name":"duration","Value":"296","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":80,"Name":"number of output rows","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":125,"Name":"internal.metrics.input.recordsRead","Value":3,"Internal":true,"Count Failed Values":true},{"ID":107,"Name":"internal.metrics.resultSize","Value":2630,"Internal":true,"Count Failed Values":true},{"ID":74,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":11,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":20,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":106,"Name":"internal.metrics.executorCpuTime","Value":466139164,"Internal":true,"Count Failed Values":true},{"ID":109,"Name":"internal.metrics.resultSerializationTime","Value":1,"Internal":true,"Count Failed Values":true},{"ID":121,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":168,"Internal":true,"Count Failed Values":true},{"ID":112,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true},{"ID":103,"Name":"internal.metrics.executorDeserializeTime","Value":361,"Internal":true,"Count Failed Values":true},{"ID":79,"Name":"duration","Value":"336","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":70,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":19,"Name":"shuffle bytes written","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":123,"Name":"internal.metrics.shuffle.write.writeTime","Value":9599308,"Internal":true,"Count Failed Values":true},{"ID":105,"Name":"internal.metrics.executorRunTime","Value":503,"Internal":true,"Count Failed Values":true},{"ID":69,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":72,"Name":"time in aggregation build","Value":"200","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":108,"Name":"internal.metrics.jvmGCTime","Value":17,"Internal":true,"Count Failed Values":true},{"ID":21,"Name":"shuffle write 
time","Value":"9599308","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":75,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":1,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":11,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"0\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[10],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":7,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"8\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[6],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":9,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"4\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[8],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":10,"Name":"StateStoreRDD","Scope":"{\"id\":\"3\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[9],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":8,"Name":"StateStoreRDD","Scope":"{\"id\":\"7\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[7],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[0],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020222688,"Accumulables":[],"Resource Profile 
Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 0","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"0","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"1","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":1,"Stage Attempt ID":0,"Task Info":{"Task ID":1,"Index":1,"Attempt":0,"Launch Time":1596020222709,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"NODE_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":1,"Stage Attempt ID":0,"Task Info":{"Task ID":2,"Index":0,"Attempt":0,"Launch Time":1596020222713,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":1,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":2,"Index":0,"Attempt":0,"Launch Time":1596020222713,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020222954,"Failed":false,"Killed":false,"Accumulables":[{"ID":44,"Name":"duration","Update":"19","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":46,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":48,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":54,"Name":"time to update","Update":"14","Value":"14","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":55,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":56,"Name":"time to commit changes","Update":"50","Value":"50","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":58,"Name":"estimated size of state only on current version","Update":"64","Value":"64","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":57,"Name":"memory used by state","Update":"208","Value":"208","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":61,"Name":"duration","Update":"14","Value":"14","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":63,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":65,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":145,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":144,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":143,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":142,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":141,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":140,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":139,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":137,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":134,"Name":"internal.metrics.resultSerializationTime","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":132,"Name":"internal.metrics.resultSize","Update":5354,"Value":5354,"Internal":true,"Count Failed Values":true},{"ID":131,"Name":"internal.metrics.executorCpuTime","Update":93367533,"Value":93367533,"Internal":true,"Count Failed Values":true},{"ID":130,"Name":"internal.metrics.executorRunTime","Update":203,"Value":203,"Internal":true,"Count Failed Values":true},{"ID":129,"Name":"internal.metrics.executorDeserializeCpuTime","Update":10308753,"Value":10308753,"Internal":true,"Count Failed Values":true},{"ID":128,"Name":"internal.metrics.executorDeserializeTime","Update":23,"Value":23,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":23,"Executor 
Deserialize CPU Time":10308753,"Executor Run Time":203,"Executor CPU Time":93367533,"Peak Execution Memory":524288,"Result Size":5354,"JVM GC Time":0,"Result Serialization Time":1,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":1,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":1,"Index":1,"Attempt":0,"Launch Time":1596020222709,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"NODE_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020222965,"Failed":false,"Killed":false,"Accumulables":[{"ID":44,"Name":"duration","Update":"33","Value":"52","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":49,"Name":"avg hash probe bucket list iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":45,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":46,"Name":"peak memory","Update":"4456448","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":48,"Name":"time in aggregation build","Update":"19","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":54,"Name":"time to update","Update":"28","Value":"42","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":53,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":55,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":56,"Name":"time to commit changes","Update":"31","Value":"81","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":58,"Name":"estimated size of state only on current version","Update":"424","Value":"488","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":50,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":57,"Name":"memory used by state","Update":"568","Value":"776","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":52,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":61,"Name":"duration","Update":"28","Value":"42","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":62,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":63,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":65,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":67,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":13,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":17,"Name":"fetch wait 
time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":16,"Name":"local bytes read","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":18,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":145,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":144,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":143,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":142,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":141,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":140,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":139,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":137,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":132,"Name":"internal.metrics.resultSize","Update":5574,"Value":10928,"Internal":true,"Count Failed Values":true},{"ID":131,"Name":"internal.metrics.executorCpuTime","Update":91355172,"Value":184722705,"Internal":true,"Count Failed Values":true},{"ID":130,"Name":"internal.metrics.executorRunTime","Update":205,"Value":408,"Internal":true,"Count Failed Values":true},{"ID":129,"Name":"internal.metrics.executorDeserializeCpuTime","Update":21029530,"Value":31338283,"Internal":true,"Count Failed Values":true},{"ID":128,"Name":"internal.metrics.executorDeserializeTime","Update":34,"Value":57,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":34,"Executor Deserialize CPU Time":21029530,"Executor Run Time":205,"Executor CPU Time":91355172,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":168,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":1,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":11,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"0\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at 
StructuredKafkaWordCount.scala:86","Parent IDs":[10],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":7,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"8\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[6],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":9,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"4\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[8],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":10,"Name":"StateStoreRDD","Scope":"{\"id\":\"3\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[9],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":8,"Name":"StateStoreRDD","Scope":"{\"id\":\"7\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[7],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[0],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020222688,"Completion Time":1596020222967,"Accumulables":[{"ID":137,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":128,"Name":"internal.metrics.executorDeserializeTime","Value":57,"Internal":true,"Count Failed Values":true},{"ID":131,"Name":"internal.metrics.executorCpuTime","Value":184722705,"Internal":true,"Count Failed Values":true},{"ID":50,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":140,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count Failed 
Values":true},{"ID":53,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":62,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":17,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":134,"Name":"internal.metrics.resultSerializationTime","Value":1,"Internal":true,"Count Failed Values":true},{"ID":44,"Name":"duration","Value":"52","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":56,"Name":"time to commit changes","Value":"81","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":65,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":142,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":46,"Name":"peak memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":145,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":55,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":49,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":67,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":139,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true},{"ID":58,"Name":"estimated size of state only on current version","Value":"488","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":13,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":130,"Name":"internal.metrics.executorRunTime","Value":408,"Internal":true,"Count Failed Values":true},{"ID":16,"Name":"local bytes read","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":52,"Name":"number of total state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":61,"Name":"duration","Value":"42","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":129,"Name":"internal.metrics.executorDeserializeCpuTime","Value":31338283,"Internal":true,"Count Failed Values":true},{"ID":132,"Name":"internal.metrics.resultSize","Value":10928,"Internal":true,"Count Failed Values":true},{"ID":141,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":45,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":63,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":54,"Name":"time to update","Value":"42","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":144,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":18,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":57,"Name":"memory used by state","Value":"776","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":48,"Name":"time in aggregation build","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":143,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":168,"Internal":true,"Count Failed 
Values":true}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":0,"Completion Time":1596020222973,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":2,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 0","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) LocalTableScan\nOutput [2]: [value#46, count#47]\nArguments: [value#46, count#47]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#46, count#47]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":153,"metricType":"sum"}]},"time":1596020223028} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":2,"time":1596020223062} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":1,"time":1596020223069} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":0,"time":1596020223069} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:56:56.015Z","batchId":0,"batchDuration":7110,"durationMs":{"triggerExecution":7109,"queryPlanning":439,"getBatch":21,"latestOffset":3524,"addBatch":3011,"walCommit":35},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":776,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":488,"loadedMapCacheHitCount":0,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":null,"endOffset":"{\"test5\":{\"0\":48279}}","numInputRows":3,"inputRowsPerSecond":"NaN","processedRowsPerSecond":0.42194092827004215}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":3,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 
1","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48279}}, {\"test5\":{\"0\":48642}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#373]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 1fb6b6c6-ced8-4f85-80af-1f3f4c424457, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id 
: 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 1fb6b6c6-ced8-4f85-80af-1f3f4c424457, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@3a1eb73c\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@3a1eb73c","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 1, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 1, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#297]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] 
class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":237,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":236,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":233,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":234,"metricType":"timing"},{"name":"peak memory","accumulatorId":232,"metricType":"size"},{"name":"number of output rows","accumulatorId":231,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":235,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":228,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":229,"metricType":"timing"},{"name":"peak memory","accumulatorId":227,"metricType":"size"},{"name":"number of output rows","accumulatorId":226,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":230,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":225,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":177,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":178,"metricType":"nsTiming"},{"name":"records read","accumulatorId":175,"metricType":"sum"},{"name":"local bytes read","accumulatorId":173,"metricType":"size"},{"name":"fetch wait time","accumulatorId":174,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":171,"metricType":"size"},{"name":"local blocks read","accumulatorId":170,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":169,"metricType":"sum"},{"name":"data size","accumulatorId":168,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":172,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":176,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":224,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":221,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":222,"metricType":"timing"},{"name":"peak memory","accumulatorId":220,"metricType":"size"},{"name":"number of output rows","accumulatorId":219,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":223,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":218,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":208,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":209,"metricType":"sum"},{"name":"memory used by state","accumulatorId":214,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":216,"metricType":"sum"},{"name":"number of output rows","accumulatorId":207,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":215,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":217,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":213,"metricType":"timing"},{"name":"time to 
remove","accumulatorId":212,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":210,"metricType":"sum"},{"name":"time to update","accumulatorId":211,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":204,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":205,"metricType":"timing"},{"name":"peak memory","accumulatorId":203,"metricType":"size"},{"name":"number of output rows","accumulatorId":202,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":206,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":201,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020223333} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":4,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 1","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48279}}, {\"test5\":{\"0\":48642}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: 
java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#449]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 7992c0a8-0641-440d-aaf7-ad453fe25c0a, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 7992c0a8-0641-440d-aaf7-ad453fe25c0a, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@3a1eb73c\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@3a1eb73c","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 1, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 1, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#297]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, 
StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":237,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":236,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":233,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":234,"metricType":"timing"},{"name":"peak memory","accumulatorId":232,"metricType":"size"},{"name":"number of output rows","accumulatorId":231,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":235,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":228,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":229,"metricType":"timing"},{"name":"peak memory","accumulatorId":227,"metricType":"size"},{"name":"number of output rows","accumulatorId":226,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":230,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":225,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":177,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":178,"metricType":"nsTiming"},{"name":"records read","accumulatorId":175,"metricType":"sum"},{"name":"local bytes read","accumulatorId":173,"metricType":"size"},{"name":"fetch wait time","accumulatorId":174,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":171,"metricType":"size"},{"name":"local blocks read","accumulatorId":170,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":169,"metricType":"sum"},{"name":"data size","accumulatorId":168,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":172,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":176,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":224,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":221,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":222,"metricType":"timing"},{"name":"peak memory","accumulatorId":220,"metricType":"size"},{"name":"number of output 
rows","accumulatorId":219,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":223,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":218,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":208,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":209,"metricType":"sum"},{"name":"memory used by state","accumulatorId":214,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":216,"metricType":"sum"},{"name":"number of output rows","accumulatorId":207,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":215,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":217,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":213,"metricType":"timing"},{"name":"time to remove","accumulatorId":212,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":210,"metricType":"sum"},{"name":"time to update","accumulatorId":211,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":204,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":205,"metricType":"timing"},{"name":"peak memory","accumulatorId":203,"metricType":"size"},{"name":"number of output rows","accumulatorId":202,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":206,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":201,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020223382} +{"Event":"SparkListenerJobStart","Job ID":1,"Submission Time":1596020223482,"Stage Infos":[{"Stage ID":2,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":18,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"41\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[17],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":12,"Name":"DataSourceRDD","Scope":"{\"id\":\"53\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":13,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"53\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[12],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":15,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"48\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[14],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":14,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"49\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at 
StructuredKafkaWordCount.scala:86","Parent IDs":[13],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":16,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"47\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[15],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":17,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"42\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[16],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":3,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":23,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"33\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[22],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":22,"Name":"StateStoreRDD","Scope":"{\"id\":\"36\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[21],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":19,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"41\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[18],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":21,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"37\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at 
StructuredKafkaWordCount.scala:86","Parent IDs":[20],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":20,"Name":"StateStoreRDD","Scope":"{\"id\":\"40\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[19],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[2],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage IDs":[2,3],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 1","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"1","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"4","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":2,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":18,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"41\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[17],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":12,"Name":"DataSourceRDD","Scope":"{\"id\":\"53\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":13,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"53\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[12],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":15,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"48\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[14],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":14,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"49\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[13],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":16,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"47\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[15],"Storage 
Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":17,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"42\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[16],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020223485,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at 
StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 1","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"1","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"4","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":2,"Stage Attempt ID":0,"Task Info":{"Task ID":3,"Index":0,"Attempt":0,"Launch Time":1596020223493,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":2,"Stage Attempt ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":3,"Index":0,"Attempt":0,"Launch Time":1596020223493,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020223601,"Failed":false,"Killed":false,"Accumulables":[{"ID":178,"Name":"shuffle write time","Update":"837580","Value":"837580","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":177,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":176,"Name":"shuffle bytes written","Update":"169","Value":"169","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":168,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":225,"Name":"duration","Update":"84","Value":"84","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":226,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":227,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":229,"Name":"time in aggregation build","Update":"74","Value":"74","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":231,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":232,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":234,"Name":"time in aggregation build","Update":"68","Value":"68","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":236,"Name":"duration","Update":"84","Value":"84","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":237,"Name":"number of output rows","Update":"363","Value":"363","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":282,"Name":"internal.metrics.input.recordsRead","Update":363,"Value":363,"Internal":true,"Count Failed Values":true},{"ID":280,"Name":"internal.metrics.shuffle.write.writeTime","Update":837580,"Value":837580,"Internal":true,"Count Failed Values":true},{"ID":279,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":278,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":169,"Value":169,"Internal":true,"Count Failed 
Values":true},{"ID":269,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":264,"Name":"internal.metrics.resultSize","Update":2544,"Value":2544,"Internal":true,"Count Failed Values":true},{"ID":263,"Name":"internal.metrics.executorCpuTime","Update":95945587,"Value":95945587,"Internal":true,"Count Failed Values":true},{"ID":262,"Name":"internal.metrics.executorRunTime","Update":96,"Value":96,"Internal":true,"Count Failed Values":true},{"ID":261,"Name":"internal.metrics.executorDeserializeCpuTime","Update":7437557,"Value":7437557,"Internal":true,"Count Failed Values":true},{"ID":260,"Name":"internal.metrics.executorDeserializeTime","Update":7,"Value":7,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":7,"Executor Deserialize CPU Time":7437557,"Executor Run Time":96,"Executor CPU Time":95945587,"Peak Execution Memory":524288,"Result Size":2544,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":169,"Shuffle Write Time":837580,"Shuffle Records Written":1},"Input Metrics":{"Bytes Read":0,"Records Read":363},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":2,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":18,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"41\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[17],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":12,"Name":"DataSourceRDD","Scope":"{\"id\":\"53\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":13,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"53\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[12],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":15,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"48\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[14],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of 
Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":14,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"49\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[13],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":16,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"47\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[15],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":17,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"42\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[16],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020223485,"Completion Time":1596020223603,"Accumulables":[{"ID":227,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":236,"Name":"duration","Value":"84","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":176,"Name":"shuffle bytes written","Value":"169","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":262,"Name":"internal.metrics.executorRunTime","Value":96,"Internal":true,"Count Failed Values":true},{"ID":226,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":280,"Name":"internal.metrics.shuffle.write.writeTime","Value":837580,"Internal":true,"Count Failed Values":true},{"ID":229,"Name":"time in aggregation build","Value":"74","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":232,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":178,"Name":"shuffle write time","Value":"837580","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":225,"Name":"duration","Value":"84","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":261,"Name":"internal.metrics.executorDeserializeCpuTime","Value":7437557,"Internal":true,"Count Failed Values":true},{"ID":279,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":234,"Name":"time in aggregation build","Value":"68","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":264,"Name":"internal.metrics.resultSize","Value":2544,"Internal":true,"Count Failed Values":true},{"ID":282,"Name":"internal.metrics.input.recordsRead","Value":363,"Internal":true,"Count Failed Values":true},{"ID":237,"Name":"number of output rows","Value":"363","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":177,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":168,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":231,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":263,"Name":"internal.metrics.executorCpuTime","Value":95945587,"Internal":true,"Count Failed Values":true},{"ID":260,"Name":"internal.metrics.executorDeserializeTime","Value":7,"Internal":true,"Count Failed Values":true},{"ID":269,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true},{"ID":278,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":169,"Internal":true,"Count Failed Values":true}],"Resource Profile Id":0}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":3,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":23,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"33\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[22],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":22,"Name":"StateStoreRDD","Scope":"{\"id\":\"36\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[21],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":19,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"41\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[18],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":21,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"37\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[20],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":20,"Name":"StateStoreRDD","Scope":"{\"id\":\"40\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[19],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[2],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020223613,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 1","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"1","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"4","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} 
+{"Event":"SparkListenerTaskStart","Stage ID":3,"Stage Attempt ID":0,"Task Info":{"Task ID":4,"Index":0,"Attempt":0,"Launch Time":1596020223625,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":3,"Stage Attempt ID":0,"Task Info":{"Task ID":5,"Index":1,"Attempt":0,"Launch Time":1596020223626,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":3,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":4,"Index":0,"Attempt":0,"Launch Time":1596020223625,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020223717,"Failed":false,"Killed":false,"Accumulables":[{"ID":201,"Name":"duration","Update":"4","Value":"4","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":203,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":205,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":211,"Name":"time to update","Update":"6","Value":"6","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":212,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":213,"Name":"time to commit changes","Update":"38","Value":"38","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":215,"Name":"estimated size of state only on current version","Update":"88","Value":"88","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":216,"Name":"count of cache hit on states cache in provider","Update":"2","Value":"2","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":214,"Name":"memory used by state","Update":"376","Value":"376","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":218,"Name":"duration","Update":"6","Value":"6","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":220,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":222,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":302,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":301,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":300,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":299,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":298,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":297,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":296,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed 
Values":true},{"ID":294,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":289,"Name":"internal.metrics.resultSize","Update":5311,"Value":5311,"Internal":true,"Count Failed Values":true},{"ID":288,"Name":"internal.metrics.executorCpuTime","Update":22954307,"Value":22954307,"Internal":true,"Count Failed Values":true},{"ID":287,"Name":"internal.metrics.executorRunTime","Update":77,"Value":77,"Internal":true,"Count Failed Values":true},{"ID":286,"Name":"internal.metrics.executorDeserializeCpuTime","Update":6627382,"Value":6627382,"Internal":true,"Count Failed Values":true},{"ID":285,"Name":"internal.metrics.executorDeserializeTime","Update":6,"Value":6,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":6,"Executor Deserialize CPU Time":6627382,"Executor Run Time":77,"Executor CPU Time":22954307,"Peak Execution Memory":524288,"Result Size":5311,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":3,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":5,"Index":1,"Attempt":0,"Launch Time":1596020223626,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020223720,"Failed":false,"Killed":false,"Accumulables":[{"ID":201,"Name":"duration","Update":"4","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":206,"Name":"avg hash probe bucket list iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":202,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":203,"Name":"peak memory","Update":"4456448","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":205,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":211,"Name":"time to update","Update":"18","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":210,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":212,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":213,"Name":"time to commit changes","Update":"30","Value":"68","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":215,"Name":"estimated size of state only on current version","Update":"368","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":207,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":216,"Name":"count of cache hit on states cache in provider","Update":"2","Value":"4","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":214,"Name":"memory used by state","Update":"840","Value":"1216","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":209,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":218,"Name":"duration","Update":"19","Value":"25","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":219,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":220,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":222,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":224,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":170,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":174,"Name":"fetch wait time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":173,"Name":"local bytes read","Update":"169","Value":"169","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":175,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":302,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":301,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":300,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":169,"Value":169,"Internal":true,"Count Failed Values":true},{"ID":299,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":298,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":297,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":296,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":294,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":289,"Name":"internal.metrics.resultSize","Update":5574,"Value":10885,"Internal":true,"Count Failed Values":true},{"ID":288,"Name":"internal.metrics.executorCpuTime","Update":25907369,"Value":48861676,"Internal":true,"Count Failed Values":true},{"ID":287,"Name":"internal.metrics.executorRunTime","Update":82,"Value":159,"Internal":true,"Count Failed Values":true},{"ID":286,"Name":"internal.metrics.executorDeserializeCpuTime","Update":7573630,"Value":14201012,"Internal":true,"Count Failed Values":true},{"ID":285,"Name":"internal.metrics.executorDeserializeTime","Update":7,"Value":13,"Internal":true,"Count Failed Values":true}]},"Task Executor 
Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":7,"Executor Deserialize CPU Time":7573630,"Executor Run Time":82,"Executor CPU Time":25907369,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":169,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":3,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":23,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"33\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[22],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":22,"Name":"StateStoreRDD","Scope":"{\"id\":\"36\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[21],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":19,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"41\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[18],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":21,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"37\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[20],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":20,"Name":"StateStoreRDD","Scope":"{\"id\":\"40\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[19],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[2],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020223613,"Completion Time":1596020223724,"Accumulables":[{"ID":218,"Name":"duration","Value":"25","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":209,"Name":"number of total state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":173,"Name":"local bytes read","Value":"169","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":286,"Name":"internal.metrics.executorDeserializeCpuTime","Value":14201012,"Internal":true,"Count Failed Values":true},{"ID":298,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":289,"Name":"internal.metrics.resultSize","Value":10885,"Internal":true,"Count Failed Values":true},{"ID":301,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":175,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":211,"Name":"time to update","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":202,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":220,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":214,"Name":"memory used by state","Value":"1216","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":205,"Name":"time in aggregation build","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":300,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":169,"Internal":true,"Count Failed Values":true},{"ID":294,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":285,"Name":"internal.metrics.executorDeserializeTime","Value":13,"Internal":true,"Count Failed Values":true},{"ID":207,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":297,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count Failed Values":true},{"ID":288,"Name":"internal.metrics.executorCpuTime","Value":48861676,"Internal":true,"Count Failed Values":true},{"ID":216,"Name":"count of cache hit on states cache in provider","Value":"4","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":174,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":210,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":219,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":201,"Name":"duration","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":222,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":213,"Name":"time to commit changes","Value":"68","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":299,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":302,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":212,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":203,"Name":"peak memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":170,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":215,"Name":"estimated size of state only on current version","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":287,"Name":"internal.metrics.executorRunTime","Value":159,"Internal":true,"Count Failed Values":true},{"ID":206,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":224,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":296,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":1,"Completion Time":1596020223725,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":5,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 1","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) LocalTableScan\nOutput [2]: [value#60, count#61]\nArguments: [value#60, count#61]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#60, count#61]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":310,"metricType":"sum"}]},"time":1596020223752} 
+{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":5,"time":1596020223761} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":4,"time":1596020223762} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":3,"time":1596020223762} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:57:03.168Z","batchId":1,"batchDuration":622,"durationMs":{"triggerExecution":622,"queryPlanning":47,"getBatch":0,"latestOffset":7,"addBatch":478,"walCommit":59},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":1216,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":456,"loadedMapCacheHitCount":4,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":"{\"test5\":{\"0\":48279}}","endOffset":"{\"test5\":{\"0\":48642}}","numInputRows":363,"inputRowsPerSecond":50.74793792814204,"processedRowsPerSecond":583.6012861736334}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":6,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 2","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48642}}, {\"test5\":{\"0\":48705}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, 
offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#604]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 39c861a0-0e30-4ca2-b363-495aff0f3f93, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 39c861a0-0e30-4ca2-b363-495aff0f3f93, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@52d6c50a\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@52d6c50a","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 2, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 2, numPartitions = 2], 
2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#528]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":394,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":393,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":390,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":391,"metricType":"timing"},{"name":"peak memory","accumulatorId":389,"metricType":"size"},{"name":"number of output rows","accumulatorId":388,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":392,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":385,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":386,"metricType":"timing"},{"name":"peak memory","accumulatorId":384,"metricType":"size"},{"name":"number of output rows","accumulatorId":383,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":387,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":382,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":334,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":335,"metricType":"nsTiming"},{"name":"records read","accumulatorId":332,"metricType":"sum"},{"name":"local bytes read","accumulatorId":330,"metricType":"size"},{"name":"fetch wait time","accumulatorId":331,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":328,"metricType":"size"},{"name":"local blocks read","accumulatorId":327,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":326,"metricType":"sum"},{"name":"data 
size","accumulatorId":325,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":329,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":333,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":381,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":378,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":379,"metricType":"timing"},{"name":"peak memory","accumulatorId":377,"metricType":"size"},{"name":"number of output rows","accumulatorId":376,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":380,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":375,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":365,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":366,"metricType":"sum"},{"name":"memory used by state","accumulatorId":371,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":373,"metricType":"sum"},{"name":"number of output rows","accumulatorId":364,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":372,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":374,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":370,"metricType":"timing"},{"name":"time to remove","accumulatorId":369,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":367,"metricType":"sum"},{"name":"time to update","accumulatorId":368,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":361,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":362,"metricType":"timing"},{"name":"peak memory","accumulatorId":360,"metricType":"size"},{"name":"number of output rows","accumulatorId":359,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":363,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":358,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020223909} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":7,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 2","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48642}}, {\"test5\":{\"0\":48705}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#680]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = c2fd3b95-1ba6-4d3e-8b9c-0256dfd90973, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = c2fd3b95-1ba6-4d3e-8b9c-0256dfd90973, opId = 0, ver = 0, 
numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@52d6c50a\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@52d6c50a","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 2, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 2, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#528]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output 
rows","accumulatorId":394,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":393,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":390,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":391,"metricType":"timing"},{"name":"peak memory","accumulatorId":389,"metricType":"size"},{"name":"number of output rows","accumulatorId":388,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":392,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":385,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":386,"metricType":"timing"},{"name":"peak memory","accumulatorId":384,"metricType":"size"},{"name":"number of output rows","accumulatorId":383,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":387,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":382,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":334,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":335,"metricType":"nsTiming"},{"name":"records read","accumulatorId":332,"metricType":"sum"},{"name":"local bytes read","accumulatorId":330,"metricType":"size"},{"name":"fetch wait time","accumulatorId":331,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":328,"metricType":"size"},{"name":"local blocks read","accumulatorId":327,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":326,"metricType":"sum"},{"name":"data size","accumulatorId":325,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":329,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":333,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":381,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":378,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":379,"metricType":"timing"},{"name":"peak memory","accumulatorId":377,"metricType":"size"},{"name":"number of output rows","accumulatorId":376,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":380,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":375,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":365,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":366,"metricType":"sum"},{"name":"memory used by state","accumulatorId":371,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":373,"metricType":"sum"},{"name":"number of output rows","accumulatorId":364,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":372,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":374,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":370,"metricType":"timing"},{"name":"time to remove","accumulatorId":369,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":367,"metricType":"sum"},{"name":"time to 
update","accumulatorId":368,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":361,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":362,"metricType":"timing"},{"name":"peak memory","accumulatorId":360,"metricType":"size"},{"name":"number of output rows","accumulatorId":359,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":363,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":358,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020224006} +{"Event":"SparkListenerJobStart","Job ID":2,"Submission Time":1596020224100,"Stage Infos":[{"Stage ID":5,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":35,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"66\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[34],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":32,"Name":"StateStoreRDD","Scope":"{\"id\":\"73\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[31],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":34,"Name":"StateStoreRDD","Scope":"{\"id\":\"69\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[33],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":33,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"70\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[32],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":31,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"74\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[30],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[4],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":4,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":30,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"74\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[29],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":27,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"81\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[26],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":29,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"75\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[28],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":28,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"80\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[27],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":26,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"82\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[25],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":24,"Name":"DataSourceRDD","Scope":"{\"id\":\"86\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":25,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"86\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[24],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage IDs":[5,4],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 2","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"2","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"7","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage 
Info":{"Stage ID":4,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":30,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"74\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[29],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":27,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"81\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[26],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":29,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"75\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[28],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":28,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"80\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[27],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":26,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"82\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[25],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":24,"Name":"DataSourceRDD","Scope":"{\"id\":\"86\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":25,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"86\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[24],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224103,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 2","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"2","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"7","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":4,"Stage Attempt ID":0,"Task Info":{"Task ID":6,"Index":0,"Attempt":0,"Launch Time":1596020224113,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":4,"Stage Attempt 
ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":6,"Index":0,"Attempt":0,"Launch Time":1596020224113,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020224174,"Failed":false,"Killed":false,"Accumulables":[{"ID":335,"Name":"shuffle write time","Update":"686296","Value":"686296","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":334,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":333,"Name":"shuffle bytes written","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":325,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":382,"Name":"duration","Update":"39","Value":"39","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":383,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":384,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":386,"Name":"time in aggregation build","Update":"32","Value":"32","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":388,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":389,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":391,"Name":"time in aggregation build","Update":"26","Value":"26","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":393,"Name":"duration","Update":"40","Value":"40","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":394,"Name":"number of output rows","Update":"63","Value":"63","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":439,"Name":"internal.metrics.input.recordsRead","Update":63,"Value":63,"Internal":true,"Count Failed Values":true},{"ID":437,"Name":"internal.metrics.shuffle.write.writeTime","Update":686296,"Value":686296,"Internal":true,"Count Failed Values":true},{"ID":436,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":435,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":426,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":421,"Name":"internal.metrics.resultSize","Update":2544,"Value":2544,"Internal":true,"Count Failed Values":true},{"ID":420,"Name":"internal.metrics.executorCpuTime","Update":33390843,"Value":33390843,"Internal":true,"Count Failed Values":true},{"ID":419,"Name":"internal.metrics.executorRunTime","Update":49,"Value":49,"Internal":true,"Count Failed Values":true},{"ID":418,"Name":"internal.metrics.executorDeserializeCpuTime","Update":4867521,"Value":4867521,"Internal":true,"Count Failed Values":true},{"ID":417,"Name":"internal.metrics.executorDeserializeTime","Update":8,"Value":8,"Internal":true,"Count Failed Values":true}]},"Task Executor 
Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":8,"Executor Deserialize CPU Time":4867521,"Executor Run Time":49,"Executor CPU Time":33390843,"Peak Execution Memory":524288,"Result Size":2544,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":168,"Shuffle Write Time":686296,"Shuffle Records Written":1},"Input Metrics":{"Bytes Read":0,"Records Read":63},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":4,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":30,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"74\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[29],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":27,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"81\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[26],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":29,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"75\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[28],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":28,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"80\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[27],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":26,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"82\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[25],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":24,"Name":"DataSourceRDD","Scope":"{\"id\":\"86\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk 
Size":0},{"RDD ID":25,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"86\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[24],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224103,"Completion Time":1596020224175,"Accumulables":[{"ID":436,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":391,"Name":"time in aggregation build","Value":"26","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":382,"Name":"duration","Value":"39","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":418,"Name":"internal.metrics.executorDeserializeCpuTime","Value":4867521,"Internal":true,"Count Failed Values":true},{"ID":421,"Name":"internal.metrics.resultSize","Value":2544,"Internal":true,"Count Failed Values":true},{"ID":394,"Name":"number of output rows","Value":"63","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":439,"Name":"internal.metrics.input.recordsRead","Value":63,"Internal":true,"Count Failed Values":true},{"ID":388,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":334,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":325,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":420,"Name":"internal.metrics.executorCpuTime","Value":33390843,"Internal":true,"Count Failed Values":true},{"ID":426,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true},{"ID":417,"Name":"internal.metrics.executorDeserializeTime","Value":8,"Internal":true,"Count Failed Values":true},{"ID":435,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":168,"Internal":true,"Count Failed Values":true},{"ID":384,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":393,"Name":"duration","Value":"40","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":333,"Name":"shuffle bytes written","Value":"168","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":383,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":437,"Name":"internal.metrics.shuffle.write.writeTime","Value":686296,"Internal":true,"Count Failed Values":true},{"ID":419,"Name":"internal.metrics.executorRunTime","Value":49,"Internal":true,"Count Failed Values":true},{"ID":386,"Name":"time in aggregation build","Value":"32","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":335,"Name":"shuffle write time","Value":"686296","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":389,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":5,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":35,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"66\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[34],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":32,"Name":"StateStoreRDD","Scope":"{\"id\":\"73\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[31],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":34,"Name":"StateStoreRDD","Scope":"{\"id\":\"69\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[33],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":33,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"70\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[32],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":31,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"74\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[30],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[4],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224179,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 2","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"2","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"7","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":5,"Stage Attempt ID":0,"Task Info":{"Task ID":7,"Index":0,"Attempt":0,"Launch Time":1596020224187,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":5,"Stage Attempt 
ID":0,"Task Info":{"Task ID":8,"Index":1,"Attempt":0,"Launch Time":1596020224187,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":5,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":7,"Index":0,"Attempt":0,"Launch Time":1596020224187,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020224256,"Failed":false,"Killed":false,"Accumulables":[{"ID":358,"Name":"duration","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":360,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":362,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":368,"Name":"time to update","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":369,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":370,"Name":"time to commit changes","Update":"32","Value":"32","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":372,"Name":"estimated size of state only on current version","Update":"88","Value":"88","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":373,"Name":"count of cache hit on states cache in provider","Update":"4","Value":"4","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":371,"Name":"memory used by state","Update":"400","Value":"400","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":375,"Name":"duration","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":377,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":379,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":459,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":458,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":457,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":456,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":455,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":454,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":453,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":451,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":446,"Name":"internal.metrics.resultSize","Update":5311,"Value":5311,"Internal":true,"Count Failed Values":true},{"ID":445,"Name":"internal.metrics.executorCpuTime","Update":17230622,"Value":17230622,"Internal":true,"Count Failed 
Values":true},{"ID":444,"Name":"internal.metrics.executorRunTime","Update":56,"Value":56,"Internal":true,"Count Failed Values":true},{"ID":443,"Name":"internal.metrics.executorDeserializeCpuTime","Update":5948051,"Value":5948051,"Internal":true,"Count Failed Values":true},{"ID":442,"Name":"internal.metrics.executorDeserializeTime","Update":6,"Value":6,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":6,"Executor Deserialize CPU Time":5948051,"Executor Run Time":56,"Executor CPU Time":17230622,"Peak Execution Memory":524288,"Result Size":5311,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":5,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":8,"Index":1,"Attempt":0,"Launch Time":1596020224187,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020224257,"Failed":false,"Killed":false,"Accumulables":[{"ID":358,"Name":"duration","Update":"4","Value":"7","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":363,"Name":"avg hash probe bucket list iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":359,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":360,"Name":"peak memory","Update":"4456448","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":362,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":368,"Name":"time to update","Update":"21","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":367,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":369,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":370,"Name":"time to commit changes","Update":"18","Value":"50","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":372,"Name":"estimated size of state only on current version","Update":"368","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":364,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":373,"Name":"count of cache hit on states cache in provider","Update":"4","Value":"8","Internal":true,"Count 
Failed Values":true,"Metadata":"sql"},{"ID":371,"Name":"memory used by state","Update":"784","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":366,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":375,"Name":"duration","Update":"22","Value":"25","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":376,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":377,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":379,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":381,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":327,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":331,"Name":"fetch wait time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":330,"Name":"local bytes read","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":332,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":459,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":458,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":457,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":456,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":455,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":454,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":453,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":451,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":446,"Name":"internal.metrics.resultSize","Update":5574,"Value":10885,"Internal":true,"Count Failed Values":true},{"ID":445,"Name":"internal.metrics.executorCpuTime","Update":23808555,"Value":41039177,"Internal":true,"Count Failed Values":true},{"ID":444,"Name":"internal.metrics.executorRunTime","Update":56,"Value":112,"Internal":true,"Count Failed Values":true},{"ID":443,"Name":"internal.metrics.executorDeserializeCpuTime","Update":6247106,"Value":12195157,"Internal":true,"Count Failed Values":true},{"ID":442,"Name":"internal.metrics.executorDeserializeTime","Update":6,"Value":12,"Internal":true,"Count Failed Values":true}]},"Task Executor 
Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":6,"Executor Deserialize CPU Time":6247106,"Executor Run Time":56,"Executor CPU Time":23808555,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":168,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":5,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":35,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"66\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[34],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":32,"Name":"StateStoreRDD","Scope":"{\"id\":\"73\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[31],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":34,"Name":"StateStoreRDD","Scope":"{\"id\":\"69\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[33],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":33,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"70\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[32],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":31,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"74\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[30],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[4],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224179,"Completion Time":1596020224259,"Accumulables":[{"ID":442,"Name":"internal.metrics.executorDeserializeTime","Value":12,"Internal":true,"Count Failed Values":true},{"ID":451,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":445,"Name":"internal.metrics.executorCpuTime","Value":41039177,"Internal":true,"Count Failed Values":true},{"ID":364,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":454,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count Failed Values":true},{"ID":373,"Name":"count of cache hit on states cache in provider","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":367,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":376,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":358,"Name":"duration","Value":"7","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":331,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":457,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":168,"Internal":true,"Count Failed Values":true},{"ID":379,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":370,"Name":"time to commit changes","Value":"50","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":456,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":369,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":459,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":360,"Name":"peak memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":381,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":453,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true},{"ID":372,"Name":"estimated size of state only on current version","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":363,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":327,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":444,"Name":"internal.metrics.executorRunTime","Value":112,"Internal":true,"Count Failed Values":true},{"ID":375,"Name":"duration","Value":"25","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":366,"Name":"number of total state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":330,"Name":"local bytes read","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":443,"Name":"internal.metrics.executorDeserializeCpuTime","Value":12195157,"Internal":true,"Count Failed Values":true},{"ID":455,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":446,"Name":"internal.metrics.resultSize","Value":10885,"Internal":true,"Count Failed Values":true},{"ID":332,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":377,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":359,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":458,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":368,"Name":"time to update","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":362,"Name":"time in aggregation build","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":371,"Name":"memory used by state","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":2,"Completion Time":1596020224259,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":8,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 2","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) LocalTableScan\nOutput [2]: [value#74, count#75]\nArguments: [value#74, count#75]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#74, count#75]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":467,"metricType":"sum"}]},"time":1596020224278} 
+{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":8,"time":1596020224287} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":7,"time":1596020224287} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":6,"time":1596020224288} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:57:03.793Z","batchId":2,"batchDuration":522,"durationMs":{"triggerExecution":522,"queryPlanning":41,"getBatch":1,"latestOffset":3,"addBatch":421,"walCommit":27},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":1184,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":456,"loadedMapCacheHitCount":8,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":"{\"test5\":{\"0\":48642}}","endOffset":"{\"test5\":{\"0\":48705}}","numInputRows":63,"inputRowsPerSecond":100.8,"processedRowsPerSecond":120.6896551724138}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":9,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 3","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48705}}, {\"test5\":{\"0\":48757}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, 
timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#835]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 8bb5d8a6-42f8-4141-8f25-e1b98f81aac4, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 8bb5d8a6-42f8-4141-8f25-e1b98f81aac4, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@59b7c509\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@59b7c509","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 3, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 3, numPartitions = 2], 
2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#759]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":551,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":550,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":547,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":548,"metricType":"timing"},{"name":"peak memory","accumulatorId":546,"metricType":"size"},{"name":"number of output rows","accumulatorId":545,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":549,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":542,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":543,"metricType":"timing"},{"name":"peak memory","accumulatorId":541,"metricType":"size"},{"name":"number of output rows","accumulatorId":540,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":544,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":539,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":491,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":492,"metricType":"nsTiming"},{"name":"records read","accumulatorId":489,"metricType":"sum"},{"name":"local bytes read","accumulatorId":487,"metricType":"size"},{"name":"fetch wait time","accumulatorId":488,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":485,"metricType":"size"},{"name":"local blocks read","accumulatorId":484,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":483,"metricType":"sum"},{"name":"data 
size","accumulatorId":482,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":486,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":490,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":538,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":535,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":536,"metricType":"timing"},{"name":"peak memory","accumulatorId":534,"metricType":"size"},{"name":"number of output rows","accumulatorId":533,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":537,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":532,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":522,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":523,"metricType":"sum"},{"name":"memory used by state","accumulatorId":528,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":530,"metricType":"sum"},{"name":"number of output rows","accumulatorId":521,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":529,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":531,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":527,"metricType":"timing"},{"name":"time to remove","accumulatorId":526,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":524,"metricType":"sum"},{"name":"time to update","accumulatorId":525,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":518,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":519,"metricType":"timing"},{"name":"peak memory","accumulatorId":517,"metricType":"size"},{"name":"number of output rows","accumulatorId":516,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":520,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":515,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020224419} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":10,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 3","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48705}}, {\"test5\":{\"0\":48757}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#911]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 29402d2a-a5da-4bb1-8d1a-c6d1c2d998d5, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 29402d2a-a5da-4bb1-8d1a-c6d1c2d998d5, opId = 0, ver = 0, 
numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@59b7c509\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@59b7c509","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 3, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 3, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#759]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output 
rows","accumulatorId":551,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":550,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":547,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":548,"metricType":"timing"},{"name":"peak memory","accumulatorId":546,"metricType":"size"},{"name":"number of output rows","accumulatorId":545,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":549,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":542,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":543,"metricType":"timing"},{"name":"peak memory","accumulatorId":541,"metricType":"size"},{"name":"number of output rows","accumulatorId":540,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":544,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":539,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":491,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":492,"metricType":"nsTiming"},{"name":"records read","accumulatorId":489,"metricType":"sum"},{"name":"local bytes read","accumulatorId":487,"metricType":"size"},{"name":"fetch wait time","accumulatorId":488,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":485,"metricType":"size"},{"name":"local blocks read","accumulatorId":484,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":483,"metricType":"sum"},{"name":"data size","accumulatorId":482,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":486,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":490,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":538,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":535,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":536,"metricType":"timing"},{"name":"peak memory","accumulatorId":534,"metricType":"size"},{"name":"number of output rows","accumulatorId":533,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":537,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":532,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":522,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":523,"metricType":"sum"},{"name":"memory used by state","accumulatorId":528,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":530,"metricType":"sum"},{"name":"number of output rows","accumulatorId":521,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":529,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":531,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":527,"metricType":"timing"},{"name":"time to remove","accumulatorId":526,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":524,"metricType":"sum"},{"name":"time to 
update","accumulatorId":525,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":518,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":519,"metricType":"timing"},{"name":"peak memory","accumulatorId":517,"metricType":"size"},{"name":"number of output rows","accumulatorId":516,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":520,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":515,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020224452} +{"Event":"SparkListenerJobStart","Job ID":3,"Submission Time":1596020224533,"Stage Infos":[{"Stage ID":6,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":42,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"107\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[41],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":38,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"115\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[37],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":36,"Name":"DataSourceRDD","Scope":"{\"id\":\"119\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":41,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"108\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[40],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":37,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"119\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[36],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":40,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"113\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[39],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":39,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"114\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[38],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":7,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":47,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"99\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[46],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":46,"Name":"StateStoreRDD","Scope":"{\"id\":\"102\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[45],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":45,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"103\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[44],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":44,"Name":"StateStoreRDD","Scope":"{\"id\":\"106\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[43],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":43,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"107\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[42],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[6],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage IDs":[6,7],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 3","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"3","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"10","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":6,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":42,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"107\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[41],"Storage Level":{"Use Disk":false,"Use 
Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":38,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"115\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[37],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":36,"Name":"DataSourceRDD","Scope":"{\"id\":\"119\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":41,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"108\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[40],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":37,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"119\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[36],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":40,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"113\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[39],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":39,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"114\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[38],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission 
Time":1596020224535,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 3","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"3","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"10","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":6,"Stage Attempt ID":0,"Task Info":{"Task ID":9,"Index":0,"Attempt":0,"Launch Time":1596020224541,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":6,"Stage Attempt ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":9,"Index":0,"Attempt":0,"Launch Time":1596020224541,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020224581,"Failed":false,"Killed":false,"Accumulables":[{"ID":492,"Name":"shuffle write time","Update":"643278","Value":"643278","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":491,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":490,"Name":"shuffle bytes written","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":482,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":539,"Name":"duration","Update":"20","Value":"20","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":540,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":541,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":543,"Name":"time in aggregation build","Update":"13","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":545,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":546,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":548,"Name":"time in aggregation build","Update":"9","Value":"9","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":550,"Name":"duration","Update":"20","Value":"20","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":551,"Name":"number of output rows","Update":"52","Value":"52","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":596,"Name":"internal.metrics.input.recordsRead","Update":52,"Value":52,"Internal":true,"Count Failed Values":true},{"ID":594,"Name":"internal.metrics.shuffle.write.writeTime","Update":643278,"Value":643278,"Internal":true,"Count Failed Values":true},{"ID":593,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":592,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":583,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":578,"Name":"internal.metrics.resultSize","Update":2544,"Value":2544,"Internal":true,"Count Failed Values":true},{"ID":577,"Name":"internal.metrics.executorCpuTime","Update":29099071,"Value":29099071,"Internal":true,"Count Failed Values":true},{"ID":576,"Name":"internal.metrics.executorRunTime","Update":29,"Value":29,"Internal":true,"Count Failed Values":true},{"ID":575,"Name":"internal.metrics.executorDeserializeCpuTime","Update":3091128,"Value":3091128,"Internal":true,"Count Failed Values":true},{"ID":574,"Name":"internal.metrics.executorDeserializeTime","Update":3,"Value":3,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":3,"Executor Deserialize CPU Time":3091128,"Executor Run Time":29,"Executor CPU Time":29099071,"Peak Execution Memory":524288,"Result Size":2544,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":168,"Shuffle Write Time":643278,"Shuffle Records Written":1},"Input Metrics":{"Bytes 
Read":0,"Records Read":52},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":6,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":42,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"107\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[41],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":38,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"115\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[37],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":36,"Name":"DataSourceRDD","Scope":"{\"id\":\"119\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":41,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"108\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[40],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":37,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"119\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[36],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":40,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"113\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[39],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":39,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"114\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[38],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224535,"Completion Time":1596020224582,"Accumulables":[{"ID":550,"Name":"duration","Value":"20","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":541,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":577,"Name":"internal.metrics.executorCpuTime","Value":29099071,"Internal":true,"Count Failed Values":true},{"ID":490,"Name":"shuffle bytes written","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":576,"Name":"internal.metrics.executorRunTime","Value":29,"Internal":true,"Count Failed Values":true},{"ID":540,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":594,"Name":"internal.metrics.shuffle.write.writeTime","Value":643278,"Internal":true,"Count Failed Values":true},{"ID":543,"Name":"time in aggregation build","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":492,"Name":"shuffle write time","Value":"643278","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":546,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":539,"Name":"duration","Value":"20","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":575,"Name":"internal.metrics.executorDeserializeCpuTime","Value":3091128,"Internal":true,"Count Failed Values":true},{"ID":593,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":548,"Name":"time in aggregation build","Value":"9","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":578,"Name":"internal.metrics.resultSize","Value":2544,"Internal":true,"Count Failed Values":true},{"ID":596,"Name":"internal.metrics.input.recordsRead","Value":52,"Internal":true,"Count Failed Values":true},{"ID":551,"Name":"number of output rows","Value":"52","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":482,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":491,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":545,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":592,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":168,"Internal":true,"Count Failed Values":true},{"ID":574,"Name":"internal.metrics.executorDeserializeTime","Value":3,"Internal":true,"Count Failed Values":true},{"ID":583,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true}],"Resource Profile Id":0}} 
+{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":7,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":47,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"99\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[46],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":46,"Name":"StateStoreRDD","Scope":"{\"id\":\"102\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[45],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":45,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"103\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[44],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":44,"Name":"StateStoreRDD","Scope":"{\"id\":\"106\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[43],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":43,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"107\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[42],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[6],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224588,"Accumulables":[],"Resource Profile 
Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 3","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"3","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"10","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":7,"Stage Attempt ID":0,"Task Info":{"Task ID":10,"Index":0,"Attempt":0,"Launch Time":1596020224596,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":7,"Stage Attempt ID":0,"Task Info":{"Task ID":11,"Index":1,"Attempt":0,"Launch Time":1596020224597,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":7,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":10,"Index":0,"Attempt":0,"Launch Time":1596020224596,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020224670,"Failed":false,"Killed":false,"Accumulables":[{"ID":515,"Name":"duration","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":517,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count 
Failed Values":true,"Metadata":"sql"},{"ID":519,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":525,"Name":"time to update","Update":"5","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":526,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":527,"Name":"time to commit changes","Update":"27","Value":"27","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":529,"Name":"estimated size of state only on current version","Update":"88","Value":"88","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":530,"Name":"count of cache hit on states cache in provider","Update":"6","Value":"6","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":528,"Name":"memory used by state","Update":"400","Value":"400","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":532,"Name":"duration","Update":"5","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":534,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":536,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":616,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":615,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":614,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":613,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":612,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":611,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":610,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":608,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":603,"Name":"internal.metrics.resultSize","Update":5311,"Value":5311,"Internal":true,"Count Failed Values":true},{"ID":602,"Name":"internal.metrics.executorCpuTime","Update":19967906,"Value":19967906,"Internal":true,"Count Failed Values":true},{"ID":601,"Name":"internal.metrics.executorRunTime","Update":62,"Value":62,"Internal":true,"Count Failed Values":true},{"ID":600,"Name":"internal.metrics.executorDeserializeCpuTime","Update":4899567,"Value":4899567,"Internal":true,"Count Failed Values":true},{"ID":599,"Name":"internal.metrics.executorDeserializeTime","Update":4,"Value":4,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor 
Deserialize Time":4,"Executor Deserialize CPU Time":4899567,"Executor Run Time":62,"Executor CPU Time":19967906,"Peak Execution Memory":524288,"Result Size":5311,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":7,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":11,"Index":1,"Attempt":0,"Launch Time":1596020224597,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020224687,"Failed":false,"Killed":false,"Accumulables":[{"ID":515,"Name":"duration","Update":"4","Value":"7","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":520,"Name":"avg hash probe bucket list iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":516,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":517,"Name":"peak memory","Update":"4456448","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":519,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":525,"Name":"time to update","Update":"17","Value":"22","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":524,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":526,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":527,"Name":"time to commit changes","Update":"26","Value":"53","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":529,"Name":"estimated size of state only on current version","Update":"368","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":521,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":530,"Name":"count of cache hit on states cache in provider","Update":"6","Value":"12","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":528,"Name":"memory used by state","Update":"784","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":523,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":532,"Name":"duration","Update":"17","Value":"22","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":533,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":534,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":536,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":538,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":484,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":488,"Name":"fetch wait time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":487,"Name":"local bytes read","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":489,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":616,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":615,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":614,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":613,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":612,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":611,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":610,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":608,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":603,"Name":"internal.metrics.resultSize","Update":5574,"Value":10885,"Internal":true,"Count Failed Values":true},{"ID":602,"Name":"internal.metrics.executorCpuTime","Update":22402538,"Value":42370444,"Internal":true,"Count Failed Values":true},{"ID":601,"Name":"internal.metrics.executorRunTime","Update":79,"Value":141,"Internal":true,"Count Failed Values":true},{"ID":600,"Name":"internal.metrics.executorDeserializeCpuTime","Update":4671511,"Value":9571078,"Internal":true,"Count Failed Values":true},{"ID":599,"Name":"internal.metrics.executorDeserializeTime","Update":4,"Value":8,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":4,"Executor Deserialize CPU Time":4671511,"Executor Run Time":79,"Executor CPU Time":22402538,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":168,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":7,"Stage Attempt ID":0,"Stage Name":"start at 
StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":47,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"99\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[46],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":46,"Name":"StateStoreRDD","Scope":"{\"id\":\"102\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[45],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":45,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"103\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[44],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":44,"Name":"StateStoreRDD","Scope":"{\"id\":\"106\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[43],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":43,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"107\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[42],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[6],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224588,"Completion Time":1596020224688,"Accumulables":[{"ID":523,"Name":"number of total state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":532,"Name":"duration","Value":"22","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":487,"Name":"local bytes read","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":517,"Name":"peak 
memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":600,"Name":"internal.metrics.executorDeserializeCpuTime","Value":9571078,"Internal":true,"Count Failed Values":true},{"ID":603,"Name":"internal.metrics.resultSize","Value":10885,"Internal":true,"Count Failed Values":true},{"ID":612,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":516,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":615,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":534,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":525,"Name":"time to update","Value":"22","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":489,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":528,"Name":"memory used by state","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":519,"Name":"time in aggregation build","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":608,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":599,"Name":"internal.metrics.executorDeserializeTime","Value":8,"Internal":true,"Count Failed Values":true},{"ID":521,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":530,"Name":"count of cache hit on states cache in provider","Value":"12","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":611,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count Failed Values":true},{"ID":602,"Name":"internal.metrics.executorCpuTime","Value":42370444,"Internal":true,"Count Failed Values":true},{"ID":488,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":515,"Name":"duration","Value":"7","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":524,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":533,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":614,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":168,"Internal":true,"Count Failed Values":true},{"ID":536,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":527,"Name":"time to commit changes","Value":"53","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":613,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":616,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":526,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":520,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":610,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true},{"ID":601,"Name":"internal.metrics.executorRunTime","Value":141,"Internal":true,"Count Failed Values":true},{"ID":484,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":538,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":529,"Name":"estimated size of state only on current version","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":3,"Completion Time":1596020224689,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":11,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 3","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) LocalTableScan\nOutput [2]: [value#88, count#89]\nArguments: [value#88, count#89]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#88, count#89]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":624,"metricType":"sum"}]},"time":1596020224709} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":11,"time":1596020224713} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":10,"time":1596020224714} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":9,"time":1596020224714} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:57:04.317Z","batchId":3,"batchDuration":415,"durationMs":{"triggerExecution":415,"queryPlanning":38,"getBatch":1,"latestOffset":3,"addBatch":332,"walCommit":21},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":1184,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":456,"loadedMapCacheHitCount":12,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":"{\"test5\":{\"0\":48705}}","endOffset":"{\"test5\":{\"0\":48757}}","numInputRows":52,"inputRowsPerSecond":99.23664122137404,"processedRowsPerSecond":125.30120481927712}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} 
+{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":12,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 4","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48757}}, {\"test5\":{\"0\":48799}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1066]\n\n(10) 
StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 42efe357-12ef-4061-9b83-20bf4c29a257, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 42efe357-12ef-4061-9b83-20bf4c29a257, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1717338b\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1717338b","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 4, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 4, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#990]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, 
timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":708,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":707,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":704,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":705,"metricType":"timing"},{"name":"peak memory","accumulatorId":703,"metricType":"size"},{"name":"number of output rows","accumulatorId":702,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":706,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":699,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":700,"metricType":"timing"},{"name":"peak memory","accumulatorId":698,"metricType":"size"},{"name":"number of output rows","accumulatorId":697,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":701,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":696,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":648,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":649,"metricType":"nsTiming"},{"name":"records read","accumulatorId":646,"metricType":"sum"},{"name":"local bytes read","accumulatorId":644,"metricType":"size"},{"name":"fetch wait time","accumulatorId":645,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":642,"metricType":"size"},{"name":"local blocks read","accumulatorId":641,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":640,"metricType":"sum"},{"name":"data size","accumulatorId":639,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":643,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":647,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":695,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":692,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":693,"metricType":"timing"},{"name":"peak memory","accumulatorId":691,"metricType":"size"},{"name":"number of output rows","accumulatorId":690,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":694,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":689,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":679,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":680,"metricType":"sum"},{"name":"memory used by state","accumulatorId":685,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":687,"metricType":"sum"},{"name":"number of output rows","accumulatorId":678,"metricType":"sum"},{"name":"estimated size of state only on current 
version","accumulatorId":686,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":688,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":684,"metricType":"timing"},{"name":"time to remove","accumulatorId":683,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":681,"metricType":"sum"},{"name":"time to update","accumulatorId":682,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":675,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":676,"metricType":"timing"},{"name":"peak memory","accumulatorId":674,"metricType":"size"},{"name":"number of output rows","accumulatorId":673,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":677,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":672,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020224817} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":13,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 4","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48757}}, {\"test5\":{\"0\":48799}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, 
timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1142]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 6fa28bd2-2924-4e01-8bbe-128888d2669b, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 6fa28bd2-2924-4e01-8bbe-128888d2669b, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1717338b\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@1717338b","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 4, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 4, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#990]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], 
functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":708,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":707,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":704,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":705,"metricType":"timing"},{"name":"peak memory","accumulatorId":703,"metricType":"size"},{"name":"number of output rows","accumulatorId":702,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":706,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":699,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":700,"metricType":"timing"},{"name":"peak memory","accumulatorId":698,"metricType":"size"},{"name":"number of output rows","accumulatorId":697,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":701,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":696,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":648,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":649,"metricType":"nsTiming"},{"name":"records read","accumulatorId":646,"metricType":"sum"},{"name":"local bytes read","accumulatorId":644,"metricType":"size"},{"name":"fetch wait time","accumulatorId":645,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":642,"metricType":"size"},{"name":"local blocks read","accumulatorId":641,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":640,"metricType":"sum"},{"name":"data size","accumulatorId":639,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":643,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":647,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output 
rows","accumulatorId":695,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":692,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":693,"metricType":"timing"},{"name":"peak memory","accumulatorId":691,"metricType":"size"},{"name":"number of output rows","accumulatorId":690,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":694,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":689,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":679,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":680,"metricType":"sum"},{"name":"memory used by state","accumulatorId":685,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":687,"metricType":"sum"},{"name":"number of output rows","accumulatorId":678,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":686,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":688,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":684,"metricType":"timing"},{"name":"time to remove","accumulatorId":683,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":681,"metricType":"sum"},{"name":"time to update","accumulatorId":682,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":675,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":676,"metricType":"timing"},{"name":"peak memory","accumulatorId":674,"metricType":"size"},{"name":"number of output rows","accumulatorId":673,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":677,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":672,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020224849} +{"Event":"SparkListenerJobStart","Job ID":4,"Submission Time":1596020224928,"Stage Infos":[{"Stage ID":9,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":59,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"132\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[58],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":55,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"140\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[54],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":56,"Name":"StateStoreRDD","Scope":"{\"id\":\"139\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[55],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":57,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"136\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at 
StructuredKafkaWordCount.scala:86","Parent IDs":[56],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":58,"Name":"StateStoreRDD","Scope":"{\"id\":\"135\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[57],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[8],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":8,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":54,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"140\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[53],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":53,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"141\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[52],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":51,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"147\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[50],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":49,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"152\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[48],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":52,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"146\",\"name\":\"MapPartitions\"}","Callsite":"start at 
StructuredKafkaWordCount.scala:86","Parent IDs":[51],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":50,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"148\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[49],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":48,"Name":"DataSourceRDD","Scope":"{\"id\":\"152\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage IDs":[9,8],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 4","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"4","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"13","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":8,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":54,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"140\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[53],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":53,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"141\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[52],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":51,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"147\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[50],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":49,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"152\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[48],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":52,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"146\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[51],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":50,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"148\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent 
IDs":[49],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":48,"Name":"DataSourceRDD","Scope":"{\"id\":\"152\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224929,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at 
StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 4","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"4","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"13","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":8,"Stage Attempt ID":0,"Task Info":{"Task ID":12,"Index":0,"Attempt":0,"Launch Time":1596020224941,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":8,"Stage Attempt ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":12,"Index":0,"Attempt":0,"Launch Time":1596020224941,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020224979,"Failed":false,"Killed":false,"Accumulables":[{"ID":649,"Name":"shuffle write time","Update":"572754","Value":"572754","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":648,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":647,"Name":"shuffle bytes written","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":639,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":696,"Name":"duration","Update":"19","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":697,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":698,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":700,"Name":"time in aggregation build","Update":"13","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":702,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":703,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":705,"Name":"time in aggregation build","Update":"9","Value":"9","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":707,"Name":"duration","Update":"19","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":708,"Name":"number of output rows","Update":"42","Value":"42","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":753,"Name":"internal.metrics.input.recordsRead","Update":42,"Value":42,"Internal":true,"Count Failed Values":true},{"ID":751,"Name":"internal.metrics.shuffle.write.writeTime","Update":572754,"Value":572754,"Internal":true,"Count Failed Values":true},{"ID":750,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":749,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":168,"Value":168,"Internal":true,"Count Failed 
Values":true},{"ID":740,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":735,"Name":"internal.metrics.resultSize","Update":2544,"Value":2544,"Internal":true,"Count Failed Values":true},{"ID":734,"Name":"internal.metrics.executorCpuTime","Update":27800373,"Value":27800373,"Internal":true,"Count Failed Values":true},{"ID":733,"Name":"internal.metrics.executorRunTime","Update":28,"Value":28,"Internal":true,"Count Failed Values":true},{"ID":732,"Name":"internal.metrics.executorDeserializeCpuTime","Update":4768103,"Value":4768103,"Internal":true,"Count Failed Values":true},{"ID":731,"Name":"internal.metrics.executorDeserializeTime","Update":4,"Value":4,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":4,"Executor Deserialize CPU Time":4768103,"Executor Run Time":28,"Executor CPU Time":27800373,"Peak Execution Memory":524288,"Result Size":2544,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":168,"Shuffle Write Time":572754,"Shuffle Records Written":1},"Input Metrics":{"Bytes Read":0,"Records Read":42},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":8,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":54,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"140\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[53],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":53,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"141\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[52],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":51,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"147\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[50],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":49,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"152\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[48],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of 
Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":52,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"146\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[51],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":50,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"148\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[49],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":48,"Name":"DataSourceRDD","Scope":"{\"id\":\"152\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224929,"Completion Time":1596020224979,"Accumulables":[{"ID":732,"Name":"internal.metrics.executorDeserializeCpuTime","Value":4768103,"Internal":true,"Count Failed Values":true},{"ID":696,"Name":"duration","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":750,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":705,"Name":"time in aggregation build","Value":"9","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":735,"Name":"internal.metrics.resultSize","Value":2544,"Internal":true,"Count Failed Values":true},{"ID":708,"Name":"number of output rows","Value":"42","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":753,"Name":"internal.metrics.input.recordsRead","Value":42,"Internal":true,"Count Failed Values":true},{"ID":648,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":639,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":702,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":740,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true},{"ID":731,"Name":"internal.metrics.executorDeserializeTime","Value":4,"Internal":true,"Count Failed Values":true},{"ID":749,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":168,"Internal":true,"Count Failed Values":true},{"ID":698,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":734,"Name":"internal.metrics.executorCpuTime","Value":27800373,"Internal":true,"Count Failed Values":true},{"ID":707,"Name":"duration","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":647,"Name":"shuffle bytes written","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":733,"Name":"internal.metrics.executorRunTime","Value":28,"Internal":true,"Count Failed Values":true},{"ID":697,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":751,"Name":"internal.metrics.shuffle.write.writeTime","Value":572754,"Internal":true,"Count Failed Values":true},{"ID":700,"Name":"time in aggregation build","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":649,"Name":"shuffle write time","Value":"572754","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":703,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":9,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":59,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"132\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[58],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":55,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"140\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[54],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":56,"Name":"StateStoreRDD","Scope":"{\"id\":\"139\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[55],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":57,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"136\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[56],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":58,"Name":"StateStoreRDD","Scope":"{\"id\":\"135\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[57],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[8],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224987,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 4","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"4","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"13","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} 
+{"Event":"SparkListenerTaskStart","Stage ID":9,"Stage Attempt ID":0,"Task Info":{"Task ID":13,"Index":0,"Attempt":0,"Launch Time":1596020224994,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":9,"Stage Attempt ID":0,"Task Info":{"Task ID":14,"Index":1,"Attempt":0,"Launch Time":1596020224994,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":9,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":14,"Index":1,"Attempt":0,"Launch Time":1596020224994,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225056,"Failed":false,"Killed":false,"Accumulables":[{"ID":672,"Name":"duration","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":677,"Name":"avg hash probe bucket list iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":673,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":674,"Name":"peak memory","Update":"4456448","Value":"4456448","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":676,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":682,"Name":"time to update","Update":"19","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":681,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":683,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":684,"Name":"time to commit changes","Update":"11","Value":"11","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":686,"Name":"estimated size of state only on current version","Update":"368","Value":"368","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":678,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":687,"Name":"count of cache hit on states cache in provider","Update":"8","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":685,"Name":"memory used by state","Update":"784","Value":"784","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":680,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":689,"Name":"duration","Update":"19","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":690,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":691,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":693,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":695,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":641,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":645,"Name":"fetch wait time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":644,"Name":"local bytes read","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":646,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":773,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":772,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":771,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":770,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":769,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":768,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":767,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":765,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":4718592,"Internal":true,"Count Failed Values":true},{"ID":760,"Name":"internal.metrics.resultSize","Update":5574,"Value":5574,"Internal":true,"Count Failed Values":true},{"ID":759,"Name":"internal.metrics.executorCpuTime","Update":19548688,"Value":19548688,"Internal":true,"Count Failed Values":true},{"ID":758,"Name":"internal.metrics.executorRunTime","Update":52,"Value":52,"Internal":true,"Count Failed Values":true},{"ID":757,"Name":"internal.metrics.executorDeserializeCpuTime","Update":5622533,"Value":5622533,"Internal":true,"Count Failed Values":true},{"ID":756,"Name":"internal.metrics.executorDeserializeTime","Update":5,"Value":5,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":5,"Executor Deserialize CPU Time":5622533,"Executor Run Time":52,"Executor CPU Time":19548688,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":168,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":9,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task 
Info":{"Task ID":13,"Index":0,"Attempt":0,"Launch Time":1596020224994,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225058,"Failed":false,"Killed":false,"Accumulables":[{"ID":672,"Name":"duration","Update":"2","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":674,"Name":"peak memory","Update":"262144","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":676,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":682,"Name":"time to update","Update":"4","Value":"23","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":683,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":684,"Name":"time to commit changes","Update":"35","Value":"46","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":686,"Name":"estimated size of state only on current version","Update":"88","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":687,"Name":"count of cache hit on states cache in provider","Update":"8","Value":"16","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":685,"Name":"memory used by state","Update":"400","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":689,"Name":"duration","Update":"4","Value":"23","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":691,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":693,"Name":"time in aggregation build","Update":"0","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":773,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":772,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":771,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":770,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":769,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":768,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":767,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":765,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":760,"Name":"internal.metrics.resultSize","Update":5311,"Value":10885,"Internal":true,"Count Failed Values":true},{"ID":759,"Name":"internal.metrics.executorCpuTime","Update":16813539,"Value":36362227,"Internal":true,"Count Failed Values":true},{"ID":758,"Name":"internal.metrics.executorRunTime","Update":55,"Value":107,"Internal":true,"Count Failed Values":true},{"ID":757,"Name":"internal.metrics.executorDeserializeCpuTime","Update":4322992,"Value":9945525,"Internal":true,"Count Failed Values":true},{"ID":756,"Name":"internal.metrics.executorDeserializeTime","Update":4,"Value":9,"Internal":true,"Count Failed Values":true}]},"Task Executor 
Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":4,"Executor Deserialize CPU Time":4322992,"Executor Run Time":55,"Executor CPU Time":16813539,"Peak Execution Memory":524288,"Result Size":5311,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":9,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":59,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"132\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[58],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":55,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"140\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[54],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":56,"Name":"StateStoreRDD","Scope":"{\"id\":\"139\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[55],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":57,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"136\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[56],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":58,"Name":"StateStoreRDD","Scope":"{\"id\":\"135\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[57],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[8],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020224987,"Completion Time":1596020225059,"Accumulables":[{"ID":765,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":756,"Name":"internal.metrics.executorDeserializeTime","Value":9,"Internal":true,"Count Failed Values":true},{"ID":678,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":759,"Name":"internal.metrics.executorCpuTime","Value":36362227,"Internal":true,"Count Failed Values":true},{"ID":768,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count Failed Values":true},{"ID":687,"Name":"count of cache hit on states cache in provider","Value":"16","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":681,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":771,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":168,"Internal":true,"Count Failed Values":true},{"ID":690,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":672,"Name":"duration","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":645,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":684,"Name":"time to commit changes","Value":"46","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":693,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":770,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":683,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":773,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":686,"Name":"estimated size of state only on current version","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":695,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":677,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":767,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true},{"ID":641,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":758,"Name":"internal.metrics.executorRunTime","Value":107,"Internal":true,"Count Failed Values":true},{"ID":644,"Name":"local bytes read","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":680,"Name":"number of total state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":689,"Name":"duration","Value":"23","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":674,"Name":"peak memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":757,"Name":"internal.metrics.executorDeserializeCpuTime","Value":9945525,"Internal":true,"Count Failed Values":true},{"ID":769,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":760,"Name":"internal.metrics.resultSize","Value":10885,"Internal":true,"Count Failed Values":true},{"ID":772,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":646,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":682,"Name":"time to update","Value":"23","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":691,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":673,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":676,"Name":"time in aggregation build","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":685,"Name":"memory used by state","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":4,"Completion Time":1596020225059,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":14,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 4","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) 
LocalTableScan\nOutput [2]: [value#102, count#103]\nArguments: [value#102, count#103]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#102, count#103]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":781,"metricType":"sum"}]},"time":1596020225079} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":14,"time":1596020225087} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":13,"time":1596020225087} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":12,"time":1596020225087} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:57:04.734Z","batchId":4,"batchDuration":387,"durationMs":{"triggerExecution":387,"queryPlanning":30,"getBatch":1,"latestOffset":3,"addBatch":306,"walCommit":12},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":1184,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":456,"loadedMapCacheHitCount":16,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":"{\"test5\":{\"0\":48757}}","endOffset":"{\"test5\":{\"0\":48799}}","numInputRows":42,"inputRowsPerSecond":100.71942446043165,"processedRowsPerSecond":108.52713178294573}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":15,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 5","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, 
timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48799}}, {\"test5\":{\"0\":48837}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1297]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 9579cc6c-8827-43f7-9678-7747602e493e, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 9579cc6c-8827-43f7-9678-7747602e493e, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@2c214312\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@2c214312","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 5, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], 
functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 5, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#1221]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":865,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":864,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":861,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":862,"metricType":"timing"},{"name":"peak memory","accumulatorId":860,"metricType":"size"},{"name":"number of output rows","accumulatorId":859,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":863,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":856,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":857,"metricType":"timing"},{"name":"peak memory","accumulatorId":855,"metricType":"size"},{"name":"number of output rows","accumulatorId":854,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":858,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":853,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":805,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":806,"metricType":"nsTiming"},{"name":"records read","accumulatorId":803,"metricType":"sum"},{"name":"local bytes 
read","accumulatorId":801,"metricType":"size"},{"name":"fetch wait time","accumulatorId":802,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":799,"metricType":"size"},{"name":"local blocks read","accumulatorId":798,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":797,"metricType":"sum"},{"name":"data size","accumulatorId":796,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":800,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":804,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":852,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":849,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":850,"metricType":"timing"},{"name":"peak memory","accumulatorId":848,"metricType":"size"},{"name":"number of output rows","accumulatorId":847,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":851,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":846,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":836,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":837,"metricType":"sum"},{"name":"memory used by state","accumulatorId":842,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":844,"metricType":"sum"},{"name":"number of output rows","accumulatorId":835,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":843,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":845,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":841,"metricType":"timing"},{"name":"time to remove","accumulatorId":840,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":838,"metricType":"sum"},{"name":"time to update","accumulatorId":839,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":832,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":833,"metricType":"timing"},{"name":"peak memory","accumulatorId":831,"metricType":"size"},{"name":"number of output rows","accumulatorId":830,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":834,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":829,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020225211} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":16,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 5","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48799}}, {\"test5\":{\"0\":48837}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1373]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = b800d96e-7584-4e8d-8df8-c9b901b7f2e2, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = b800d96e-7584-4e8d-8df8-c9b901b7f2e2, opId = 0, ver = 0, 
numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@2c214312\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@2c214312","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 5, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 5, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#1221]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output 
rows","accumulatorId":865,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":864,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":861,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":862,"metricType":"timing"},{"name":"peak memory","accumulatorId":860,"metricType":"size"},{"name":"number of output rows","accumulatorId":859,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":863,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":856,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":857,"metricType":"timing"},{"name":"peak memory","accumulatorId":855,"metricType":"size"},{"name":"number of output rows","accumulatorId":854,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":858,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":853,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":805,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":806,"metricType":"nsTiming"},{"name":"records read","accumulatorId":803,"metricType":"sum"},{"name":"local bytes read","accumulatorId":801,"metricType":"size"},{"name":"fetch wait time","accumulatorId":802,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":799,"metricType":"size"},{"name":"local blocks read","accumulatorId":798,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":797,"metricType":"sum"},{"name":"data size","accumulatorId":796,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":800,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":804,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":852,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":849,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":850,"metricType":"timing"},{"name":"peak memory","accumulatorId":848,"metricType":"size"},{"name":"number of output rows","accumulatorId":847,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":851,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":846,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":836,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":837,"metricType":"sum"},{"name":"memory used by state","accumulatorId":842,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":844,"metricType":"sum"},{"name":"number of output rows","accumulatorId":835,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":843,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":845,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":841,"metricType":"timing"},{"name":"time to remove","accumulatorId":840,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":838,"metricType":"sum"},{"name":"time to 
update","accumulatorId":839,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":832,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":833,"metricType":"timing"},{"name":"peak memory","accumulatorId":831,"metricType":"size"},{"name":"number of output rows","accumulatorId":830,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":834,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":829,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020225270} +{"Event":"SparkListenerJobStart","Job ID":5,"Submission Time":1596020225342,"Stage Infos":[{"Stage ID":10,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":66,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"173\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[65],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":62,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"181\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[61],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":64,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"179\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[63],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":61,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"185\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[60],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":65,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"174\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[64],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":60,"Name":"DataSourceRDD","Scope":"{\"id\":\"185\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":63,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"180\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[62],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":11,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":71,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"165\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[70],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":70,"Name":"StateStoreRDD","Scope":"{\"id\":\"168\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[69],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":69,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"169\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[68],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":67,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"173\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[66],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":68,"Name":"StateStoreRDD","Scope":"{\"id\":\"172\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[67],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[10],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage IDs":[10,11],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 5","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"5","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"16","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":10,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":66,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"173\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[65],"Storage Level":{"Use Disk":false,"Use 
Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":62,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"181\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[61],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":64,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"179\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[63],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":61,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"185\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[60],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":65,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"174\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[64],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":60,"Name":"DataSourceRDD","Scope":"{\"id\":\"185\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":63,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"180\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[62],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission 
Time":1596020225343,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 5","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"5","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"16","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":10,"Stage Attempt ID":0,"Task Info":{"Task ID":15,"Index":0,"Attempt":0,"Launch Time":1596020225359,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":10,"Stage Attempt ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":15,"Index":0,"Attempt":0,"Launch Time":1596020225359,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225400,"Failed":false,"Killed":false,"Accumulables":[{"ID":806,"Name":"shuffle write time","Update":"530930","Value":"530930","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":805,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":804,"Name":"shuffle bytes written","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":796,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count 
Failed Values":true,"Metadata":"sql"},{"ID":853,"Name":"duration","Update":"21","Value":"21","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":854,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":855,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":857,"Name":"time in aggregation build","Update":"14","Value":"14","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":859,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":860,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":862,"Name":"time in aggregation build","Update":"9","Value":"9","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":864,"Name":"duration","Update":"21","Value":"21","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":865,"Name":"number of output rows","Update":"38","Value":"38","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":910,"Name":"internal.metrics.input.recordsRead","Update":38,"Value":38,"Internal":true,"Count Failed Values":true},{"ID":908,"Name":"internal.metrics.shuffle.write.writeTime","Update":530930,"Value":530930,"Internal":true,"Count Failed Values":true},{"ID":907,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":906,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":897,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":892,"Name":"internal.metrics.resultSize","Update":2544,"Value":2544,"Internal":true,"Count Failed Values":true},{"ID":891,"Name":"internal.metrics.executorCpuTime","Update":22440089,"Value":22440089,"Internal":true,"Count Failed Values":true},{"ID":890,"Name":"internal.metrics.executorRunTime","Update":29,"Value":29,"Internal":true,"Count Failed Values":true},{"ID":889,"Name":"internal.metrics.executorDeserializeCpuTime","Update":6808170,"Value":6808170,"Internal":true,"Count Failed Values":true},{"ID":888,"Name":"internal.metrics.executorDeserializeTime","Update":6,"Value":6,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":6,"Executor Deserialize CPU Time":6808170,"Executor Run Time":29,"Executor CPU Time":22440089,"Peak Execution Memory":524288,"Result Size":2544,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":168,"Shuffle Write Time":530930,"Shuffle Records Written":1},"Input 
Metrics":{"Bytes Read":0,"Records Read":38},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":10,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":66,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"173\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[65],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":62,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"181\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[61],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":64,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"179\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[63],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":61,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"185\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[60],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":65,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"174\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[64],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":60,"Name":"DataSourceRDD","Scope":"{\"id\":\"185\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":63,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"180\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[62],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020225343,"Completion Time":1596020225401,"Accumulables":[{"ID":855,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":891,"Name":"internal.metrics.executorCpuTime","Value":22440089,"Internal":true,"Count Failed Values":true},{"ID":864,"Name":"duration","Value":"21","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":804,"Name":"shuffle bytes written","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":908,"Name":"internal.metrics.shuffle.write.writeTime","Value":530930,"Internal":true,"Count Failed Values":true},{"ID":890,"Name":"internal.metrics.executorRunTime","Value":29,"Internal":true,"Count Failed Values":true},{"ID":857,"Name":"time in aggregation build","Value":"14","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":860,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":806,"Name":"shuffle write time","Value":"530930","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":854,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":853,"Name":"duration","Value":"21","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":862,"Name":"time in aggregation build","Value":"9","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":889,"Name":"internal.metrics.executorDeserializeCpuTime","Value":6808170,"Internal":true,"Count Failed Values":true},{"ID":907,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":892,"Name":"internal.metrics.resultSize","Value":2544,"Internal":true,"Count Failed Values":true},{"ID":910,"Name":"internal.metrics.input.recordsRead","Value":38,"Internal":true,"Count Failed Values":true},{"ID":865,"Name":"number of output rows","Value":"38","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":805,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":796,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":859,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":888,"Name":"internal.metrics.executorDeserializeTime","Value":6,"Internal":true,"Count Failed Values":true},{"ID":897,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true},{"ID":906,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":168,"Internal":true,"Count Failed Values":true}],"Resource Profile Id":0}} 
+{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":11,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":71,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"165\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[70],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":70,"Name":"StateStoreRDD","Scope":"{\"id\":\"168\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[69],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":69,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"169\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[68],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":67,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"173\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[66],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":68,"Name":"StateStoreRDD","Scope":"{\"id\":\"172\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[67],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[10],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020225410,"Accumulables":[],"Resource Profile 
Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 5","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"5","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"16","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":11,"Stage Attempt ID":0,"Task Info":{"Task ID":16,"Index":0,"Attempt":0,"Launch Time":1596020225417,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":11,"Stage Attempt ID":0,"Task Info":{"Task ID":17,"Index":1,"Attempt":0,"Launch Time":1596020225417,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":11,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":17,"Index":1,"Attempt":0,"Launch Time":1596020225417,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225498,"Failed":false,"Killed":false,"Accumulables":[{"ID":829,"Name":"duration","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":834,"Name":"avg hash probe bucket list 
iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":830,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":831,"Name":"peak memory","Update":"4456448","Value":"4456448","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":833,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":839,"Name":"time to update","Update":"11","Value":"11","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":838,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":840,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":841,"Name":"time to commit changes","Update":"37","Value":"37","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":843,"Name":"estimated size of state only on current version","Update":"368","Value":"368","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":835,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":844,"Name":"count of cache hit on states cache in provider","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":842,"Name":"memory used by state","Update":"784","Value":"784","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":837,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":846,"Name":"duration","Update":"11","Value":"11","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":847,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":848,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":850,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":852,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":798,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":802,"Name":"fetch wait time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":801,"Name":"local bytes read","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":803,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":930,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":929,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":928,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":927,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":926,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed 
Values":true},{"ID":925,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":924,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":922,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":4718592,"Internal":true,"Count Failed Values":true},{"ID":917,"Name":"internal.metrics.resultSize","Update":5574,"Value":5574,"Internal":true,"Count Failed Values":true},{"ID":916,"Name":"internal.metrics.executorCpuTime","Update":17945299,"Value":17945299,"Internal":true,"Count Failed Values":true},{"ID":915,"Name":"internal.metrics.executorRunTime","Update":68,"Value":68,"Internal":true,"Count Failed Values":true},{"ID":914,"Name":"internal.metrics.executorDeserializeCpuTime","Update":3451032,"Value":3451032,"Internal":true,"Count Failed Values":true},{"ID":913,"Name":"internal.metrics.executorDeserializeTime","Update":3,"Value":3,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":3,"Executor Deserialize CPU Time":3451032,"Executor Run Time":68,"Executor CPU Time":17945299,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":168,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":11,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":16,"Index":0,"Attempt":0,"Launch Time":1596020225417,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225509,"Failed":false,"Killed":false,"Accumulables":[{"ID":829,"Name":"duration","Update":"2","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":831,"Name":"peak memory","Update":"262144","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":833,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":839,"Name":"time to update","Update":"4","Value":"15","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":840,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":841,"Name":"time to commit changes","Update":"50","Value":"87","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":843,"Name":"estimated size of state only on current version","Update":"88","Value":"456","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":844,"Name":"count of cache hit on states cache in provider","Update":"10","Value":"20","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":842,"Name":"memory used by state","Update":"400","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":846,"Name":"duration","Update":"4","Value":"15","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":848,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":850,"Name":"time in aggregation build","Update":"0","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":930,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":929,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":928,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":927,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":926,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":925,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":924,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":922,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":917,"Name":"internal.metrics.resultSize","Update":5311,"Value":10885,"Internal":true,"Count Failed Values":true},{"ID":916,"Name":"internal.metrics.executorCpuTime","Update":15599091,"Value":33544390,"Internal":true,"Count Failed Values":true},{"ID":915,"Name":"internal.metrics.executorRunTime","Update":84,"Value":152,"Internal":true,"Count Failed Values":true},{"ID":914,"Name":"internal.metrics.executorDeserializeCpuTime","Update":4357806,"Value":7808838,"Internal":true,"Count Failed Values":true},{"ID":913,"Name":"internal.metrics.executorDeserializeTime","Update":4,"Value":7,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":4,"Executor Deserialize CPU Time":4357806,"Executor Run Time":84,"Executor CPU Time":15599091,"Peak Execution Memory":524288,"Result Size":5311,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records 
Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":11,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":71,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"165\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[70],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":70,"Name":"StateStoreRDD","Scope":"{\"id\":\"168\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[69],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":69,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"169\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[68],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":67,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"173\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[66],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":68,"Name":"StateStoreRDD","Scope":"{\"id\":\"172\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[67],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[10],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020225410,"Completion Time":1596020225514,"Accumulables":[{"ID":846,"Name":"duration","Value":"15","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":837,"Name":"number of total state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":801,"Name":"local 
bytes read","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":831,"Name":"peak memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":926,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":917,"Name":"internal.metrics.resultSize","Value":10885,"Internal":true,"Count Failed Values":true},{"ID":830,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":848,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":803,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":839,"Name":"time to update","Value":"15","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":929,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":833,"Name":"time in aggregation build","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":842,"Name":"memory used by state","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":914,"Name":"internal.metrics.executorDeserializeCpuTime","Value":7808838,"Internal":true,"Count Failed Values":true},{"ID":922,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":913,"Name":"internal.metrics.executorDeserializeTime","Value":7,"Internal":true,"Count Failed Values":true},{"ID":925,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count Failed Values":true},{"ID":844,"Name":"count of cache hit on states cache in provider","Value":"20","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":835,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":916,"Name":"internal.metrics.executorCpuTime","Value":33544390,"Internal":true,"Count Failed Values":true},{"ID":829,"Name":"duration","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":928,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":168,"Internal":true,"Count Failed Values":true},{"ID":802,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":838,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":847,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":850,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":841,"Name":"time to commit changes","Value":"87","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":927,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":930,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":840,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":834,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":852,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":798,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":843,"Name":"estimated size of state only on current version","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":915,"Name":"internal.metrics.executorRunTime","Value":152,"Internal":true,"Count Failed Values":true},{"ID":924,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":5,"Completion Time":1596020225514,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":17,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 5","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) LocalTableScan\nOutput [2]: [value#116, count#117]\nArguments: [value#116, count#117]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#116, count#117]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":938,"metricType":"sum"}]},"time":1596020225536} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":17,"time":1596020225541} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":16,"time":1596020225542} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":15,"time":1596020225542} 
+{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:57:05.123Z","batchId":5,"batchDuration":437,"durationMs":{"triggerExecution":437,"queryPlanning":35,"getBatch":1,"latestOffset":3,"addBatch":361,"walCommit":18},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":1184,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":456,"loadedMapCacheHitCount":20,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":"{\"test5\":{\"0\":48799}}","endOffset":"{\"test5\":{\"0\":48837}}","numInputRows":38,"inputRowsPerSecond":97.68637532133675,"processedRowsPerSecond":86.95652173913044}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":18,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 6","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48837}}, {\"test5\":{\"0\":48881}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) 
DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1528]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 6a12c2d9-8d02-4241-93fc-f53da01bb454, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 6a12c2d9-8d02-4241-93fc-f53da01bb454, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27ec018d\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27ec018d","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 6, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 6, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#1452]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], 
functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1022,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1021,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1018,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1019,"metricType":"timing"},{"name":"peak memory","accumulatorId":1017,"metricType":"size"},{"name":"number of output rows","accumulatorId":1016,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1020,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1013,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1014,"metricType":"timing"},{"name":"peak memory","accumulatorId":1012,"metricType":"size"},{"name":"number of output rows","accumulatorId":1011,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1015,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1010,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":962,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":963,"metricType":"nsTiming"},{"name":"records read","accumulatorId":960,"metricType":"sum"},{"name":"local bytes read","accumulatorId":958,"metricType":"size"},{"name":"fetch wait time","accumulatorId":959,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":956,"metricType":"size"},{"name":"local blocks read","accumulatorId":955,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":954,"metricType":"sum"},{"name":"data size","accumulatorId":953,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":957,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":961,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output 
rows","accumulatorId":1009,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1006,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1007,"metricType":"timing"},{"name":"peak memory","accumulatorId":1005,"metricType":"size"},{"name":"number of output rows","accumulatorId":1004,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1008,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1003,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":993,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":994,"metricType":"sum"},{"name":"memory used by state","accumulatorId":999,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":1001,"metricType":"sum"},{"name":"number of output rows","accumulatorId":992,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":1000,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":1002,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":998,"metricType":"timing"},{"name":"time to remove","accumulatorId":997,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":995,"metricType":"sum"},{"name":"time to update","accumulatorId":996,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":989,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":990,"metricType":"timing"},{"name":"peak memory","accumulatorId":988,"metricType":"size"},{"name":"number of output rows","accumulatorId":987,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":991,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":986,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020225657} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":19,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 6","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 
(14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48837}}, {\"test5\":{\"0\":48881}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1604]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 96456757-8d0b-46da-a006-9fe2cb6fc936, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = 96456757-8d0b-46da-a006-9fe2cb6fc936, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27ec018d\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@27ec018d","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave 
[value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 6, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 6, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#1452]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1022,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1021,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1018,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1019,"metricType":"timing"},{"name":"peak memory","accumulatorId":1017,"metricType":"size"},{"name":"number of output rows","accumulatorId":1016,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1020,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1013,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1014,"metricType":"timing"},{"name":"peak memory","accumulatorId":1012,"metricType":"size"},{"name":"number of output rows","accumulatorId":1011,"metricType":"sum"},{"name":"avg hash probe bucket list 
iters","accumulatorId":1015,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1010,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":962,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":963,"metricType":"nsTiming"},{"name":"records read","accumulatorId":960,"metricType":"sum"},{"name":"local bytes read","accumulatorId":958,"metricType":"size"},{"name":"fetch wait time","accumulatorId":959,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":956,"metricType":"size"},{"name":"local blocks read","accumulatorId":955,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":954,"metricType":"sum"},{"name":"data size","accumulatorId":953,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":957,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":961,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1009,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1006,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1007,"metricType":"timing"},{"name":"peak memory","accumulatorId":1005,"metricType":"size"},{"name":"number of output rows","accumulatorId":1004,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1008,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1003,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":993,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":994,"metricType":"sum"},{"name":"memory used by state","accumulatorId":999,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":1001,"metricType":"sum"},{"name":"number of output rows","accumulatorId":992,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":1000,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":1002,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":998,"metricType":"timing"},{"name":"time to remove","accumulatorId":997,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":995,"metricType":"sum"},{"name":"time to update","accumulatorId":996,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":989,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":990,"metricType":"timing"},{"name":"peak memory","accumulatorId":988,"metricType":"size"},{"name":"number of output rows","accumulatorId":987,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":991,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":986,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020225687} +{"Event":"SparkListenerJobStart","Job ID":6,"Submission Time":1596020225759,"Stage Infos":[{"Stage ID":12,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":78,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"206\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[77],"Storage Level":{"Use Disk":false,"Use 
Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":75,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"213\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[74],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":74,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"214\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[73],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":77,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"207\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[76],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":72,"Name":"DataSourceRDD","Scope":"{\"id\":\"218\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":73,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"218\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[72],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":76,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"212\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[75],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":13,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":83,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"198\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[82],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":81,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"202\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[80],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":79,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"206\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[78],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":80,"Name":"StateStoreRDD","Scope":"{\"id\":\"205\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[79],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":82,"Name":"StateStoreRDD","Scope":"{\"id\":\"201\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[81],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[12],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage IDs":[12,13],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 6","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"6","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"19","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":12,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":78,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"206\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[77],"Storage Level":{"Use Disk":false,"Use 
Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":75,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"213\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[74],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":74,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"214\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[73],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":77,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"207\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[76],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":72,"Name":"DataSourceRDD","Scope":"{\"id\":\"218\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":73,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"218\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[72],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":76,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"212\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[75],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission 
Time":1596020225760,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 6","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"6","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"19","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":12,"Stage Attempt ID":0,"Task Info":{"Task ID":18,"Index":0,"Attempt":0,"Launch Time":1596020225766,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":12,"Stage Attempt ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":18,"Index":0,"Attempt":0,"Launch Time":1596020225766,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225796,"Failed":false,"Killed":false,"Accumulables":[{"ID":963,"Name":"shuffle write time","Update":"543836","Value":"543836","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":962,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":961,"Name":"shuffle bytes written","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":953,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count 
Failed Values":true,"Metadata":"sql"},{"ID":1010,"Name":"duration","Update":"17","Value":"17","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1011,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1012,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1014,"Name":"time in aggregation build","Update":"11","Value":"11","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1016,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1017,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1019,"Name":"time in aggregation build","Update":"8","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1021,"Name":"duration","Update":"17","Value":"17","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1022,"Name":"number of output rows","Update":"44","Value":"44","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1067,"Name":"internal.metrics.input.recordsRead","Update":44,"Value":44,"Internal":true,"Count Failed Values":true},{"ID":1065,"Name":"internal.metrics.shuffle.write.writeTime","Update":543836,"Value":543836,"Internal":true,"Count Failed Values":true},{"ID":1064,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1063,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":1054,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":1049,"Name":"internal.metrics.resultSize","Update":2544,"Value":2544,"Internal":true,"Count Failed Values":true},{"ID":1048,"Name":"internal.metrics.executorCpuTime","Update":23733439,"Value":23733439,"Internal":true,"Count Failed Values":true},{"ID":1047,"Name":"internal.metrics.executorRunTime","Update":23,"Value":23,"Internal":true,"Count Failed Values":true},{"ID":1046,"Name":"internal.metrics.executorDeserializeCpuTime","Update":3714406,"Value":3714406,"Internal":true,"Count Failed Values":true},{"ID":1045,"Name":"internal.metrics.executorDeserializeTime","Update":3,"Value":3,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":3,"Executor Deserialize CPU Time":3714406,"Executor Run Time":23,"Executor CPU Time":23733439,"Peak Execution Memory":524288,"Result Size":2544,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":168,"Shuffle Write Time":543836,"Shuffle Records 
Written":1},"Input Metrics":{"Bytes Read":0,"Records Read":44},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":12,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":78,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"206\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[77],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":75,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"213\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[74],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":74,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"214\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[73],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":77,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"207\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[76],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":72,"Name":"DataSourceRDD","Scope":"{\"id\":\"218\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":73,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"218\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[72],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":76,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"212\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[75],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020225760,"Completion Time":1596020225797,"Accumulables":[{"ID":1064,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":1010,"Name":"duration","Value":"17","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1046,"Name":"internal.metrics.executorDeserializeCpuTime","Value":3714406,"Internal":true,"Count Failed Values":true},{"ID":1019,"Name":"time in aggregation build","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1067,"Name":"internal.metrics.input.recordsRead","Value":44,"Internal":true,"Count Failed Values":true},{"ID":1022,"Name":"number of output rows","Value":"44","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1049,"Name":"internal.metrics.resultSize","Value":2544,"Internal":true,"Count Failed Values":true},{"ID":1016,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":962,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":953,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1054,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true},{"ID":1045,"Name":"internal.metrics.executorDeserializeTime","Value":3,"Internal":true,"Count Failed Values":true},{"ID":1063,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":168,"Internal":true,"Count Failed Values":true},{"ID":1048,"Name":"internal.metrics.executorCpuTime","Value":23733439,"Internal":true,"Count Failed Values":true},{"ID":1012,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1021,"Name":"duration","Value":"17","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":961,"Name":"shuffle bytes written","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1065,"Name":"internal.metrics.shuffle.write.writeTime","Value":543836,"Internal":true,"Count Failed Values":true},{"ID":1047,"Name":"internal.metrics.executorRunTime","Value":23,"Internal":true,"Count Failed Values":true},{"ID":1014,"Name":"time in aggregation build","Value":"11","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":963,"Name":"shuffle write time","Value":"543836","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1017,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1011,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} 
+{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":13,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":83,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"198\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[82],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":81,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"202\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[80],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":79,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"206\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[78],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":80,"Name":"StateStoreRDD","Scope":"{\"id\":\"205\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[79],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":82,"Name":"StateStoreRDD","Scope":"{\"id\":\"201\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[81],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[12],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020225801,"Accumulables":[],"Resource Profile 
Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 6","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"6","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"19","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":13,"Stage Attempt ID":0,"Task Info":{"Task ID":19,"Index":0,"Attempt":0,"Launch Time":1596020225808,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":13,"Stage Attempt ID":0,"Task Info":{"Task ID":20,"Index":1,"Attempt":0,"Launch Time":1596020225809,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":13,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":19,"Index":0,"Attempt":0,"Launch Time":1596020225808,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225868,"Failed":false,"Killed":false,"Accumulables":[{"ID":986,"Name":"duration","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":988,"Name":"peak 
memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":990,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":996,"Name":"time to update","Update":"4","Value":"4","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":997,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":998,"Name":"time to commit changes","Update":"26","Value":"26","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1000,"Name":"estimated size of state only on current version","Update":"88","Value":"88","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1001,"Name":"count of cache hit on states cache in provider","Update":"12","Value":"12","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":999,"Name":"memory used by state","Update":"400","Value":"400","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1003,"Name":"duration","Update":"4","Value":"4","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1005,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1007,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1087,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1086,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1085,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1084,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1083,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1082,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1081,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1079,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":1074,"Name":"internal.metrics.resultSize","Update":5311,"Value":5311,"Internal":true,"Count Failed Values":true},{"ID":1073,"Name":"internal.metrics.executorCpuTime","Update":17503528,"Value":17503528,"Internal":true,"Count Failed Values":true},{"ID":1072,"Name":"internal.metrics.executorRunTime","Update":50,"Value":50,"Internal":true,"Count Failed Values":true},{"ID":1071,"Name":"internal.metrics.executorDeserializeCpuTime","Update":4255703,"Value":4255703,"Internal":true,"Count Failed Values":true},{"ID":1070,"Name":"internal.metrics.executorDeserializeTime","Update":4,"Value":4,"Internal":true,"Count Failed Values":true}]},"Task Executor 
Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":4,"Executor Deserialize CPU Time":4255703,"Executor Run Time":50,"Executor CPU Time":17503528,"Peak Execution Memory":524288,"Result Size":5311,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":13,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":20,"Index":1,"Attempt":0,"Launch Time":1596020225809,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020225874,"Failed":false,"Killed":false,"Accumulables":[{"ID":986,"Name":"duration","Update":"2","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":991,"Name":"avg hash probe bucket list iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":987,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":988,"Name":"peak memory","Update":"4456448","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":990,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":996,"Name":"time to update","Update":"15","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":995,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":997,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":998,"Name":"time to commit changes","Update":"23","Value":"49","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1000,"Name":"estimated size of state only on current version","Update":"368","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":992,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1001,"Name":"count of cache hit on states cache in provider","Update":"12","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":999,"Name":"memory used by state","Update":"784","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":994,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1003,"Name":"duration","Update":"15","Value":"19","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":1004,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1005,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1007,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1009,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":955,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":959,"Name":"fetch wait time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":958,"Name":"local bytes read","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":960,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1087,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1086,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1085,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":1084,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1083,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1082,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1081,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1079,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":1074,"Name":"internal.metrics.resultSize","Update":5574,"Value":10885,"Internal":true,"Count Failed Values":true},{"ID":1073,"Name":"internal.metrics.executorCpuTime","Update":17516707,"Value":35020235,"Internal":true,"Count Failed Values":true},{"ID":1072,"Name":"internal.metrics.executorRunTime","Update":56,"Value":106,"Internal":true,"Count Failed Values":true},{"ID":1071,"Name":"internal.metrics.executorDeserializeCpuTime","Update":3750230,"Value":8005933,"Internal":true,"Count Failed Values":true},{"ID":1070,"Name":"internal.metrics.executorDeserializeTime","Update":3,"Value":7,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":3,"Executor Deserialize CPU Time":3750230,"Executor Run Time":56,"Executor CPU Time":17516707,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote 
Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":168,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":13,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":83,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"198\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[82],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":81,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"202\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[80],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":79,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"206\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[78],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":80,"Name":"StateStoreRDD","Scope":"{\"id\":\"205\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[79],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":82,"Name":"StateStoreRDD","Scope":"{\"id\":\"201\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[81],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[12],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020225801,"Completion Time":1596020225874,"Accumulables":[{"ID":1070,"Name":"internal.metrics.executorDeserializeTime","Value":7,"Internal":true,"Count Failed Values":true},{"ID":1079,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":992,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1073,"Name":"internal.metrics.executorCpuTime","Value":35020235,"Internal":true,"Count Failed Values":true},{"ID":1082,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count Failed Values":true},{"ID":1001,"Name":"count of cache hit on states cache in provider","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":995,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1004,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":986,"Name":"duration","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":959,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1085,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":168,"Internal":true,"Count Failed Values":true},{"ID":1007,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":998,"Name":"time to commit changes","Value":"49","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1084,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":997,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1087,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":955,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1081,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true},{"ID":991,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1009,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1072,"Name":"internal.metrics.executorRunTime","Value":106,"Internal":true,"Count Failed Values":true},{"ID":1000,"Name":"estimated size of state only on current version","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":994,"Name":"number of total state 
rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1003,"Name":"duration","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":958,"Name":"local bytes read","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":988,"Name":"peak memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1074,"Name":"internal.metrics.resultSize","Value":10885,"Internal":true,"Count Failed Values":true},{"ID":1083,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":960,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1086,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":987,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1005,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":996,"Name":"time to update","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1071,"Name":"internal.metrics.executorDeserializeCpuTime","Value":8005933,"Internal":true,"Count Failed Values":true},{"ID":999,"Name":"memory used by state","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":990,"Name":"time in aggregation build","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":6,"Completion Time":1596020225875,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":20,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 6","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) LocalTableScan\nOutput [2]: [value#130, count#131]\nArguments: [value#130, count#131]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#130, count#131]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1095,"metricType":"sum"}]},"time":1596020225891} 
+{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":20,"time":1596020225896} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":19,"time":1596020225897} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":18,"time":1596020225897} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:57:05.562Z","batchId":6,"batchDuration":351,"durationMs":{"triggerExecution":351,"queryPlanning":28,"getBatch":1,"latestOffset":6,"addBatch":273,"walCommit":25},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":1184,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":456,"loadedMapCacheHitCount":24,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":"{\"test5\":{\"0\":48837}}","endOffset":"{\"test5\":{\"0\":48881}}","numInputRows":44,"inputRowsPerSecond":100.22779043280183,"processedRowsPerSecond":125.35612535612536}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":21,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 7","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48881}}, {\"test5\":{\"0\":48917}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, 
offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1759]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = c0968891-bf48-4112-a19b-444014085d1d, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = c0968891-bf48-4112-a19b-444014085d1d, opId = 0, ver = 0, numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@6313b68e\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@6313b68e","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 7, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 7, numPartitions = 2], 
2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#1683]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1179,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1178,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1175,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1176,"metricType":"timing"},{"name":"peak memory","accumulatorId":1174,"metricType":"size"},{"name":"number of output rows","accumulatorId":1173,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1177,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1170,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1171,"metricType":"timing"},{"name":"peak memory","accumulatorId":1169,"metricType":"size"},{"name":"number of output rows","accumulatorId":1168,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1172,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1167,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":1119,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":1120,"metricType":"nsTiming"},{"name":"records read","accumulatorId":1117,"metricType":"sum"},{"name":"local bytes read","accumulatorId":1115,"metricType":"size"},{"name":"fetch wait time","accumulatorId":1116,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":1113,"metricType":"size"},{"name":"local blocks read","accumulatorId":1112,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":1111,"metricType":"sum"},{"name":"data 
size","accumulatorId":1110,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":1114,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":1118,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1166,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1163,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1164,"metricType":"timing"},{"name":"peak memory","accumulatorId":1162,"metricType":"size"},{"name":"number of output rows","accumulatorId":1161,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1165,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1160,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":1150,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":1151,"metricType":"sum"},{"name":"memory used by state","accumulatorId":1156,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":1158,"metricType":"sum"},{"name":"number of output rows","accumulatorId":1149,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":1157,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":1159,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":1155,"metricType":"timing"},{"name":"time to remove","accumulatorId":1154,"metricType":"timing"},{"name":"number of updated state rows","accumulatorId":1152,"metricType":"sum"},{"name":"time to update","accumulatorId":1153,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1146,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1147,"metricType":"timing"},{"name":"peak memory","accumulatorId":1145,"metricType":"size"},{"name":"number of output rows","accumulatorId":1144,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1148,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1143,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020225988} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":22,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 7","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nWriteToDataSourceV2 (14)\n+- * HashAggregate (13)\n +- StateStoreSave (12)\n +- * HashAggregate (11)\n +- StateStoreRestore (10)\n +- Exchange (9)\n +- * HashAggregate (8)\n +- * HashAggregate (7)\n +- * SerializeFromObject (6)\n +- MapPartitions (5)\n +- DeserializeToObject (4)\n +- * Project (3)\n +- * Project (2)\n +- MicroBatchScan (1)\n\n\n(1) MicroBatchScan\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nArguments: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13], org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan@7e7b182c, KafkaV2[Subscribe[test5]], {\"test5\":{\"0\":48881}}, {\"test5\":{\"0\":48917}}\n\n(2) Project [codegen id : 1]\nOutput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(3) Project [codegen id : 1]\nOutput [1]: [cast(value#8 as string) AS value#21]\nInput [7]: [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]\n\n(4) DeserializeToObject\nInput [1]: [value#21]\nArguments: value#21.toString, obj#27: java.lang.String\n\n(5) MapPartitions\nInput [1]: [obj#27]\nArguments: org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String\n\n(6) SerializeFromObject [codegen id : 2]\nInput [1]: [obj#28]\nArguments: [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]\n\n(7) HashAggregate [codegen id : 2]\nInput [1]: [value#29]\nKeys [1]: [value#29]\nFunctions [1]: [partial_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(8) HashAggregate [codegen id : 2]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(9) Exchange\nInput [2]: [value#29, count#38L]\nArguments: hashpartitioning(value#29, 2), true, [id=#1835]\n\n(10) StateStoreRestore\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = e165b23b-1a6f-459f-9c51-288922bb2647, opId = 0, ver = 0, numPartitions = 2], 2\n\n(11) HashAggregate [codegen id : 3]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [merge_count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count#38L]\n\n(12) StateStoreSave\nInput [2]: [value#29, count#38L]\nArguments: [value#29], state info [ checkpoint = , runId = e165b23b-1a6f-459f-9c51-288922bb2647, opId = 0, ver = 0, 
numPartitions = 2], Append, 0, 2\n\n(13) HashAggregate [codegen id : 4]\nInput [2]: [value#29, count#38L]\nKeys [1]: [value#29]\nFunctions [1]: [count(1)]\nAggregate Attributes [1]: [count(1)#31L]\nResults [2]: [value#29, count(1)#31L AS count#32L]\n\n(14) WriteToDataSourceV2\nInput [2]: [value#29, count#32L]\nArguments: org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@6313b68e\n\n","sparkPlanInfo":{"nodeName":"WriteToDataSourceV2","simpleString":"WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWrite@6313b68e","children":[{"nodeName":"WholeStageCodegen (4)","simpleString":"WholeStageCodegen (4)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreSave","simpleString":"StateStoreSave [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 7, numPartitions = 2], Complete, 0, 2","children":[{"nodeName":"WholeStageCodegen (3)","simpleString":"WholeStageCodegen (3)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"StateStoreRestore","simpleString":"StateStoreRestore [value#29], state info [ checkpoint = file:/tmp/temporary-025d7997-5b66-4def-abbf-bdcca57312b9/state, runId = e225d92f-2545-48f8-87a2-9c0309580f8a, opId = 0, ver = 7, numPartitions = 2], 2","children":[{"nodeName":"Exchange","simpleString":"Exchange hashpartitioning(value#29, 2), true, [id=#1683]","children":[{"nodeName":"WholeStageCodegen (2)","simpleString":"WholeStageCodegen (2)","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[merge_count(1)])","children":[{"nodeName":"HashAggregate","simpleString":"HashAggregate(keys=[value#29], functions=[partial_count(1)])","children":[{"nodeName":"SerializeFromObject","simpleString":"SerializeFromObject [staticinvoke(class org.apache.spark.unsafe.types.UTF8String, StringType, fromString, input[0, java.lang.String, true], true, false) AS value#29]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MapPartitions","simpleString":"MapPartitions org.apache.spark.sql.Dataset$$Lambda$1321/872917583@67b99068, obj#28: java.lang.String","children":[{"nodeName":"DeserializeToObject","simpleString":"DeserializeToObject value#21.toString, obj#27: java.lang.String","children":[{"nodeName":"WholeStageCodegen (1)","simpleString":"WholeStageCodegen (1)","children":[{"nodeName":"Project","simpleString":"Project [cast(value#8 as string) AS value#21]","children":[{"nodeName":"Project","simpleString":"Project [key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13]","children":[{"nodeName":"InputAdapter","simpleString":"InputAdapter","children":[{"nodeName":"MicroBatchScan","simpleString":"MicroBatchScan[key#7, value#8, topic#9, partition#10, offset#11L, timestamp#12, timestampType#13] class org.apache.spark.sql.kafka010.KafkaSourceProvider$KafkaScan","children":[],"metadata":{},"metrics":[{"name":"number of output 
rows","accumulatorId":1179,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1178,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1175,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1176,"metricType":"timing"},{"name":"peak memory","accumulatorId":1174,"metricType":"size"},{"name":"number of output rows","accumulatorId":1173,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1177,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1170,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1171,"metricType":"timing"},{"name":"peak memory","accumulatorId":1169,"metricType":"size"},{"name":"number of output rows","accumulatorId":1168,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1172,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1167,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"shuffle records written","accumulatorId":1119,"metricType":"sum"},{"name":"shuffle write time","accumulatorId":1120,"metricType":"nsTiming"},{"name":"records read","accumulatorId":1117,"metricType":"sum"},{"name":"local bytes read","accumulatorId":1115,"metricType":"size"},{"name":"fetch wait time","accumulatorId":1116,"metricType":"timing"},{"name":"remote bytes read","accumulatorId":1113,"metricType":"size"},{"name":"local blocks read","accumulatorId":1112,"metricType":"sum"},{"name":"remote blocks read","accumulatorId":1111,"metricType":"sum"},{"name":"data size","accumulatorId":1110,"metricType":"size"},{"name":"remote bytes read to disk","accumulatorId":1114,"metricType":"size"},{"name":"shuffle bytes written","accumulatorId":1118,"metricType":"size"}]}],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1166,"metricType":"sum"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1163,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1164,"metricType":"timing"},{"name":"peak memory","accumulatorId":1162,"metricType":"size"},{"name":"number of output rows","accumulatorId":1161,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1165,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1160,"metricType":"timing"}]}],"metadata":{},"metrics":[{"name":"number of inputs which are later than watermark ('inputs' are relative to operators)","accumulatorId":1150,"metricType":"sum"},{"name":"number of total state rows","accumulatorId":1151,"metricType":"sum"},{"name":"memory used by state","accumulatorId":1156,"metricType":"size"},{"name":"count of cache hit on states cache in provider","accumulatorId":1158,"metricType":"sum"},{"name":"number of output rows","accumulatorId":1149,"metricType":"sum"},{"name":"estimated size of state only on current version","accumulatorId":1157,"metricType":"size"},{"name":"count of cache miss on states cache in provider","accumulatorId":1159,"metricType":"sum"},{"name":"time to commit changes","accumulatorId":1155,"metricType":"timing"},{"name":"time to remove","accumulatorId":1154,"metricType":"timing"},{"name":"number of updated state 
rows","accumulatorId":1152,"metricType":"sum"},{"name":"time to update","accumulatorId":1153,"metricType":"timing"}]}],"metadata":{},"metrics":[]}],"metadata":{},"metrics":[{"name":"spill size","accumulatorId":1146,"metricType":"size"},{"name":"time in aggregation build","accumulatorId":1147,"metricType":"timing"},{"name":"peak memory","accumulatorId":1145,"metricType":"size"},{"name":"number of output rows","accumulatorId":1144,"metricType":"sum"},{"name":"avg hash probe bucket list iters","accumulatorId":1148,"metricType":"average"}]}],"metadata":{},"metrics":[{"name":"duration","accumulatorId":1143,"metricType":"timing"}]}],"metadata":{},"metrics":[]},"time":1596020226019} +{"Event":"SparkListenerJobStart","Job ID":7,"Submission Time":1596020226076,"Stage Infos":[{"Stage ID":15,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":95,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"231\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[94],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":93,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"235\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[92],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":91,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"239\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[90],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":94,"Name":"StateStoreRDD","Scope":"{\"id\":\"234\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[93],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":92,"Name":"StateStoreRDD","Scope":"{\"id\":\"238\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[91],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[14],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0},{"Stage ID":14,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":90,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"239\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[89],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":88,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"245\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[87],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":84,"Name":"DataSourceRDD","Scope":"{\"id\":\"251\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":85,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"251\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[84],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":89,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"240\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[88],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":86,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"247\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[85],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":87,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"246\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[86],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Accumulables":[],"Resource Profile Id":0}],"Stage IDs":[15,14],"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 7","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"7","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"22","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerStageSubmitted","Stage 
Info":{"Stage ID":14,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":90,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"239\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[89],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":88,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"245\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[87],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":84,"Name":"DataSourceRDD","Scope":"{\"id\":\"251\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":85,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"251\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[84],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":89,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"240\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[88],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":86,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"247\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[85],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":87,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"246\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[86],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020226077,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 7","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"7","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"22","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":14,"Stage Attempt ID":0,"Task Info":{"Task ID":21,"Index":0,"Attempt":0,"Launch Time":1596020226086,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":14,"Stage 
Attempt ID":0,"Task Type":"ShuffleMapTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":21,"Index":0,"Attempt":0,"Launch Time":1596020226086,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020226116,"Failed":false,"Killed":false,"Accumulables":[{"ID":1120,"Name":"shuffle write time","Update":"543034","Value":"543034","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1119,"Name":"shuffle records written","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1118,"Name":"shuffle bytes written","Update":"168","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1110,"Name":"data size","Update":"128","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1167,"Name":"duration","Update":"13","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1168,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1169,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1171,"Name":"time in aggregation build","Update":"8","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1173,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1174,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1176,"Name":"time in aggregation build","Update":"6","Value":"6","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1178,"Name":"duration","Update":"13","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1179,"Name":"number of output rows","Update":"36","Value":"36","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1224,"Name":"internal.metrics.input.recordsRead","Update":36,"Value":36,"Internal":true,"Count Failed Values":true},{"ID":1222,"Name":"internal.metrics.shuffle.write.writeTime","Update":543034,"Value":543034,"Internal":true,"Count Failed Values":true},{"ID":1221,"Name":"internal.metrics.shuffle.write.recordsWritten","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1220,"Name":"internal.metrics.shuffle.write.bytesWritten","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":1211,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":524288,"Internal":true,"Count Failed Values":true},{"ID":1206,"Name":"internal.metrics.resultSize","Update":2544,"Value":2544,"Internal":true,"Count Failed Values":true},{"ID":1205,"Name":"internal.metrics.executorCpuTime","Update":19652237,"Value":19652237,"Internal":true,"Count Failed Values":true},{"ID":1204,"Name":"internal.metrics.executorRunTime","Update":19,"Value":19,"Internal":true,"Count Failed Values":true},{"ID":1203,"Name":"internal.metrics.executorDeserializeCpuTime","Update":2829254,"Value":2829254,"Internal":true,"Count Failed Values":true},{"ID":1202,"Name":"internal.metrics.executorDeserializeTime","Update":2,"Value":2,"Internal":true,"Count Failed Values":true}]},"Task Executor 
Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":2,"Executor Deserialize CPU Time":2829254,"Executor Run Time":19,"Executor CPU Time":19652237,"Peak Execution Memory":524288,"Result Size":2544,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":168,"Shuffle Write Time":543034,"Shuffle Records Written":1},"Input Metrics":{"Bytes Read":0,"Records Read":36},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":14,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":1,"RDD Info":[{"RDD ID":90,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"239\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[89],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":88,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"245\",\"name\":\"MapPartitions\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[87],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":84,"Name":"DataSourceRDD","Scope":"{\"id\":\"251\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":85,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"251\",\"name\":\"MicroBatchScan\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[84],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":89,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"240\",\"name\":\"WholeStageCodegen (2)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[88],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":86,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"247\",\"name\":\"WholeStageCodegen (1)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[85],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk 
Size":0},{"RDD ID":87,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"246\",\"name\":\"DeserializeToObject\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[86],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":1,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020226077,"Completion Time":1596020226117,"Accumulables":[{"ID":1205,"Name":"internal.metrics.executorCpuTime","Value":19652237,"Internal":true,"Count Failed Values":true},{"ID":1178,"Name":"duration","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1169,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1118,"Name":"shuffle bytes written","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1204,"Name":"internal.metrics.executorRunTime","Value":19,"Internal":true,"Count Failed Values":true},{"ID":1222,"Name":"internal.metrics.shuffle.write.writeTime","Value":543034,"Internal":true,"Count Failed Values":true},{"ID":1171,"Name":"time in aggregation build","Value":"8","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1120,"Name":"shuffle write time","Value":"543034","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1174,"Name":"peak memory","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1168,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1203,"Name":"internal.metrics.executorDeserializeCpuTime","Value":2829254,"Internal":true,"Count Failed Values":true},{"ID":1167,"Name":"duration","Value":"13","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1221,"Name":"internal.metrics.shuffle.write.recordsWritten","Value":1,"Internal":true,"Count Failed Values":true},{"ID":1176,"Name":"time in aggregation build","Value":"6","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1206,"Name":"internal.metrics.resultSize","Value":2544,"Internal":true,"Count Failed Values":true},{"ID":1224,"Name":"internal.metrics.input.recordsRead","Value":36,"Internal":true,"Count Failed Values":true},{"ID":1179,"Name":"number of output rows","Value":"36","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":1119,"Name":"shuffle records written","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1110,"Name":"data size","Value":"128","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1173,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1202,"Name":"internal.metrics.executorDeserializeTime","Value":2,"Internal":true,"Count Failed Values":true},{"ID":1211,"Name":"internal.metrics.peakExecutionMemory","Value":524288,"Internal":true,"Count Failed Values":true},{"ID":1220,"Name":"internal.metrics.shuffle.write.bytesWritten","Value":168,"Internal":true,"Count Failed Values":true}],"Resource Profile Id":0}} +{"Event":"SparkListenerStageSubmitted","Stage Info":{"Stage ID":15,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":95,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"231\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[94],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":93,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"235\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[92],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":91,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"239\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[90],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":94,"Name":"StateStoreRDD","Scope":"{\"id\":\"234\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[93],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":92,"Name":"StateStoreRDD","Scope":"{\"id\":\"238\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[91],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent IDs":[14],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native 
Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020226120,"Accumulables":[],"Resource Profile Id":0},"Properties":{"sql.streaming.queryId":"8d268dc2-bc9c-4be8-97a9-b135d2943028","spark.driver.host":"iZbp19vpr16ix621sdw476Z","spark.eventLog.enabled":"true","spark.sql.adaptive.enabled":"false","spark.job.interruptOnCancel":"true","spark.driver.port":"46309","__fetch_continuous_blocks_in_batch_enabled":"true","spark.jars":"file:/root/spark-3.1.0-SNAPSHOT-bin-hadoop2.8/./examples/jars/spark-examples_2.12-3.1.0-SNAPSHOT.jar","__is_continuous_processing":"false","spark.app.name":"StructuredKafkaWordCount","callSite.long":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","callSite.short":"start at StructuredKafkaWordCount.scala:86","spark.submit.pyFiles":"","spark.job.description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 7","spark.executor.id":"driver","spark.sql.cbo.enabled":"false","streaming.sql.batchId":"7","spark.jobGroup.id":"e225d92f-2545-48f8-87a2-9c0309580f8a","spark.submit.deployMode":"client","spark.master":"local[*]","spark.eventLog.dir":"/tmp/spark-history","spark.sql.execution.id":"22","spark.app.id":"local-1596020211915","spark.sql.shuffle.partitions":"2"}} +{"Event":"SparkListenerTaskStart","Stage ID":15,"Stage Attempt ID":0,"Task Info":{"Task ID":22,"Index":0,"Attempt":0,"Launch Time":1596020226128,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskStart","Stage ID":15,"Stage 
Attempt ID":0,"Task Info":{"Task ID":23,"Index":1,"Attempt":0,"Launch Time":1596020226129,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":0,"Failed":false,"Killed":false,"Accumulables":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":15,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":23,"Index":1,"Attempt":0,"Launch Time":1596020226129,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020226196,"Failed":false,"Killed":false,"Accumulables":[{"ID":1143,"Name":"duration","Update":"3","Value":"3","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1148,"Name":"avg hash probe bucket list iters","Update":"10","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1144,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1145,"Name":"peak memory","Update":"4456448","Value":"4456448","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1147,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1153,"Name":"time to update","Update":"21","Value":"21","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1152,"Name":"number of updated state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1154,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1155,"Name":"time to commit changes","Update":"19","Value":"19","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1157,"Name":"estimated size of state only on current version","Update":"368","Value":"368","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1149,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1158,"Name":"count of cache hit on states cache in provider","Update":"14","Value":"14","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1156,"Name":"memory used by state","Update":"784","Value":"784","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1151,"Name":"number of total state rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1160,"Name":"duration","Update":"21","Value":"21","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1161,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1162,"Name":"peak memory","Update":"262144","Value":"262144","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1164,"Name":"time in aggregation build","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1166,"Name":"number of output rows","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1112,"Name":"local blocks read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1116,"Name":"fetch wait time","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1115,"Name":"local bytes read","Update":"168","Value":"168","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":1117,"Name":"records read","Update":"1","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1244,"Name":"internal.metrics.shuffle.read.recordsRead","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1243,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1242,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":168,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":1241,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1240,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1239,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":1,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1238,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1236,"Name":"internal.metrics.peakExecutionMemory","Update":4718592,"Value":4718592,"Internal":true,"Count Failed Values":true},{"ID":1231,"Name":"internal.metrics.resultSize","Update":5574,"Value":5574,"Internal":true,"Count Failed Values":true},{"ID":1230,"Name":"internal.metrics.executorCpuTime","Update":19415818,"Value":19415818,"Internal":true,"Count Failed Values":true},{"ID":1229,"Name":"internal.metrics.executorRunTime","Update":60,"Value":60,"Internal":true,"Count Failed Values":true},{"ID":1228,"Name":"internal.metrics.executorDeserializeCpuTime","Update":3845429,"Value":3845429,"Internal":true,"Count Failed Values":true},{"ID":1227,"Name":"internal.metrics.executorDeserializeTime","Update":3,"Value":3,"Internal":true,"Count Failed Values":true}]},"Task Executor Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":3,"Executor Deserialize CPU Time":3845429,"Executor Run Time":60,"Executor CPU Time":19415818,"Peak Execution Memory":4718592,"Result Size":5574,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":1,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":168,"Total Records Read":1},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerTaskEnd","Stage ID":15,"Stage Attempt ID":0,"Task Type":"ResultTask","Task End Reason":{"Reason":"Success"},"Task Info":{"Task ID":22,"Index":0,"Attempt":0,"Launch Time":1596020226128,"Executor ID":"driver","Host":"iZbp19vpr16ix621sdw476Z","Locality":"PROCESS_LOCAL","Speculative":false,"Getting Result Time":0,"Finish Time":1596020226204,"Failed":false,"Killed":false,"Accumulables":[{"ID":1143,"Name":"duration","Update":"2","Value":"5","Internal":true,"Count Failed 
Values":true,"Metadata":"sql"},{"ID":1145,"Name":"peak memory","Update":"262144","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1147,"Name":"time in aggregation build","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1153,"Name":"time to update","Update":"3","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1154,"Name":"time to remove","Update":"0","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1155,"Name":"time to commit changes","Update":"48","Value":"67","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1157,"Name":"estimated size of state only on current version","Update":"88","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1158,"Name":"count of cache hit on states cache in provider","Update":"14","Value":"28","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1156,"Name":"memory used by state","Update":"400","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1160,"Name":"duration","Update":"3","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1162,"Name":"peak memory","Update":"262144","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1164,"Name":"time in aggregation build","Update":"0","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1244,"Name":"internal.metrics.shuffle.read.recordsRead","Update":0,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1243,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1242,"Name":"internal.metrics.shuffle.read.localBytesRead","Update":0,"Value":168,"Internal":true,"Count Failed Values":true},{"ID":1241,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1240,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1239,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Update":0,"Value":1,"Internal":true,"Count Failed Values":true},{"ID":1238,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Update":0,"Value":0,"Internal":true,"Count Failed Values":true},{"ID":1236,"Name":"internal.metrics.peakExecutionMemory","Update":524288,"Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":1231,"Name":"internal.metrics.resultSize","Update":5311,"Value":10885,"Internal":true,"Count Failed Values":true},{"ID":1230,"Name":"internal.metrics.executorCpuTime","Update":14652861,"Value":34068679,"Internal":true,"Count Failed Values":true},{"ID":1229,"Name":"internal.metrics.executorRunTime","Update":65,"Value":125,"Internal":true,"Count Failed Values":true},{"ID":1228,"Name":"internal.metrics.executorDeserializeCpuTime","Update":3933877,"Value":7779306,"Internal":true,"Count Failed Values":true},{"ID":1227,"Name":"internal.metrics.executorDeserializeTime","Update":3,"Value":6,"Internal":true,"Count Failed Values":true}]},"Task Executor 
Metrics":{"JVMHeapMemory":0,"JVMOffHeapMemory":0,"OnHeapExecutionMemory":0,"OffHeapExecutionMemory":0,"OnHeapStorageMemory":0,"OffHeapStorageMemory":0,"OnHeapUnifiedMemory":0,"OffHeapUnifiedMemory":0,"DirectPoolMemory":0,"MappedPoolMemory":0,"ProcessTreeJVMVMemory":0,"ProcessTreeJVMRSSMemory":0,"ProcessTreePythonVMemory":0,"ProcessTreePythonRSSMemory":0,"ProcessTreeOtherVMemory":0,"ProcessTreeOtherRSSMemory":0,"MinorGCCount":0,"MinorGCTime":0,"MajorGCCount":0,"MajorGCTime":0},"Task Metrics":{"Executor Deserialize Time":3,"Executor Deserialize CPU Time":3933877,"Executor Run Time":65,"Executor CPU Time":14652861,"Peak Execution Memory":524288,"Result Size":5311,"JVM GC Time":0,"Result Serialization Time":0,"Memory Bytes Spilled":0,"Disk Bytes Spilled":0,"Shuffle Read Metrics":{"Remote Blocks Fetched":0,"Local Blocks Fetched":0,"Fetch Wait Time":0,"Remote Bytes Read":0,"Remote Bytes Read To Disk":0,"Local Bytes Read":0,"Total Records Read":0},"Shuffle Write Metrics":{"Shuffle Bytes Written":0,"Shuffle Write Time":0,"Shuffle Records Written":0},"Input Metrics":{"Bytes Read":0,"Records Read":0},"Output Metrics":{"Bytes Written":0,"Records Written":0},"Updated Blocks":[]}} +{"Event":"SparkListenerStageCompleted","Stage Info":{"Stage ID":15,"Stage Attempt ID":0,"Stage Name":"start at StructuredKafkaWordCount.scala:86","Number of Tasks":2,"RDD Info":[{"RDD ID":95,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"231\",\"name\":\"WholeStageCodegen (4)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[94],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":93,"Name":"MapPartitionsRDD","Scope":"{\"id\":\"235\",\"name\":\"WholeStageCodegen (3)\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[92],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":91,"Name":"ShuffledRowRDD","Scope":"{\"id\":\"239\",\"name\":\"Exchange\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[90],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":94,"Name":"StateStoreRDD","Scope":"{\"id\":\"234\",\"name\":\"StateStoreSave\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[93],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0},{"RDD ID":92,"Name":"StateStoreRDD","Scope":"{\"id\":\"238\",\"name\":\"StateStoreRestore\"}","Callsite":"start at StructuredKafkaWordCount.scala:86","Parent IDs":[91],"Storage Level":{"Use Disk":false,"Use Memory":false,"Deserialized":false,"Replication":1},"Barrier":false,"Number of Partitions":2,"Number of Cached Partitions":0,"Memory Size":0,"Disk Size":0}],"Parent 
IDs":[14],"Details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","Submission Time":1596020226120,"Completion Time":1596020226204,"Accumulables":[{"ID":1115,"Name":"local bytes read","Value":"168","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1160,"Name":"duration","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1151,"Name":"number of total state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1145,"Name":"peak memory","Value":"4718592","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1154,"Name":"time to remove","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1231,"Name":"internal.metrics.resultSize","Value":10885,"Internal":true,"Count Failed Values":true},{"ID":1240,"Name":"internal.metrics.shuffle.read.remoteBytesRead","Value":0,"Internal":true,"Count Failed Values":true},{"ID":1153,"Name":"time to update","Value":"24","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1162,"Name":"peak memory","Value":"524288","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1144,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1243,"Name":"internal.metrics.shuffle.read.fetchWaitTime","Value":0,"Internal":true,"Count Failed Values":true},{"ID":1117,"Name":"records read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1228,"Name":"internal.metrics.executorDeserializeCpuTime","Value":7779306,"Internal":true,"Count Failed Values":true},{"ID":1147,"Name":"time in aggregation build","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1156,"Name":"memory used by state","Value":"1184","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1236,"Name":"internal.metrics.peakExecutionMemory","Value":5242880,"Internal":true,"Count Failed Values":true},{"ID":1227,"Name":"internal.metrics.executorDeserializeTime","Value":6,"Internal":true,"Count Failed Values":true},{"ID":1158,"Name":"count of cache hit on states cache in provider","Value":"28","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1149,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1239,"Name":"internal.metrics.shuffle.read.localBlocksFetched","Value":1,"Internal":true,"Count 
Failed Values":true},{"ID":1230,"Name":"internal.metrics.executorCpuTime","Value":34068679,"Internal":true,"Count Failed Values":true},{"ID":1152,"Name":"number of updated state rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1242,"Name":"internal.metrics.shuffle.read.localBytesRead","Value":168,"Internal":true,"Count Failed Values":true},{"ID":1116,"Name":"fetch wait time","Value":"0","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1161,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1143,"Name":"duration","Value":"5","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1155,"Name":"time to commit changes","Value":"67","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1164,"Name":"time in aggregation build","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1241,"Name":"internal.metrics.shuffle.read.remoteBytesReadToDisk","Value":0,"Internal":true,"Count Failed Values":true},{"ID":1244,"Name":"internal.metrics.shuffle.read.recordsRead","Value":1,"Internal":true,"Count Failed Values":true},{"ID":1148,"Name":"avg hash probe bucket list iters","Value":"10","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1157,"Name":"estimated size of state only on current version","Value":"456","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1166,"Name":"number of output rows","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"},{"ID":1238,"Name":"internal.metrics.shuffle.read.remoteBlocksFetched","Value":0,"Internal":true,"Count Failed Values":true},{"ID":1229,"Name":"internal.metrics.executorRunTime","Value":125,"Internal":true,"Count Failed Values":true},{"ID":1112,"Name":"local blocks read","Value":"1","Internal":true,"Count Failed Values":true,"Metadata":"sql"}],"Resource Profile Id":0}} +{"Event":"SparkListenerJobEnd","Job ID":7,"Completion Time":1596020226204,"Job Result":{"Result":"JobSucceeded"}} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionStart","executionId":23,"description":"\nid = 8d268dc2-bc9c-4be8-97a9-b135d2943028\nrunId = e225d92f-2545-48f8-87a2-9c0309580f8a\nbatch = 7","details":"org.apache.spark.sql.streaming.DataStreamWriter.start(DataStreamWriter.scala:366)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount$.main(StructuredKafkaWordCount.scala:86)\norg.apache.spark.examples.sql.streaming.StructuredKafkaWordCount.main(StructuredKafkaWordCount.scala)\nsun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\nsun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)\nsun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\njava.lang.reflect.Method.invoke(Method.java:498)\norg.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)\norg.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:934)\norg.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)\norg.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)\norg.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)\norg.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1013)\norg.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1022)\norg.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)","physicalPlanDescription":"== Physical Plan ==\nLocalTableScan (1)\n\n\n(1) LocalTableScan\nOutput [2]: 
[value#144, count#145]\nArguments: [value#144, count#145]\n\n","sparkPlanInfo":{"nodeName":"LocalTableScan","simpleString":"LocalTableScan [value#144, count#145]","children":[],"metadata":{},"metrics":[{"name":"number of output rows","accumulatorId":1252,"metricType":"sum"}]},"time":1596020226221} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":23,"time":1596020226230} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":22,"time":1596020226231} +{"Event":"org.apache.spark.sql.execution.ui.SparkListenerSQLExecutionEnd","executionId":21,"time":1596020226231} +{"Event":"org.apache.spark.sql.streaming.StreamingQueryListener$QueryProgressEvent","progress":{"id":"8d268dc2-bc9c-4be8-97a9-b135d2943028","runId":"e225d92f-2545-48f8-87a2-9c0309580f8a","name":null,"timestamp":"2020-07-29T10:57:05.916Z","batchId":7,"batchDuration":341,"durationMs":{"triggerExecution":341,"queryPlanning":24,"getBatch":0,"latestOffset":3,"addBatch":271,"walCommit":14},"eventTime":{},"stateOperators":[{"numRowsTotal":1,"numRowsUpdated":1,"memoryUsedBytes":1184,"numLateInputs":0,"customMetrics":{"stateOnCurrentVersionSizeBytes":456,"loadedMapCacheHitCount":28,"loadedMapCacheMissCount":0}}],"sources":[{"description":"KafkaV2[Subscribe[test5]]","startOffset":"{\"test5\":{\"0\":48881}}","endOffset":"{\"test5\":{\"0\":48917}}","numInputRows":36,"inputRowsPerSecond":101.69491525423729,"processedRowsPerSecond":105.57184750733137}],"sink":{"description":"org.apache.spark.sql.execution.streaming.ConsoleTable$@514ba885","numOutputRows":1},"observedMetrics":{}}} +{"Event":"SparkListenerApplicationEnd","Timestamp":1596020226301} diff --git a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md index 855ba3f00a4e6..c681730569978 100644 --- a/sql/core/src/test/resources/sql-functions/sql-expression-schema.md +++ b/sql/core/src/test/resources/sql-functions/sql-expression-schema.md @@ -1,8 +1,8 @@ ## Summary - - Number of queries: 339 - - Number of expressions that missing example: 34 - - Expressions missing examples: and,bigint,binary,boolean,date,decimal,double,float,int,smallint,string,timestamp,tinyint,struct,cume_dist,dense_rank,input_file_block_length,input_file_block_start,input_file_name,lag,lead,monotonically_increasing_id,ntile,!,not,or,percent_rank,rank,row_number,spark_partition_id,version,window,positive,count_min_sketch + - Number of queries: 348 + - Number of expressions that missing example: 13 + - Expressions missing examples: bigint,binary,boolean,date,decimal,double,float,int,smallint,string,timestamp,tinyint,window ## Schema of Built-in Functions | Class name | Function name or alias | Query example | Output schema | | ---------- | ---------------------- | ------------- | ------------- | @@ -11,7 +11,7 @@ | org.apache.spark.sql.catalyst.expressions.Acosh | acosh | SELECT acosh(1) | struct | | org.apache.spark.sql.catalyst.expressions.Add | + | SELECT 1 + 2 | struct<(1 + 2):int> | | org.apache.spark.sql.catalyst.expressions.AddMonths | add_months | SELECT add_months('2016-08-31', 1) | struct | -| org.apache.spark.sql.catalyst.expressions.And | and | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.And | and | SELECT true and true | struct<(true AND true):boolean> | | org.apache.spark.sql.catalyst.expressions.ArrayAggregate | aggregate | SELECT aggregate(array(1, 2, 3), 0, (acc, x) -> acc + x) | struct | | 
org.apache.spark.sql.catalyst.expressions.ArrayContains | array_contains | SELECT array_contains(array(1, 2, 3), 2) | struct | | org.apache.spark.sql.catalyst.expressions.ArrayDistinct | array_distinct | SELECT array_distinct(array(1, 2, 3, null, 3)) | struct> | @@ -34,7 +34,7 @@ | org.apache.spark.sql.catalyst.expressions.Ascii | ascii | SELECT ascii('222') | struct | | org.apache.spark.sql.catalyst.expressions.Asin | asin | SELECT asin(0) | struct | | org.apache.spark.sql.catalyst.expressions.Asinh | asinh | SELECT asinh(0) | struct | -| org.apache.spark.sql.catalyst.expressions.AssertTrue | assert_true | SELECT assert_true(0 < 1) | struct | +| org.apache.spark.sql.catalyst.expressions.AssertTrue | assert_true | SELECT assert_true(0 < 1) | struct | | org.apache.spark.sql.catalyst.expressions.Atan | atan | SELECT atan(0) | struct | | org.apache.spark.sql.catalyst.expressions.Atan2 | atan2 | SELECT atan2(0, 0) | struct | | org.apache.spark.sql.catalyst.expressions.Atanh | atanh | SELECT atanh(0) | struct | @@ -79,17 +79,19 @@ | org.apache.spark.sql.catalyst.expressions.CreateArray | array | SELECT array(1, 2, 3) | struct> | | org.apache.spark.sql.catalyst.expressions.CreateMap | map | SELECT map(1.0, '2', 3.0, '4') | struct> | | org.apache.spark.sql.catalyst.expressions.CreateNamedStruct | named_struct | SELECT named_struct("a", 1, "b", 2, "c", 3) | struct> | -| org.apache.spark.sql.catalyst.expressions.CreateNamedStruct | struct | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.CreateNamedStruct | struct | SELECT struct(1, 2, 3) | struct> | | org.apache.spark.sql.catalyst.expressions.CsvToStructs | from_csv | SELECT from_csv('1, 0.8', 'a INT, b DOUBLE') | struct> | | org.apache.spark.sql.catalyst.expressions.Cube | cube | SELECT name, age, count(*) FROM VALUES (2, 'Alice'), (5, 'Bob') people(age, name) GROUP BY cube(name, age) | struct | -| org.apache.spark.sql.catalyst.expressions.CumeDist | cume_dist | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.CumeDist | cume_dist | SELECT a, b, cume_dist() OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.CurrentCatalog | current_catalog | SELECT current_catalog() | struct | | org.apache.spark.sql.catalyst.expressions.CurrentDatabase | current_database | SELECT current_database() | struct | | org.apache.spark.sql.catalyst.expressions.CurrentDate | current_date | SELECT current_date() | struct | +| org.apache.spark.sql.catalyst.expressions.CurrentTimeZone | current_timezone | SELECT current_timezone() | struct | | org.apache.spark.sql.catalyst.expressions.CurrentTimestamp | current_timestamp | SELECT current_timestamp() | struct | | org.apache.spark.sql.catalyst.expressions.DateAdd | date_add | SELECT date_add('2016-07-30', 1) | struct | | org.apache.spark.sql.catalyst.expressions.DateDiff | datediff | SELECT datediff('2009-07-31', '2009-07-30') | struct | | org.apache.spark.sql.catalyst.expressions.DateFormatClass | date_format | SELECT date_format('2016-04-08', 'y') | struct | +| org.apache.spark.sql.catalyst.expressions.DateFromUnixDate | date_from_unix_date | SELECT date_from_unix_date(1) | struct | | org.apache.spark.sql.catalyst.expressions.DatePart | date_part | SELECT date_part('YEAR', TIMESTAMP '2019-08-12 01:00:00.123456') | struct | | org.apache.spark.sql.catalyst.expressions.DateSub | date_sub | SELECT date_sub('2016-07-30', 1) | struct | | org.apache.spark.sql.catalyst.expressions.DayOfMonth | day | SELECT 
day('2009-07-30') | struct | @@ -97,7 +99,7 @@ | org.apache.spark.sql.catalyst.expressions.DayOfWeek | dayofweek | SELECT dayofweek('2009-07-30') | struct | | org.apache.spark.sql.catalyst.expressions.DayOfYear | dayofyear | SELECT dayofyear('2016-04-09') | struct | | org.apache.spark.sql.catalyst.expressions.Decode | decode | SELECT decode(encode('abc', 'utf-8'), 'utf-8') | struct | -| org.apache.spark.sql.catalyst.expressions.DenseRank | dense_rank | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.DenseRank | dense_rank | SELECT a, b, dense_rank(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.Divide | / | SELECT 3 / 2 | struct<(CAST(3 AS DOUBLE) / CAST(2 AS DOUBLE)):double> | | org.apache.spark.sql.catalyst.expressions.ElementAt | element_at | SELECT element_at(array(1, 2, 3), 2) | struct | | org.apache.spark.sql.catalyst.expressions.Elt | elt | SELECT elt(1, 'scala', 'java') | struct | @@ -135,9 +137,9 @@ | org.apache.spark.sql.catalyst.expressions.InitCap | initcap | SELECT initcap('sPark sql') | struct | | org.apache.spark.sql.catalyst.expressions.Inline | inline | SELECT inline(array(struct(1, 'a'), struct(2, 'b'))) | struct | | org.apache.spark.sql.catalyst.expressions.Inline | inline_outer | SELECT inline_outer(array(struct(1, 'a'), struct(2, 'b'))) | struct | -| org.apache.spark.sql.catalyst.expressions.InputFileBlockLength | input_file_block_length | N/A | N/A | -| org.apache.spark.sql.catalyst.expressions.InputFileBlockStart | input_file_block_start | N/A | N/A | -| org.apache.spark.sql.catalyst.expressions.InputFileName | input_file_name | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.InputFileBlockLength | input_file_block_length | SELECT input_file_block_length() | struct | +| org.apache.spark.sql.catalyst.expressions.InputFileBlockStart | input_file_block_start | SELECT input_file_block_start() | struct | +| org.apache.spark.sql.catalyst.expressions.InputFileName | input_file_name | SELECT input_file_name() | struct | | org.apache.spark.sql.catalyst.expressions.IntegralDivide | div | SELECT 3 div 2 | struct<(CAST(3 AS BIGINT) div CAST(2 AS BIGINT)):bigint> | | org.apache.spark.sql.catalyst.expressions.IsNaN | isnan | SELECT isnan(cast('NaN' as double)) | struct | | org.apache.spark.sql.catalyst.expressions.IsNotNull | isnotnull | SELECT isnotnull(1) | struct<(1 IS NOT NULL):boolean> | @@ -145,9 +147,9 @@ | org.apache.spark.sql.catalyst.expressions.JsonObjectKeys | json_object_keys | SELECT json_object_keys('{}') | struct> | | org.apache.spark.sql.catalyst.expressions.JsonToStructs | from_json | SELECT from_json('{"a":1, "b":0.8}', 'a INT, b DOUBLE') | struct> | | org.apache.spark.sql.catalyst.expressions.JsonTuple | json_tuple | SELECT json_tuple('{"a":1, "b":2}', 'a', 'b') | struct | -| org.apache.spark.sql.catalyst.expressions.Lag | lag | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.Lag | lag | SELECT a, b, lag(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.LastDay | last_day | SELECT last_day('2009-01-12') | struct | -| org.apache.spark.sql.catalyst.expressions.Lead | lead | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.Lead | lead | SELECT a, b, lead(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.Least | 
least | SELECT least(10, 9, 2, 4, 3) | struct | | org.apache.spark.sql.catalyst.expressions.Left | left | SELECT left('Spark SQL', 3) | struct | | org.apache.spark.sql.catalyst.expressions.Length | char_length | SELECT char_length('Spark SQL ') | struct | @@ -180,27 +182,28 @@ | org.apache.spark.sql.catalyst.expressions.MicrosToTimestamp | timestamp_micros | SELECT timestamp_micros(1230219000123123) | struct | | org.apache.spark.sql.catalyst.expressions.MillisToTimestamp | timestamp_millis | SELECT timestamp_millis(1230219000123) | struct | | org.apache.spark.sql.catalyst.expressions.Minute | minute | SELECT minute('2009-07-30 12:58:59') | struct | -| org.apache.spark.sql.catalyst.expressions.MonotonicallyIncreasingID | monotonically_increasing_id | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.MonotonicallyIncreasingID | monotonically_increasing_id | SELECT monotonically_increasing_id() | struct | | org.apache.spark.sql.catalyst.expressions.Month | month | SELECT month('2016-07-30') | struct | | org.apache.spark.sql.catalyst.expressions.MonthsBetween | months_between | SELECT months_between('1997-02-28 10:30:00', '1996-10-30') | struct | | org.apache.spark.sql.catalyst.expressions.Multiply | * | SELECT 2 * 3 | struct<(2 * 3):int> | | org.apache.spark.sql.catalyst.expressions.Murmur3Hash | hash | SELECT hash('Spark', array(123), 2) | struct | -| org.apache.spark.sql.catalyst.expressions.NTile | ntile | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.NTile | ntile | SELECT a, b, ntile(2) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.NaNvl | nanvl | SELECT nanvl(cast('NaN' as double), 123) | struct | | org.apache.spark.sql.catalyst.expressions.NextDay | next_day | SELECT next_day('2015-01-14', 'TU') | struct | -| org.apache.spark.sql.catalyst.expressions.Not | ! | N/A | N/A | -| org.apache.spark.sql.catalyst.expressions.Not | not | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.Not | ! | SELECT ! 
true | struct<(NOT true):boolean> | +| org.apache.spark.sql.catalyst.expressions.Not | not | SELECT not true | struct<(NOT true):boolean> | | org.apache.spark.sql.catalyst.expressions.Now | now | SELECT now() | struct | +| org.apache.spark.sql.catalyst.expressions.NthValue | nth_value | SELECT a, b, nth_value(b, 2) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.NullIf | nullif | SELECT nullif(2, 2) | struct | | org.apache.spark.sql.catalyst.expressions.Nvl | nvl | SELECT nvl(NULL, array('2')) | struct> | | org.apache.spark.sql.catalyst.expressions.Nvl2 | nvl2 | SELECT nvl2(NULL, 2, 1) | struct | | org.apache.spark.sql.catalyst.expressions.OctetLength | octet_length | SELECT octet_length('Spark SQL') | struct | -| org.apache.spark.sql.catalyst.expressions.Or | or | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.Or | or | SELECT true or false | struct<(true OR false):boolean> | | org.apache.spark.sql.catalyst.expressions.Overlay | overlay | SELECT overlay('Spark SQL' PLACING '_' FROM 6) | struct | | org.apache.spark.sql.catalyst.expressions.ParseToDate | to_date | SELECT to_date('2009-07-30 04:17:52') | struct | | org.apache.spark.sql.catalyst.expressions.ParseToTimestamp | to_timestamp | SELECT to_timestamp('2016-12-31 00:12:00') | struct | | org.apache.spark.sql.catalyst.expressions.ParseUrl | parse_url | SELECT parse_url('http://spark.apache.org/path?query=1', 'HOST') | struct | -| org.apache.spark.sql.catalyst.expressions.PercentRank | percent_rank | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.PercentRank | percent_rank | SELECT a, b, percent_rank(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.Pi | pi | SELECT pi() | struct | | org.apache.spark.sql.catalyst.expressions.Pmod | pmod | SELECT pmod(10, 3) | struct | | org.apache.spark.sql.catalyst.expressions.PosExplode | posexplode | SELECT posexplode(array(10,20)) | struct | @@ -208,14 +211,16 @@ | org.apache.spark.sql.catalyst.expressions.Pow | pow | SELECT pow(2, 3) | struct | | org.apache.spark.sql.catalyst.expressions.Pow | power | SELECT power(2, 3) | struct | | org.apache.spark.sql.catalyst.expressions.Quarter | quarter | SELECT quarter('2016-08-31') | struct | -| org.apache.spark.sql.catalyst.expressions.RLike | rlike | SELECT '%SystemDrive%\Users\John' rlike '%SystemDrive%\\Users.*' | struct<%SystemDrive%UsersJohn RLIKE %SystemDrive%\Users.*:boolean> | +| org.apache.spark.sql.catalyst.expressions.RLike | regexp_like | SELECT regexp_like('%SystemDrive%\Users\John', '%SystemDrive%\\Users.*') | struct | +| org.apache.spark.sql.catalyst.expressions.RLike | rlike | SELECT rlike('%SystemDrive%\Users\John', '%SystemDrive%\\Users.*') | struct | +| org.apache.spark.sql.catalyst.expressions.RaiseError | raise_error | SELECT raise_error('custom error message') | struct | | org.apache.spark.sql.catalyst.expressions.Rand | rand | SELECT rand() | struct | | org.apache.spark.sql.catalyst.expressions.Rand | random | SELECT random() | struct | | org.apache.spark.sql.catalyst.expressions.Randn | randn | SELECT randn() | struct | -| org.apache.spark.sql.catalyst.expressions.Rank | rank | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.Rank | rank | SELECT a, b, rank(b) OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | 
org.apache.spark.sql.catalyst.expressions.RegExpExtract | regexp_extract | SELECT regexp_extract('100-200', '(\\d+)-(\\d+)', 1) | struct | | org.apache.spark.sql.catalyst.expressions.RegExpExtractAll | regexp_extract_all | SELECT regexp_extract_all('100-200, 300-400', '(\\d+)-(\\d+)', 1) | struct> | -| org.apache.spark.sql.catalyst.expressions.RegExpReplace | regexp_replace | SELECT regexp_replace('100-200', '(\\d+)', 'num') | struct | +| org.apache.spark.sql.catalyst.expressions.RegExpReplace | regexp_replace | SELECT regexp_replace('100-200', '(\\d+)', 'num') | struct | | org.apache.spark.sql.catalyst.expressions.Remainder | % | SELECT 2 % 1.8 | struct<(CAST(CAST(2 AS DECIMAL(1,0)) AS DECIMAL(2,1)) % CAST(1.8 AS DECIMAL(2,1))):decimal(2,1)> | | org.apache.spark.sql.catalyst.expressions.Remainder | mod | SELECT 2 % 1.8 | struct<(CAST(CAST(2 AS DECIMAL(1,0)) AS DECIMAL(2,1)) % CAST(1.8 AS DECIMAL(2,1))):decimal(2,1)> | | org.apache.spark.sql.catalyst.expressions.Reverse | reverse | SELECT reverse('Spark SQL') | struct | @@ -223,7 +228,7 @@ | org.apache.spark.sql.catalyst.expressions.Rint | rint | SELECT rint(12.3456) | struct | | org.apache.spark.sql.catalyst.expressions.Rollup | rollup | SELECT name, age, count(*) FROM VALUES (2, 'Alice'), (5, 'Bob') people(age, name) GROUP BY rollup(name, age) | struct | | org.apache.spark.sql.catalyst.expressions.Round | round | SELECT round(2.5, 0) | struct | -| org.apache.spark.sql.catalyst.expressions.RowNumber | row_number | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.RowNumber | row_number | SELECT a, b, row_number() OVER (PARTITION BY a ORDER BY b) FROM VALUES ('A1', 2), ('A1', 1), ('A2', 3), ('A1', 1) tab(a, b) | struct | | org.apache.spark.sql.catalyst.expressions.SchemaOfCsv | schema_of_csv | SELECT schema_of_csv('1,abc') | struct | | org.apache.spark.sql.catalyst.expressions.SchemaOfJson | schema_of_json | SELECT schema_of_json('[{"col":0}]') | struct | | org.apache.spark.sql.catalyst.expressions.Second | second | SELECT second('2009-07-30 12:58:59') | struct | @@ -246,8 +251,8 @@ | org.apache.spark.sql.catalyst.expressions.Slice | slice | SELECT slice(array(1, 2, 3, 4), 2, 2) | struct> | | org.apache.spark.sql.catalyst.expressions.SortArray | sort_array | SELECT sort_array(array('b', 'd', null, 'c', 'a'), true) | struct> | | org.apache.spark.sql.catalyst.expressions.SoundEx | soundex | SELECT soundex('Miller') | struct | -| org.apache.spark.sql.catalyst.expressions.SparkPartitionID | spark_partition_id | N/A | N/A | -| org.apache.spark.sql.catalyst.expressions.SparkVersion | version | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.SparkPartitionID | spark_partition_id | SELECT spark_partition_id() | struct | +| org.apache.spark.sql.catalyst.expressions.SparkVersion | version | SELECT version() | struct | | org.apache.spark.sql.catalyst.expressions.Sqrt | sqrt | SELECT sqrt(4) | struct | | org.apache.spark.sql.catalyst.expressions.Stack | stack | SELECT stack(2, 1, 2, 3) | struct | | org.apache.spark.sql.catalyst.expressions.StringInstr | instr | SELECT instr('SparkSQL', 'SQL') | struct | @@ -284,8 +289,12 @@ | org.apache.spark.sql.catalyst.expressions.TypeOf | typeof | SELECT typeof(1) | struct | | org.apache.spark.sql.catalyst.expressions.UnBase64 | unbase64 | SELECT unbase64('U3BhcmsgU1FM') | struct | | org.apache.spark.sql.catalyst.expressions.UnaryMinus | negative | SELECT negative(1) | struct | -| org.apache.spark.sql.catalyst.expressions.UnaryPositive | positive | N/A | N/A | +| 
org.apache.spark.sql.catalyst.expressions.UnaryPositive | positive | SELECT positive(1) | struct<(+ 1):int> | | org.apache.spark.sql.catalyst.expressions.Unhex | unhex | SELECT decode(unhex('537061726B2053514C'), 'UTF-8') | struct | +| org.apache.spark.sql.catalyst.expressions.UnixDate | unix_date | SELECT unix_date(DATE("1970-01-02")) | struct | +| org.apache.spark.sql.catalyst.expressions.UnixMicros | unix_micros | SELECT unix_micros(TIMESTAMP('1970-01-01 00:00:01Z')) | struct | +| org.apache.spark.sql.catalyst.expressions.UnixMillis | unix_millis | SELECT unix_millis(TIMESTAMP('1970-01-01 00:00:01Z')) | struct | +| org.apache.spark.sql.catalyst.expressions.UnixSeconds | unix_seconds | SELECT unix_seconds(TIMESTAMP('1970-01-01 00:00:01Z')) | struct | | org.apache.spark.sql.catalyst.expressions.UnixTimestamp | unix_timestamp | SELECT unix_timestamp() | struct | | org.apache.spark.sql.catalyst.expressions.Upper | ucase | SELECT ucase('SparkSql') | struct | | org.apache.spark.sql.catalyst.expressions.Upper | upper | SELECT upper('SparkSql') | struct | @@ -296,8 +305,8 @@ | org.apache.spark.sql.catalyst.expressions.XxHash64 | xxhash64 | SELECT xxhash64('Spark', array(123), 2) | struct | | org.apache.spark.sql.catalyst.expressions.Year | year | SELECT year('2016-07-30') | struct | | org.apache.spark.sql.catalyst.expressions.ZipWith | zip_with | SELECT zip_with(array(1, 2, 3), array('a', 'b', 'c'), (x, y) -> (y, x)) | struct>> | -| org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile | approx_percentile | SELECT approx_percentile(10.0, array(0.5, 0.4, 0.1), 100) | struct> | -| org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile | percentile_approx | SELECT percentile_approx(10.0, array(0.5, 0.4, 0.1), 100) | struct> | +| org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile | approx_percentile | SELECT approx_percentile(col, array(0.5, 0.4, 0.1), 100) FROM VALUES (0), (1), (2), (10) AS tab(col) | struct> | +| org.apache.spark.sql.catalyst.expressions.aggregate.ApproximatePercentile | percentile_approx | SELECT percentile_approx(col, array(0.5, 0.4, 0.1), 100) FROM VALUES (0), (1), (2), (10) AS tab(col) | struct> | | org.apache.spark.sql.catalyst.expressions.aggregate.Average | avg | SELECT avg(col) FROM VALUES (1), (2), (3) AS tab(col) | struct | | org.apache.spark.sql.catalyst.expressions.aggregate.Average | mean | SELECT mean(col) FROM VALUES (1), (2), (3) AS tab(col) | struct | | org.apache.spark.sql.catalyst.expressions.aggregate.BitAndAgg | bit_and | SELECT bit_and(col) FROM VALUES (3), (5) AS tab(col) | struct | @@ -313,7 +322,7 @@ | org.apache.spark.sql.catalyst.expressions.aggregate.Corr | corr | SELECT corr(c1, c2) FROM VALUES (3, 2), (3, 3), (6, 4) as tab(c1, c2) | struct | | org.apache.spark.sql.catalyst.expressions.aggregate.Count | count | SELECT count(*) FROM VALUES (NULL), (5), (5), (20) AS tab(col) | struct | | org.apache.spark.sql.catalyst.expressions.aggregate.CountIf | count_if | SELECT count_if(col % 2 = 0) FROM VALUES (NULL), (0), (1), (2), (3) AS tab(col) | struct | -| org.apache.spark.sql.catalyst.expressions.aggregate.CountMinSketchAgg | count_min_sketch | N/A | N/A | +| org.apache.spark.sql.catalyst.expressions.aggregate.CountMinSketchAgg | count_min_sketch | SELECT hex(count_min_sketch(col, 0.5d, 0.5d, 1)) FROM VALUES (1), (2), (1) AS tab(col) | struct | | org.apache.spark.sql.catalyst.expressions.aggregate.CovPopulation | covar_pop | SELECT covar_pop(c1, c2) FROM VALUES (1,1), (2,2), (3,3) AS tab(c1, c2) 
| struct | | org.apache.spark.sql.catalyst.expressions.aggregate.CovSample | covar_samp | SELECT covar_samp(c1, c2) FROM VALUES (1,1), (2,2), (3,3) AS tab(c1, c2) | struct | | org.apache.spark.sql.catalyst.expressions.aggregate.First | first | SELECT first(col) FROM VALUES (10), (5), (20) AS tab(col) | struct | diff --git a/sql/core/src/test/resources/sql-tests/inputs/ansi/array.sql b/sql/core/src/test/resources/sql-tests/inputs/ansi/array.sql new file mode 100644 index 0000000000000..662756cbfb0b0 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/ansi/array.sql @@ -0,0 +1 @@ +--IMPORT array.sql diff --git a/sql/core/src/test/resources/sql-tests/inputs/ansi/decimalArithmeticOperations.sql b/sql/core/src/test/resources/sql-tests/inputs/ansi/decimalArithmeticOperations.sql index d190f38345d6b..d843847e6a149 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/ansi/decimalArithmeticOperations.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/ansi/decimalArithmeticOperations.sql @@ -1,6 +1,6 @@ -- SPARK-23179: SQL ANSI 2011 states that in case of overflow during arithmetic operations, -- an exception should be thrown instead of returning NULL. --- This is what most of the SQL DBs do (eg. SQLServer, DB2). +-- This is what most of the SQL DBs do (e.g. SQLServer, DB2). -- tests for decimals handling in operations create table decimals_test(id int, a decimal(38,18), b decimal(38,18)) using parquet; diff --git a/sql/core/src/test/resources/sql-tests/inputs/ansi/map.sql b/sql/core/src/test/resources/sql-tests/inputs/ansi/map.sql new file mode 100644 index 0000000000000..23e5b9562973b --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/ansi/map.sql @@ -0,0 +1 @@ +--IMPORT map.sql diff --git a/sql/core/src/test/resources/sql-tests/inputs/ansi/parse-schema-string.sql b/sql/core/src/test/resources/sql-tests/inputs/ansi/parse-schema-string.sql new file mode 100644 index 0000000000000..42775102e650e --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/ansi/parse-schema-string.sql @@ -0,0 +1 @@ +--IMPORT parse-schema-string.sql diff --git a/sql/core/src/test/resources/sql-tests/inputs/array.sql b/sql/core/src/test/resources/sql-tests/inputs/array.sql index 984321ab795fc..f73b653659eb4 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/array.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/array.sql @@ -90,3 +90,15 @@ select size(date_array), size(timestamp_array) from primitive_arrays; + +-- index out of range for array elements +select element_at(array(1, 2, 3), 5); +select element_at(array(1, 2, 3), -5); +select element_at(array(1, 2, 3), 0); + +select elt(4, '123', '456'); +select elt(0, '123', '456'); +select elt(-1, '123', '456'); + +select array(1, 2, 3)[5]; +select array(1, 2, 3)[-1]; diff --git a/sql/core/src/test/resources/sql-tests/inputs/charvarchar.sql b/sql/core/src/test/resources/sql-tests/inputs/charvarchar.sql new file mode 100644 index 0000000000000..dbdb8ccee738c --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/charvarchar.sql @@ -0,0 +1,62 @@ +create table char_tbl(c char(5), v varchar(6)) using parquet; +desc formatted char_tbl; +desc formatted char_tbl c; +show create table char_tbl; + +create table char_tbl2 using parquet as select * from char_tbl; +show create table char_tbl2; +desc formatted char_tbl2; +desc formatted char_tbl2 c; + +create table char_tbl3 like char_tbl; +desc formatted char_tbl3; +desc formatted char_tbl3 c; +show create table char_tbl3; + +create view char_view as select * from 
char_tbl; +desc formatted char_view; +desc formatted char_view c; +show create table char_view; + +alter table char_tbl rename to char_tbl1; +desc formatted char_tbl1; + +alter table char_tbl1 change column c type char(6); +alter table char_tbl1 change column c type char(5); +desc formatted char_tbl1; + +alter table char_tbl1 add columns (d char(5)); +desc formatted char_tbl1; + +alter view char_view as select * from char_tbl2; +desc formatted char_view; + +alter table char_tbl1 SET TBLPROPERTIES('yes'='no'); +desc formatted char_tbl1; + +alter view char_view SET TBLPROPERTIES('yes'='no'); +desc formatted char_view; + +alter table char_tbl1 UNSET TBLPROPERTIES('yes'); +desc formatted char_tbl1; + +alter view char_view UNSET TBLPROPERTIES('yes'); +desc formatted char_view; + +alter table char_tbl1 SET SERDEPROPERTIES('yes'='no'); +desc formatted char_tbl1; + +create table char_part(c1 char(5), c2 char(2), v1 varchar(6), v2 varchar(2)) using parquet partitioned by (v2, c2); +desc formatted char_part; + +alter table char_part add partition (v2='ke', c2='nt') location 'loc1'; +desc formatted char_part; + +alter table char_part partition (v2='ke') rename to partition (v2='nt'); +desc formatted char_part; + +alter table char_part partition (v2='ke', c2='nt') set location 'loc2'; +desc formatted char_part; + +MSCK REPAIR TABLE char_part; +desc formatted char_part; diff --git a/sql/core/src/test/resources/sql-tests/inputs/count.sql b/sql/core/src/test/resources/sql-tests/inputs/count.sql index 203f04c589373..fc0d66258ea29 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/count.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/count.sql @@ -35,3 +35,6 @@ SELECT count(DISTINCT a), count(DISTINCT 3,2) FROM testData; SELECT count(DISTINCT a), count(DISTINCT 2), count(DISTINCT 2,3) FROM testData; SELECT count(DISTINCT a), count(DISTINCT 2), count(DISTINCT 3,2) FROM testData; SELECT count(distinct 0.8), percentile_approx(distinct a, 0.8) FROM testData; + +-- count without expressions +SELECT count() FROM testData; diff --git a/sql/core/src/test/resources/sql-tests/inputs/datetime.sql b/sql/core/src/test/resources/sql-tests/inputs/datetime.sql index 0445c7864946c..0493d8653c01f 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/datetime.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/datetime.sql @@ -14,7 +14,14 @@ select TIMESTAMP_MILLIS(-92233720368547758); select TIMESTAMP_SECONDS(0.1234567); -- truncation is OK for float/double select TIMESTAMP_SECONDS(0.1234567d), TIMESTAMP_SECONDS(FLOAT(0.1234567)); - +-- UNIX_SECONDS, UNIX_MILLISECONDS and UNIX_MICROSECONDS +select UNIX_SECONDS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_SECONDS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_SECONDS(null); +select UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MILLIS(null); +select UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MICROS(null); +-- DATE_FROM_UNIX_DATE +select DATE_FROM_UNIX_DATE(0), DATE_FROM_UNIX_DATE(1000), DATE_FROM_UNIX_DATE(null); +-- UNIX_DATE +select UNIX_DATE(DATE('1970-01-01')), UNIX_DATE(DATE('2020-12-04')), UNIX_DATE(null); -- [SPARK-16836] current_date and current_timestamp literals select current_date = current_date(), current_timestamp = current_timestamp(); @@ -149,7 +156,26 @@ select to_timestamp('2019-10-06 A', 'yyyy-MM-dd GGGGG'); select to_timestamp('22 05 2020 Friday', 'dd MM yyyy EEEEEE'); select to_timestamp('22 05 2020 Friday', 'dd 
MM yyyy EEEEE'); select unix_timestamp('22 05 2020 Friday', 'dd MM yyyy EEEEE'); -select from_json('{"time":"26/October/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')); -select from_json('{"date":"26/October/2015"}', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')); -select from_csv('26/October/2015', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')); -select from_csv('26/October/2015', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')); +select from_json('{"t":"26/October/2015"}', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')); +select from_json('{"d":"26/October/2015"}', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')); +select from_csv('26/October/2015', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')); +select from_csv('26/October/2015', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')); + +-- Datetime types parse error +select to_date("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS"); +select to_date("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS"); +select to_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS"); +select to_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS"); +select unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS"); +select unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS"); +select to_unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS"); +select to_unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS"); +select cast("Unparseable" as timestamp); +select cast("Unparseable" as date); + +-- next_day +select next_day("2015-07-23", "Mon"); +select next_day("2015-07-23", "xx"); +select next_day("xx", "Mon"); +select next_day(null, "Mon"); +select next_day(null, "xx"); diff --git a/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql b/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql index d55e398329b76..146977c806182 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql @@ -1,5 +1,5 @@ -- Test temp table -CREATE TEMPORARY VIEW desc_col_temp_view (key int COMMENT 'column_comment') USING PARQUET; +CREATE TEMPORARY VIEW desc_col_temp_view (key int COMMENT 'column_comment', col struct) USING PARQUET; DESC desc_col_temp_view key; @@ -13,6 +13,9 @@ DESC FORMATTED desc_col_temp_view desc_col_temp_view.key; -- Describe a non-existent column DESC desc_col_temp_view key1; +-- Describe a nested column +DESC desc_col_temp_view col.x; + -- Test persistent table CREATE TABLE desc_col_table (key int COMMENT 'column_comment') USING PARQUET; @@ -24,6 +27,9 @@ DESC EXTENDED desc_col_table key; DESC FORMATTED desc_col_table key; +-- Describe a non-existent column +DESC desc_col_table key1; + -- Test complex columns CREATE TABLE desc_complex_col_table (`a.b` int, col struct) USING PARQUET; diff --git a/sql/core/src/test/resources/sql-tests/inputs/explain-aqe.sql b/sql/core/src/test/resources/sql-tests/inputs/explain-aqe.sql index f4afa2b77a9d7..7aef901da4fb5 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/explain-aqe.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/explain-aqe.sql @@ -1,3 +1,4 @@ --IMPORT explain.sql --SET spark.sql.adaptive.enabled=true +--SET spark.sql.maxMetadataStringLength = 500 diff --git a/sql/core/src/test/resources/sql-tests/inputs/explain.sql b/sql/core/src/test/resources/sql-tests/inputs/explain.sql index 80bf258704c70..fdff1b4eef941 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/explain.sql +++ 
b/sql/core/src/test/resources/sql-tests/inputs/explain.sql @@ -1,5 +1,6 @@ --SET spark.sql.codegen.wholeStage = true --SET spark.sql.adaptive.enabled = false +--SET spark.sql.maxMetadataStringLength = 500 -- Test tables CREATE table explain_temp1 (key int, val int) USING PARQUET; @@ -9,6 +10,11 @@ CREATE table explain_temp4 (key int, val string) USING PARQUET; SET spark.sql.codegen.wholeStage = true; +-- distinct func +EXPLAIN EXTENDED + SELECT sum(distinct val) + FROM explain_temp1; + -- single table EXPLAIN FORMATTED SELECT key, max(val) diff --git a/sql/core/src/test/resources/sql-tests/inputs/group-by-filter.sql b/sql/core/src/test/resources/sql-tests/inputs/group-by-filter.sql index e4193d845f2e2..c1ccb654ee085 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/group-by-filter.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/group-by-filter.sql @@ -1,4 +1,7 @@ --- Test filter clause for aggregate expression. +-- Test filter clause for aggregate expression with codegen on and off. +--CONFIG_DIM1 spark.sql.codegen.wholeStage=true +--CONFIG_DIM1 spark.sql.codegen.wholeStage=false,spark.sql.codegen.factoryMode=CODEGEN_ONLY +--CONFIG_DIM1 spark.sql.codegen.wholeStage=false,spark.sql.codegen.factoryMode=NO_CODEGEN --CONFIG_DIM1 spark.sql.optimizeNullAwareAntiJoin=true --CONFIG_DIM1 spark.sql.optimizeNullAwareAntiJoin=false diff --git a/sql/core/src/test/resources/sql-tests/inputs/group-by.sql b/sql/core/src/test/resources/sql-tests/inputs/group-by.sql index fedf03d774e42..6ee1014739759 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/group-by.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/group-by.sql @@ -86,6 +86,16 @@ SELECT 1 FROM range(10) HAVING MAX(id) > 0; SELECT id FROM range(10) HAVING id > 0; +SET spark.sql.legacy.parser.havingWithoutGroupByAsWhere=true; + +SELECT 1 FROM range(10) HAVING true; + +SELECT 1 FROM range(10) HAVING MAX(id) > 0; + +SELECT id FROM range(10) HAVING id > 0; + +SET spark.sql.legacy.parser.havingWithoutGroupByAsWhere=false; + -- Test data CREATE OR REPLACE TEMPORARY VIEW test_agg AS SELECT * FROM VALUES (1, true), (1, false), @@ -166,3 +176,6 @@ SELECT * FROM (SELECT COUNT(*) AS cnt FROM test_agg) WHERE cnt > 1L; SELECT count(*) FROM test_agg WHERE count(*) > 1L; SELECT count(*) FROM test_agg WHERE count(*) + 1L > 1L; SELECT count(*) FROM test_agg WHERE k = 1 or k = 2 or count(*) + 1L > 1L or max(k) > 1; + +-- Aggregate with multiple distinct decimal columns +SELECT AVG(DISTINCT decimal_col), SUM(DISTINCT decimal_col) FROM VALUES (CAST(1 AS DECIMAL(9, 0))) t(decimal_col); diff --git a/sql/core/src/test/resources/sql-tests/inputs/having.sql b/sql/core/src/test/resources/sql-tests/inputs/having.sql index 3b75be19b5677..2799b1a94d085 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/having.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/having.sql @@ -24,3 +24,9 @@ SELECT SUM(a) AS b, CAST('2020-01-01' AS DATE) AS fake FROM VALUES (1, 10), (2, SELECT SUM(a) AS b FROM VALUES (1, 10), (2, 20) AS T(a, b) GROUP BY GROUPING SETS ((b), (a, b)) HAVING b > 10; SELECT SUM(a) AS b FROM VALUES (1, 10), (2, 20) AS T(a, b) GROUP BY CUBE(a, b) HAVING b > 10; SELECT SUM(a) AS b FROM VALUES (1, 10), (2, 20) AS T(a, b) GROUP BY ROLLUP(a, b) HAVING b > 10; + +-- SPARK-33131: Grouping sets with having clause can not resolve qualified col name. 
+SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY GROUPING SETS(t.c1) HAVING t.c1 = 1; +SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY CUBE(t.c1) HAVING t.c1 = 1; +SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY ROLLUP(t.c1) HAVING t.c1 = 1; +SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY t.c1 HAVING t.c1 = 1; diff --git a/sql/core/src/test/resources/sql-tests/inputs/like-all.sql b/sql/core/src/test/resources/sql-tests/inputs/like-all.sql index a084dbef61a0c..51b689607e8e3 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/like-all.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/like-all.sql @@ -1,3 +1,5 @@ +-- test cases for like all + CREATE OR REPLACE TEMPORARY VIEW like_all_table AS SELECT * FROM (VALUES ('google', '%oo%'), ('facebook', '%oo%'), diff --git a/sql/core/src/test/resources/sql-tests/inputs/like-any.sql b/sql/core/src/test/resources/sql-tests/inputs/like-any.sql index 5758a2a494944..a6e9827d58d94 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/like-any.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/like-any.sql @@ -1,3 +1,5 @@ +-- test cases for like any + CREATE OR REPLACE TEMPORARY VIEW like_any_table AS SELECT * FROM (VALUES ('google', '%oo%'), ('facebook', '%oo%'), diff --git a/sql/core/src/test/resources/sql-tests/inputs/map.sql b/sql/core/src/test/resources/sql-tests/inputs/map.sql new file mode 100644 index 0000000000000..e2d855fba154e --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/map.sql @@ -0,0 +1,5 @@ +-- test cases for map functions + +-- key does not exist +select element_at(map(1, 'a', 2, 'b'), 5); +select map(1, 'a', 2, 'b')[5]; diff --git a/sql/core/src/test/resources/sql-tests/inputs/misc-functions.sql b/sql/core/src/test/resources/sql-tests/inputs/misc-functions.sql index 95f71925e9294..907ff33000d8e 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/misc-functions.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/misc-functions.sql @@ -8,3 +8,15 @@ select typeof(cast(1.0 as float)), typeof(1.0D), typeof(1.2); select typeof(date '1986-05-23'), typeof(timestamp '1986-05-23'), typeof(interval '23 days'); select typeof(x'ABCD'), typeof('SPARK'); select typeof(array(1, 2)), typeof(map(1, 2)), typeof(named_struct('a', 1, 'b', 'spark')); + +-- Spark-32793: Rewrite AssertTrue with RaiseError +SELECT assert_true(true), assert_true(boolean(1)); +SELECT assert_true(false); +SELECT assert_true(boolean(0)); +SELECT assert_true(null); +SELECT assert_true(boolean(null)); +SELECT assert_true(false, 'custom error message'); + +CREATE TEMPORARY VIEW tbl_misc AS SELECT * FROM (VALUES (1), (8), (2)) AS T(v); +SELECT raise_error('error message'); +SELECT if(v > 5, raise_error('too big: ' || v), v + 1) FROM tbl_misc; diff --git a/sql/core/src/test/resources/sql-tests/inputs/parse-schema-string.sql b/sql/core/src/test/resources/sql-tests/inputs/parse-schema-string.sql new file mode 100644 index 0000000000000..c67d45139fd6c --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/parse-schema-string.sql @@ -0,0 +1,5 @@ +-- Use keywords as attribute names +select from_csv('1', 'create INT'); +select from_csv('1', 'cube INT'); +select from_json('{"create":1}', 'create INT'); +select from_json('{"cube":1}', 'cube INT'); diff --git a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/case.sql b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/case.sql index 6d9c44c67a96b..b39ccb85fb366 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/case.sql +++ 
b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/case.sql @@ -65,11 +65,11 @@ SELECT '7' AS `None`, CASE WHEN rand() < 0 THEN 1 END AS `NULL on no matches`; +-- [SPARK-33008] Spark SQL throws an exception -- Constant-expression folding shouldn't evaluate unreachable subexpressions SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END; SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END; --- [SPARK-27923] PostgreSQL throws an exception but Spark SQL is NULL -- However we do not currently suppress folding of potentially -- reachable subexpressions SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl; diff --git a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql index 21ffd85f7d01f..2889941c1fcc1 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql @@ -636,7 +636,7 @@ DESC TABLE vv6; -- Check cases involving dropped/altered columns in a function's rowtype result -- --- Skip the tests below because Spark does't support PostgreSQL-specific UDFs/transactions +-- Skip the tests below because Spark doesn't support PostgreSQL-specific UDFs/transactions -- create table tt14t (f1 text, f2 text, f3 text, f4 text); -- insert into tt14t values('foo', 'bar', 'baz', '42'); -- diff --git a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/select_having.sql b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/select_having.sql index 2edde8df08047..0efe0877e9b3e 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/select_having.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/select_having.sql @@ -49,6 +49,7 @@ SELECT 1 AS one FROM test_having HAVING a > 1; SELECT 1 AS one FROM test_having HAVING 1 > 2; SELECT 1 AS one FROM test_having HAVING 1 < 2; +-- [SPARK-33008] Spark SQL throws an exception -- and just to prove that we aren't scanning the table: SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2; diff --git a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part1.sql b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part1.sql index 6e95aca7aff62..d12bee6e47223 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part1.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part1.sql @@ -95,7 +95,7 @@ SELECT last(ten) OVER (PARTITION BY four), ten, four FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s ORDER BY four, ten; --- [SPARK-27951] ANSI SQL: NTH_VALUE function +-- [SPARK-30707] Lead/Lag window function throws AnalysisException without ORDER BY clause -- SELECT nth_value(ten, four + 1) OVER (PARTITION BY four), ten, four -- FROM (SELECT * FROM tenk1 WHERE unique2 < 10 ORDER BY four, ten)s; @@ -301,7 +301,7 @@ FROM tenk1 WHERE unique1 < 10; -- unique1, four -- FROM tenk1 WHERE unique1 < 10 WINDOW w AS (order by four); --- [SPARK-27951] ANSI SQL: NTH_VALUE function +-- [SPARK-30707] Lead/Lag window function throws AnalysisException without ORDER BY clause -- SELECT first_value(unique1) over w, -- nth_value(unique1, 2) over w AS nth_2, -- last_value(unique1) over w, unique1, four diff --git a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part2.sql b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part2.sql index ba1acc9f56b4a..50c0bc3410312 100644 --- 
a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part2.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part2.sql @@ -105,7 +105,7 @@ FROM tenk1 WHERE unique1 < 10; -- select sum(salary) over (order by enroll_date range between '1 year' preceding and '1 year' following -- exclude ties), salary, enroll_date from empsalary; --- [SPARK-27951] ANSI SQL: NTH_VALUE function +-- [SPARK-28310] ANSI SQL grammar support: first_value/last_value(expression, [RESPECT NULLS | IGNORE NULLS]) -- select first_value(salary) over(order by salary range between 1000 preceding and 1000 following), -- lead(salary) over(order by salary range between 1000 preceding and 1000 following), -- nth_value(salary, 1) over(order by salary range between 1000 preceding and 1000 following), @@ -116,7 +116,7 @@ FROM tenk1 WHERE unique1 < 10; -- lag(salary) over(order by salary range between 1000 preceding and 1000 following), -- salary from empsalary; --- [SPARK-27951] ANSI SQL: NTH_VALUE function +-- [SPARK-28310] ANSI SQL grammar support: first_value/last_value(expression, [RESPECT NULLS | IGNORE NULLS]) -- select first_value(salary) over(order by salary range between 1000 following and 3000 following -- exclude current row), -- lead(salary) over(order by salary range between 1000 following and 3000 following exclude ties), diff --git a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part3.sql b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part3.sql index f4b8454da0d82..6f33a07631f7a 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part3.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/window_part3.sql @@ -399,8 +399,7 @@ SELECT range(1, 100) OVER () FROM empsalary; SELECT ntile(0) OVER (ORDER BY ten), ten, four FROM tenk1; --- [SPARK-27951] ANSI SQL: NTH_VALUE function --- SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1; +SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1; -- filter diff --git a/sql/core/src/test/resources/sql-tests/inputs/regexp-functions.sql b/sql/core/src/test/resources/sql-tests/inputs/regexp-functions.sql index 7128dee0a00d7..12b34ff7d54b1 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/regexp-functions.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/regexp-functions.sql @@ -31,3 +31,19 @@ SELECT regexp_extract_all('1a 2b 14m', '(\\d+)([a-z]+)', 3); SELECT regexp_extract_all('1a 2b 14m', '(\\d+)([a-z]+)', -1); SELECT regexp_extract_all('1a 2b 14m', '(\\d+)?([a-z]+)', 1); SELECT regexp_extract_all('a 2b 14m', '(\\d+)?([a-z]+)', 1); + +-- regexp_replace +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something'); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', -2); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 0); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 1); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 2); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 8); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', 26); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', 27); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', 30); +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', null); + +-- regexp_like +SELECT regexp_like('1a 2b 14m', '\\d+b'); +SELECT regexp_like('1a 2b 14m', 
'[a-z]+b'); \ No newline at end of file diff --git a/sql/core/src/test/resources/sql-tests/inputs/string-functions.sql b/sql/core/src/test/resources/sql-tests/inputs/string-functions.sql index f5ed2036dc8ac..80b4b8ca8cd54 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/string-functions.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/string-functions.sql @@ -53,3 +53,13 @@ SELECT trim(TRAILING 'xy' FROM 'TURNERyxXxy'); -- Check lpad/rpad with invalid length parameter SELECT lpad('hi', 'invalid_length'); SELECT rpad('hi', 'invalid_length'); + +-- decode +select decode(); +select decode(encode('abc', 'utf-8')); +select decode(encode('abc', 'utf-8'), 'utf-8'); +select decode(1, 1, 'Southlake'); +select decode(2, 1, 'Southlake'); +select decode(2, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle', 'Non domestic'); +select decode(6, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle', 'Non domestic'); +select decode(6, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle'); \ No newline at end of file diff --git a/sql/core/src/test/resources/sql-tests/inputs/subexp-elimination.sql b/sql/core/src/test/resources/sql-tests/inputs/subexp-elimination.sql new file mode 100644 index 0000000000000..9a594e0928ddc --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/subexp-elimination.sql @@ -0,0 +1,37 @@ +-- Test for subexpression elimination. + +--SET spark.sql.optimizer.enableJsonExpressionOptimization=false + +--CONFIG_DIM1 spark.sql.codegen.wholeStage=true +--CONFIG_DIM1 spark.sql.codegen.wholeStage=false + +--CONFIG_DIM2 spark.sql.codegen.factoryMode=CODEGEN_ONLY +--CONFIG_DIM2 spark.sql.codegen.factoryMode=NO_CODEGEN + +--CONFIG_DIM3 spark.sql.subexpressionElimination.enabled=true +--CONFIG_DIM3 spark.sql.subexpressionElimination.enabled=false + +-- Test data. +CREATE OR REPLACE TEMPORARY VIEW testData AS SELECT * FROM VALUES +('{"a":1, "b":"2"}', '[{"a": 1, "b":2}, {"a":2, "b":2}]'), ('{"a":1, "b":"2"}', null), ('{"a":2, "b":"3"}', '[{"a": 3, "b":4}, {"a":4, "b":5}]'), ('{"a":5, "b":"6"}', '[{"a": 6, "b":7}, {"a":8, "b":9}]'), (null, '[{"a": 1, "b":2}, {"a":2, "b":2}]') +AS testData(a, b); + +SELECT from_json(a, 'struct').a, from_json(a, 'struct').b, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].b FROM testData; + +SELECT if(from_json(a, 'struct').a > 1, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].a + 1) FROM testData; + +SELECT if(isnull(from_json(a, 'struct').a), from_json(b, 'array>')[0].b + 1, from_json(b, 'array>')[0].b) FROM testData; + +SELECT case when from_json(a, 'struct').a > 5 then from_json(a, 'struct').b when from_json(a, 'struct').a > 4 then from_json(a, 'struct').b + 1 else from_json(a, 'struct').b + 2 end FROM testData; + +SELECT case when from_json(a, 'struct').a > 5 then from_json(b, 'array>')[0].b when from_json(a, 'struct').a > 4 then from_json(b, 'array>')[0].b + 1 else from_json(b, 'array>')[0].b + 2 end FROM testData; + +-- With non-deterministic expressions. 
+SELECT from_json(a, 'struct').a + random() > 2, from_json(a, 'struct').b, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].b + + random() > 2 FROM testData; + +SELECT if(from_json(a, 'struct').a + random() > 5, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].a + 1) FROM testData; + +SELECT case when from_json(a, 'struct').a > 5 then from_json(a, 'struct').b + random() > 5 when from_json(a, 'struct').a > 4 then from_json(a, 'struct').b + 1 + random() > 2 else from_json(a, 'struct').b + 2 + random() > 5 end FROM testData; + +-- Clean up +DROP VIEW IF EXISTS testData; \ No newline at end of file diff --git a/sql/core/src/test/resources/sql-tests/inputs/transform.sql b/sql/core/src/test/resources/sql-tests/inputs/transform.sql new file mode 100644 index 0000000000000..3f39700a95913 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/inputs/transform.sql @@ -0,0 +1,185 @@ +-- Test data. +CREATE OR REPLACE TEMPORARY VIEW t AS SELECT * FROM VALUES +('1', true, unhex('537061726B2053514C'), tinyint(1), 1, smallint(100), bigint(1), float(1.0), 1.0, Decimal(1.0), timestamp('1997-01-02'), date('2000-04-01')), +('2', false, unhex('537061726B2053514C'), tinyint(2), 2, smallint(200), bigint(2), float(2.0), 2.0, Decimal(2.0), timestamp('1997-01-02 03:04:05'), date('2000-04-02')), +('3', true, unhex('537061726B2053514C'), tinyint(3), 3, smallint(300), bigint(3), float(3.0), 3.0, Decimal(3.0), timestamp('1997-02-10 17:32:01-08'), date('2000-04-03')) +AS t(a, b, c, d, e, f, g, h, i, j, k, l); + +SELECT TRANSFORM(a) +USING 'cat' AS (a) +FROM t; + +-- common supported data types between no serde and serde transform +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + USING 'cat' AS ( + a string, + b boolean, + c binary, + d tinyint, + e int, + f smallint, + g long, + h float, + i double, + j decimal(38, 18), + k timestamp, + l date) + FROM t +) tmp; + +-- common supported data types between no serde and serde transform +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + USING 'cat' AS ( + a string, + b string, + c string, + d string, + e string, + f string, + g string, + h string, + i string, + j string, + k string, + l string) + FROM t +) tmp; + +-- SPARK-32388 handle schema less +SELECT TRANSFORM(a) +USING 'cat' +FROM t; + +SELECT TRANSFORM(a, b) +USING 'cat' +FROM t; + +SELECT TRANSFORM(a, b, c) +USING 'cat' +FROM t; + +-- return null when return string incompatible (no serde) +SELECT TRANSFORM(a, b, c, d, e, f, g, h, i) +USING 'cat' AS (a int, b short, c long, d byte, e float, f double, g decimal(38, 18), h date, i timestamp) +FROM VALUES +('a','','1231a','a','213.21a','213.21a','0a.21d','2000-04-01123','1997-0102 00:00:') tmp(a, b, c, d, e, f, g, h, i); + +-- SPARK-28227: transform can't run with aggregation +SELECT TRANSFORM(b, max(a), sum(f)) +USING 'cat' AS (a, b) +FROM t +GROUP BY b; + +-- transform use MAP +MAP a, b USING 'cat' AS (a, b) FROM t; + +-- transform use REDUCE +REDUCE a, b USING 'cat' AS (a, b) FROM t; + +-- transform with defined row format delimit +SELECT TRANSFORM(a, b, c, null) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +USING 'cat' AS (a, b, c, d) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +FROM t; + +SELECT TRANSFORM(a, b, c, null) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' 
+ NULL DEFINED AS 'NULL' +USING 'cat' AS (d) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +FROM t; + +-- transform with defined row format delimit handle schema with correct type +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + USING 'cat' AS ( + a string, + b boolean, + c binary, + d tinyint, + e int, + f smallint, + g long, + h float, + i double, + j decimal(38, 18), + k timestamp, + l date) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + FROM t +) tmp; + +-- transform with defined row format delimit handle schema with wrong type +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + USING 'cat' AS ( + a string, + b long, + c binary, + d tinyint, + e int, + f smallint, + g long, + h float, + i double, + j decimal(38, 18), + k int, + l long) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + FROM t +) tmp; + +-- transform with defined row format delimit LINE TERMINATED BY only support '\n' +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '@' + NULL DEFINED AS 'NULL' + USING 'cat' AS ( + a string, + b string, + c string, + d string, + e string, + f string, + g string, + h string, + i string, + j string, + k string, + l string) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '@' + NULL DEFINED AS 'NULL' + FROM t +) tmp; diff --git a/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-case.sql b/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-case.sql index 8fa3c0a6dfec9..5322c1b502439 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-case.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-case.sql @@ -67,11 +67,11 @@ SELECT '7' AS `None`, CASE WHEN rand() < udf(0) THEN 1 END AS `NULL on no matches`; +-- [SPARK-33008] Spark SQL throws an exception -- Constant-expression folding shouldn't evaluate unreachable subexpressions SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END; SELECT CASE 1 WHEN 0 THEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END; --- [SPARK-27923] PostgreSQL throws an exception but Spark SQL is NULL -- However we do not currently suppress folding of potentially -- reachable subexpressions SELECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl; diff --git a/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-select_having.sql b/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-select_having.sql index 412d45b49a184..76c0b198aa439 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-select_having.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/udf/postgreSQL/udf-select_having.sql @@ -51,6 +51,7 @@ SELECT 1 AS one FROM test_having HAVING udf(a) > 1; SELECT 1 AS one FROM test_having HAVING udf(udf(1) > udf(2)); SELECT 1 AS one FROM test_having HAVING udf(udf(1) < udf(2)); +-- [SPARK-33008] Spark SQL throws an exception -- and just to prove that we aren't 
scanning the table: SELECT 1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2; diff --git a/sql/core/src/test/resources/sql-tests/inputs/window.sql b/sql/core/src/test/resources/sql-tests/inputs/window.sql index 72d812d6a4e49..56f2b0b20c165 100644 --- a/sql/core/src/test/resources/sql-tests/inputs/window.sql +++ b/sql/core/src/test/resources/sql-tests/inputs/window.sql @@ -16,6 +16,38 @@ CREATE OR REPLACE TEMPORARY VIEW testData AS SELECT * FROM VALUES (3, 1L, 1.0D, date("2017-08-01"), timestamp_seconds(1501545600), null) AS testData(val, val_long, val_double, val_date, val_timestamp, cate); +CREATE OR REPLACE TEMPORARY VIEW basic_pays AS SELECT * FROM VALUES +('Diane Murphy','Accounting',8435), +('Mary Patterson','Accounting',9998), +('Jeff Firrelli','Accounting',8992), +('William Patterson','Accounting',8870), +('Gerard Bondur','Accounting',11472), +('Anthony Bow','Accounting',6627), +('Leslie Jennings','IT',8113), +('Leslie Thompson','IT',5186), +('Julie Firrelli','Sales',9181), +('Steve Patterson','Sales',9441), +('Foon Yue Tseng','Sales',6660), +('George Vanauf','Sales',10563), +('Loui Bondur','SCM',10449), +('Gerard Hernandez','SCM',6949), +('Pamela Castillo','SCM',11303), +('Larry Bott','SCM',11798), +('Barry Jones','SCM',10586) +AS basic_pays(employee_name, department, salary); + +CREATE OR REPLACE TEMPORARY VIEW test_ignore_null AS SELECT * FROM VALUES +('a', 0, null), +('a', 1, 'x'), +('b', 2, null), +('c', 3, null), +('a', 4, 'y'), +('b', 5, null), +('a', 6, 'z'), +('a', 7, 'v'), +('a', 8, null) +AS test_ignore_null(content, id, v); + -- RowsBetween SELECT val, cate, count(val) OVER(PARTITION BY cate ORDER BY val ROWS CURRENT ROW) FROM testData ORDER BY cate, val; @@ -124,4 +156,256 @@ WINDOW w AS (PARTITION BY cate ORDER BY val); -- with filter predicate SELECT val, cate, count(val) FILTER (WHERE val > 1) OVER(PARTITION BY cate) -FROM testData ORDER BY cate, val; \ No newline at end of file +FROM testData ORDER BY cate, val; + +-- nth_value()/first_value() over () +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC) +ORDER BY salary DESC; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY salary DESC; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY salary DESC; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary RANGE BETWEEN 2000 PRECEDING AND 1000 FOLLOWING) +ORDER BY salary; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +ORDER BY salary DESC; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC RANGE BETWEEN CURRENT ROW 
AND UNBOUNDED FOLLOWING) +ORDER BY salary DESC; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY salary DESC; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY salary DESC; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) +ORDER BY salary DESC; + +SELECT + employee_name, + department, + salary, + FIRST_VALUE(employee_name) OVER w highest_salary, + NTH_VALUE(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS ( + PARTITION BY department + ORDER BY salary DESC + RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING +) +ORDER BY department; + +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW + w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING), + w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) +ORDER BY salary DESC; + +SELECT + content, + id, + v, + lead(v, 0) IGNORE NULLS OVER w lead_0, + lead(v, 1) IGNORE NULLS OVER w lead_1, + lead(v, 2) IGNORE NULLS OVER w lead_2, + lead(v, 3) IGNORE NULLS OVER w lead_3, + lag(v, 0) IGNORE NULLS OVER w lag_0, + lag(v, 1) IGNORE NULLS OVER w lag_1, + lag(v, 2) IGNORE NULLS OVER w lag_2, + lag(v, 3) IGNORE NULLS OVER w lag_3, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w 
nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY id; + +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) +ORDER BY id; \ No newline at end of file diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out new file mode 100644 index 0000000000000..12a77e36273fa --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/ansi/array.sql.out @@ -0,0 +1,234 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 20 + + +-- !query +create temporary view data as select * from values + ("one", array(11, 12, 13), array(array(111, 112, 113), array(121, 122, 123))), + ("two", array(21, 22, 23), array(array(211, 212, 213), array(221, 222, 223))) + as data(a, b, c) +-- !query schema +struct<> +-- !query output + + + +-- !query +select * from data +-- !query schema +struct,c:array>> +-- !query output +one [11,12,13] [[111,112,113],[121,122,123]] +two [21,22,23] [[211,212,213],[221,222,223]] + + +-- !query +select a, b[0], b[0] + b[1] from data +-- !query schema +struct +-- !query output +one 11 23 +two 21 43 + + +-- !query +select a, c[0][0] + c[0][0 + 1] from data +-- !query schema +struct +-- !query output +one 223 +two 423 + + +-- !query +create temporary view primitive_arrays as select * from values ( + array(true), + array(2Y, 1Y), + array(2S, 1S), + array(2, 1), + array(2L, 1L), + array(9223372036854775809, 9223372036854775808), + array(2.0D, 1.0D), + array(float(2.0), float(1.0)), + array(date '2016-03-14', date '2016-03-13'), + array(timestamp '2016-11-15 20:54:00.000', timestamp '2016-11-12 20:54:00.000') +) as primitive_arrays( + boolean_array, + tinyint_array, + smallint_array, + int_array, + bigint_array, + decimal_array, + double_array, + float_array, + 
date_array, + timestamp_array +) +-- !query schema +struct<> +-- !query output + + + +-- !query +select * from primitive_arrays +-- !query schema +struct,tinyint_array:array,smallint_array:array,int_array:array,bigint_array:array,decimal_array:array,double_array:array,float_array:array,date_array:array,timestamp_array:array> +-- !query output +[true] [2,1] [2,1] [2,1] [2,1] [9223372036854775809,9223372036854775808] [2.0,1.0] [2.0,1.0] [2016-03-14,2016-03-13] [2016-11-15 20:54:00,2016-11-12 20:54:00] + + +-- !query +select + array_contains(boolean_array, true), array_contains(boolean_array, false), + array_contains(tinyint_array, 2Y), array_contains(tinyint_array, 0Y), + array_contains(smallint_array, 2S), array_contains(smallint_array, 0S), + array_contains(int_array, 2), array_contains(int_array, 0), + array_contains(bigint_array, 2L), array_contains(bigint_array, 0L), + array_contains(decimal_array, 9223372036854775809), array_contains(decimal_array, 1), + array_contains(double_array, 2.0D), array_contains(double_array, 0.0D), + array_contains(float_array, float(2.0)), array_contains(float_array, float(0.0)), + array_contains(date_array, date '2016-03-14'), array_contains(date_array, date '2016-01-01'), + array_contains(timestamp_array, timestamp '2016-11-15 20:54:00.000'), array_contains(timestamp_array, timestamp '2016-01-01 20:54:00.000') +from primitive_arrays +-- !query schema +struct +-- !query output +true false true false true false true false true false true false true false true false true false true false + + +-- !query +select array_contains(b, 11), array_contains(c, array(111, 112, 113)) from data +-- !query schema +struct +-- !query output +false false +true true + + +-- !query +select + sort_array(boolean_array), + sort_array(tinyint_array), + sort_array(smallint_array), + sort_array(int_array), + sort_array(bigint_array), + sort_array(decimal_array), + sort_array(double_array), + sort_array(float_array), + sort_array(date_array), + sort_array(timestamp_array) +from primitive_arrays +-- !query schema +struct,sort_array(tinyint_array, true):array,sort_array(smallint_array, true):array,sort_array(int_array, true):array,sort_array(bigint_array, true):array,sort_array(decimal_array, true):array,sort_array(double_array, true):array,sort_array(float_array, true):array,sort_array(date_array, true):array,sort_array(timestamp_array, true):array> +-- !query output +[true] [1,2] [1,2] [1,2] [1,2] [9223372036854775808,9223372036854775809] [1.0,2.0] [1.0,2.0] [2016-03-13,2016-03-14] [2016-11-12 20:54:00,2016-11-15 20:54:00] + + +-- !query +select sort_array(array('b', 'd'), '1') +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +cannot resolve 'sort_array(array('b', 'd'), '1')' due to data type mismatch: Sort order in second argument requires a boolean literal.; line 1 pos 7 + + +-- !query +select sort_array(array('b', 'd'), cast(NULL as boolean)) +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +cannot resolve 'sort_array(array('b', 'd'), CAST(NULL AS BOOLEAN))' due to data type mismatch: Sort order in second argument requires a boolean literal.; line 1 pos 7 + + +-- !query +select + size(boolean_array), + size(tinyint_array), + size(smallint_array), + size(int_array), + size(bigint_array), + size(decimal_array), + size(double_array), + size(float_array), + size(date_array), + size(timestamp_array) +from primitive_arrays +-- !query schema +struct +-- !query output +1 2 2 2 2 2 2 2 2 2 + + +-- !query +select 
element_at(array(1, 2, 3), 5) +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +Invalid index: 5, numElements: 3 + + +-- !query +select element_at(array(1, 2, 3), -5) +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +Invalid index: -5, numElements: 3 + + +-- !query +select element_at(array(1, 2, 3), 0) +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +SQL array indices start at 1 + + +-- !query +select elt(4, '123', '456') +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +Invalid index: 4, numElements: 2 + + +-- !query +select elt(0, '123', '456') +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +Invalid index: 0, numElements: 2 + + +-- !query +select elt(-1, '123', '456') +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +Invalid index: -1, numElements: 2 + + +-- !query +select array(1, 2, 3)[5] +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +Invalid index: 5, numElements: 3 + + +-- !query +select array(1, 2, 3)[-1] +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +Invalid index: -1, numElements: 3 diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/datetime.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/datetime.sql.out index 5fe0bd56bf8af..9a0c8ff02c5bb 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/datetime.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/datetime.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 108 +-- Number of queries: 128 -- !query @@ -87,6 +87,46 @@ struct +-- !query output +1606833008 1606833008 NULL + + +-- !query +select UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MILLIS(null) +-- !query schema +struct +-- !query output +1606833008000 1606833008999 NULL + + +-- !query +select UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MICROS(null) +-- !query schema +struct +-- !query output +1606833008000000 1606833008999999 NULL + + +-- !query +select DATE_FROM_UNIX_DATE(0), DATE_FROM_UNIX_DATE(1000), DATE_FROM_UNIX_DATE(null) +-- !query schema +struct +-- !query output +1970-01-01 1972-09-27 NULL + + +-- !query +select UNIX_DATE(DATE('1970-01-01')), UNIX_DATE(DATE('2020-12-04')), UNIX_DATE(null) +-- !query schema +struct +-- !query output +0 18600 NULL + + -- !query select current_date = current_date(), current_timestamp = current_timestamp() -- !query schema @@ -301,9 +341,10 @@ struct -- !query select '1' - interval '2' second -- !query schema -struct +struct<> -- !query output -NULL +java.time.DateTimeException +Cannot cast 1 to TimestampType. -- !query @@ -412,7 +453,7 @@ select date_add('2011-11-11', '1.2') struct<> -- !query output org.apache.spark.sql.AnalysisException -The second argument of 'date_add' function needs to be an integer.; +The second argument of 'date_add' function needs to be an integer. -- !query @@ -453,7 +494,7 @@ select date_sub(date'2011-11-11', '1.2') struct<> -- !query output org.apache.spark.sql.AnalysisException -The second argument of 'date_sub' function needs to be an integer.; +The second argument of 'date_sub' function needs to be an integer. 
-- !query @@ -600,9 +641,10 @@ struct -- !query select to_timestamp('2019-10-06 10:11:12.', 'yyyy-MM-dd HH:mm:ss.SSSSSS[zzz]') -- !query schema -struct +struct<> -- !query output -NULL +java.time.format.DateTimeParseException +Text '2019-10-06 10:11:12.' could not be parsed at index 20 -- !query @@ -664,9 +706,10 @@ struct +struct<> -- !query output -NULL +java.time.format.DateTimeParseException +Text '2019-10-06 10:11:12.1234567PST' could not be parsed, unparsed text found at index 26 -- !query @@ -680,9 +723,10 @@ struct +struct<> -- !query output -NULL +java.time.format.DateTimeParseException +Text '223456 2019-10-06 10:11:12.123456PST' could not be parsed at index 27 -- !query @@ -744,17 +788,19 @@ struct +struct<> -- !query output -NULL +java.time.format.DateTimeParseException +Text '12.1232019-10-06S10:11' could not be parsed at index 7 -- !query select to_timestamp("12.1232019-10-06S10:11", "ss.SSSSyy-MM-dd'S'HH:mm") -- !query schema -struct +struct<> -- !query output -NULL +java.time.format.DateTimeParseException +Text '12.1232019-10-06S10:11' could not be parsed at index 9 -- !query @@ -824,9 +870,10 @@ struct -- !query select to_timestamp("02-29", "MM-dd") -- !query schema -struct +struct<> -- !query output -NULL +java.time.DateTimeException +Invalid date 'February 29' as '1970' is not a leap year -- !query @@ -840,9 +887,10 @@ struct -- !query select to_date("02-29", "MM-dd") -- !query schema -struct +struct<> -- !query output -NULL +java.time.DateTimeException +Invalid date 'February 29' as '1970' is not a leap year -- !query @@ -898,7 +946,7 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_json('{"time":"26/October/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +select from_json('{"t":"26/October/2015"}', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output @@ -907,7 +955,7 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_json('{"date":"26/October/2015"}', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')) +select from_json('{"d":"26/October/2015"}', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output @@ -916,7 +964,7 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_csv('26/October/2015', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +select from_csv('26/October/2015', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output @@ -925,9 +973,141 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_csv('26/October/2015', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')) +select from_csv('26/October/2015', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output org.apache.spark.SparkUpgradeException You may get a different result due to the upgrading of Spark 3.0: Fail to recognize 'dd/MMMMM/yyyy' pattern in the DateTimeFormatter. 1) You can set spark.sql.legacy.timeParserPolicy to LEGACY to restore the behavior before Spark 3.0. 
2) You can form a valid datetime pattern with the guide from https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html + + +-- !query +select to_date("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text '2020-01-27T20:06:11.847' could not be parsed at index 10 + + +-- !query +select to_date("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text 'Unparseable' could not be parsed at index 0 + + +-- !query +select to_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text '2020-01-27T20:06:11.847' could not be parsed at index 10 + + +-- !query +select to_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text 'Unparseable' could not be parsed at index 0 + + +-- !query +select unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text '2020-01-27T20:06:11.847' could not be parsed at index 10 + + +-- !query +select unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text 'Unparseable' could not be parsed at index 0 + + +-- !query +select to_unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text '2020-01-27T20:06:11.847' could not be parsed at index 10 + + +-- !query +select to_unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct<> +-- !query output +java.time.format.DateTimeParseException +Text 'Unparseable' could not be parsed at index 0 + + +-- !query +select cast("Unparseable" as timestamp) +-- !query schema +struct<> +-- !query output +java.time.DateTimeException +Cannot cast Unparseable to TimestampType. + + +-- !query +select cast("Unparseable" as date) +-- !query schema +struct<> +-- !query output +java.time.DateTimeException +Cannot cast Unparseable to DateType. + + +-- !query +select next_day("2015-07-23", "Mon") +-- !query schema +struct +-- !query output +2015-07-27 + + +-- !query +select next_day("2015-07-23", "xx") +-- !query schema +struct<> +-- !query output +java.lang.IllegalArgumentException +Illegal input for day of week: xx + + +-- !query +select next_day("xx", "Mon") +-- !query schema +struct<> +-- !query output +java.time.DateTimeException +Cannot cast xx to DateType. + + +-- !query +select next_day(null, "Mon") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day(null, "xx") +-- !query schema +struct +-- !query output +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/map.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/map.sql.out new file mode 100644 index 0000000000000..12c599812cdee --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/ansi/map.sql.out @@ -0,0 +1,20 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 2 + + +-- !query +select element_at(map(1, 'a', 2, 'b'), 5) +-- !query schema +struct<> +-- !query output +java.util.NoSuchElementException +Key 5 does not exist. 
+ + +-- !query +select map(1, 'a', 2, 'b')[5] +-- !query schema +struct<> +-- !query output +java.util.NoSuchElementException +Key 5 does not exist. diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/parse-schema-string.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/parse-schema-string.sql.out new file mode 100644 index 0000000000000..bfbf11d54489c --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/ansi/parse-schema-string.sql.out @@ -0,0 +1,62 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 4 + + +-- !query +select from_csv('1', 'create INT') +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Cannot parse the data type: +no viable alternative at input 'create'(line 1, pos 0) + +== SQL == +create INT +^^^ + +Failed fallback parsing: +no viable alternative at input 'create'(line 1, pos 0) + +== SQL == +create INT +^^^ +; line 1 pos 7 + + +-- !query +select from_csv('1', 'cube INT') +-- !query schema +struct> +-- !query output +{"cube":1} + + +-- !query +select from_json('{"create":1}', 'create INT') +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Cannot parse the data type: +no viable alternative at input 'create'(line 1, pos 0) + +== SQL == +create INT +^^^ + +Failed fallback parsing: +no viable alternative at input 'create'(line 1, pos 0) + +== SQL == +create INT +^^^ +; line 1 pos 7 + + +-- !query +select from_json('{"cube":1}', 'cube INT') +-- !query schema +struct> +-- !query output +{"cube":1} diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out index d5c0acb40bb1e..dd085a6437e13 100644 --- a/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/ansi/string-functions.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 36 +-- Number of queries: 44 -- !query @@ -294,3 +294,69 @@ struct<> -- !query output java.lang.NumberFormatException invalid input syntax for type numeric: invalid_length + + +-- !query +select decode() +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Invalid number of arguments for function decode. Expected: 2; Found: 0; line 1 pos 7 + + +-- !query +select decode(encode('abc', 'utf-8')) +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Invalid number of arguments for function decode. 
Expected: 2; Found: 1; line 1 pos 7 + + +-- !query +select decode(encode('abc', 'utf-8'), 'utf-8') +-- !query schema +struct +-- !query output +abc + + +-- !query +select decode(1, 1, 'Southlake') +-- !query schema +struct +-- !query output +Southlake + + +-- !query +select decode(2, 1, 'Southlake') +-- !query schema +struct +-- !query output +NULL + + +-- !query +select decode(2, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle', 'Non domestic') +-- !query schema +struct +-- !query output +San Francisco + + +-- !query +select decode(6, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle', 'Non domestic') +-- !query schema +struct +-- !query output +Non domestic + + +-- !query +select decode(6, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle') +-- !query schema +struct +-- !query output +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/array.sql.out b/sql/core/src/test/resources/sql-tests/results/array.sql.out index 2c2b1a7856304..9bf0d89ed71fe 100644 --- a/sql/core/src/test/resources/sql-tests/results/array.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/array.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 12 +-- Number of queries: 20 -- !query @@ -160,3 +160,68 @@ from primitive_arrays struct -- !query output 1 2 2 2 2 2 2 2 2 2 + + +-- !query +select element_at(array(1, 2, 3), 5) +-- !query schema +struct +-- !query output +NULL + + +-- !query +select element_at(array(1, 2, 3), -5) +-- !query schema +struct +-- !query output +NULL + + +-- !query +select element_at(array(1, 2, 3), 0) +-- !query schema +struct<> +-- !query output +java.lang.ArrayIndexOutOfBoundsException +SQL array indices start at 1 + + +-- !query +select elt(4, '123', '456') +-- !query schema +struct +-- !query output +NULL + + +-- !query +select elt(0, '123', '456') +-- !query schema +struct +-- !query output +NULL + + +-- !query +select elt(-1, '123', '456') +-- !query schema +struct +-- !query output +NULL + + +-- !query +select array(1, 2, 3)[5] +-- !query schema +struct +-- !query output +NULL + + +-- !query +select array(1, 2, 3)[-1] +-- !query schema +struct +-- !query output +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/cast.sql.out b/sql/core/src/test/resources/sql-tests/results/cast.sql.out index d4872ca03199b..42d12b80be989 100644 --- a/sql/core/src/test/resources/sql-tests/results/cast.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/cast.sql.out @@ -269,6 +269,8 @@ Class: org.apache.spark.sql.catalyst.expressions.Cast Extended Usage: No example/argument for boolean. + Since: 2.0.1 + Function: boolean Usage: boolean(expr) - Casts the value `expr` to the target data type `boolean`. diff --git a/sql/core/src/test/resources/sql-tests/results/change-column.sql.out b/sql/core/src/test/resources/sql-tests/results/change-column.sql.out index b1a32ad1f63e9..96b28d734f5a7 100644 --- a/sql/core/src/test/resources/sql-tests/results/change-column.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/change-column.sql.out @@ -50,7 +50,7 @@ ALTER TABLE test_change RENAME COLUMN a TO a1 struct<> -- !query output org.apache.spark.sql.AnalysisException -RENAME COLUMN is only supported with v2 tables.; +RENAME COLUMN is only supported with v2 tables. 
-- !query @@ -69,7 +69,7 @@ ALTER TABLE test_change CHANGE a TYPE STRING struct<> -- !query output org.apache.spark.sql.AnalysisException -ALTER TABLE CHANGE COLUMN is not supported for changing column 'a' with type 'IntegerType' to 'a' with type 'StringType'; +ALTER TABLE CHANGE COLUMN is not supported for changing column 'a' with type 'IntegerType' to 'a' with type 'StringType' -- !query @@ -88,7 +88,7 @@ ALTER TABLE test_change CHANGE a AFTER b struct<> -- !query output org.apache.spark.sql.AnalysisException -ALTER COLUMN ... FIRST | ALTER is only supported with v2 tables.; +ALTER COLUMN ... FIRST | ALTER is only supported with v2 tables. -- !query @@ -97,7 +97,7 @@ ALTER TABLE test_change CHANGE b FIRST struct<> -- !query output org.apache.spark.sql.AnalysisException -ALTER COLUMN ... FIRST | ALTER is only supported with v2 tables.; +ALTER COLUMN ... FIRST | ALTER is only supported with v2 tables. -- !query @@ -176,7 +176,7 @@ ALTER TABLE test_change CHANGE invalid_col TYPE INT struct<> -- !query output org.apache.spark.sql.AnalysisException -Can't find column `invalid_col` given table data columns [`a`, `b`, `c`]; +Can't find column `invalid_col` given table data columns [`a`, `b`, `c`] -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/charvarchar.sql.out b/sql/core/src/test/resources/sql-tests/results/charvarchar.sql.out new file mode 100644 index 0000000000000..774235e084191 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/charvarchar.sql.out @@ -0,0 +1,663 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 45 + + +-- !query +create table char_tbl(c char(5), v varchar(6)) using parquet +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_tbl +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_tbl +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl + + +-- !query +desc formatted char_tbl c +-- !query schema +struct +-- !query output +col_name c +data_type char(5) +comment NULL +min NULL +max NULL +num_nulls NULL +distinct_count NULL +avg_col_len NULL +max_col_len NULL +histogram NULL + + +-- !query +show create table char_tbl +-- !query schema +struct +-- !query output +CREATE TABLE `default`.`char_tbl` ( + `c` CHAR(5), + `v` VARCHAR(6)) +USING parquet + + +-- !query +create table char_tbl2 using parquet as select * from char_tbl +-- !query schema +struct<> +-- !query output + + + +-- !query +show create table char_tbl2 +-- !query schema +struct +-- !query output +CREATE TABLE `default`.`char_tbl2` ( + `c` CHAR(5), + `v` VARCHAR(6)) +USING parquet + + +-- !query +desc formatted char_tbl2 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_tbl2 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl2 + + +-- !query +desc formatted char_tbl2 c +-- !query schema +struct +-- !query output +col_name c +data_type char(5) +comment NULL +min NULL +max NULL +num_nulls NULL +distinct_count NULL +avg_col_len NULL +max_col_len NULL +histogram NULL + + +-- !query +create table char_tbl3 like char_tbl +-- !query schema 
+struct<> +-- !query output + + + +-- !query +desc formatted char_tbl3 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_tbl3 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl3 + + +-- !query +desc formatted char_tbl3 c +-- !query schema +struct +-- !query output +col_name c +data_type char(5) +comment NULL +min NULL +max NULL +num_nulls NULL +distinct_count NULL +avg_col_len NULL +max_col_len NULL +histogram NULL + + +-- !query +show create table char_tbl3 +-- !query schema +struct +-- !query output +CREATE TABLE `default`.`char_tbl3` ( + `c` CHAR(5), + `v` VARCHAR(6)) +USING parquet + + +-- !query +create view char_view as select * from char_tbl +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_view +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_view +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type VIEW +View Text select * from char_tbl +View Original Text select * from char_tbl +View Catalog and Namespace spark_catalog.default +View Query Output Columns [c, v] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=default, view.query.out.col.0=c, view.query.out.col.1=v, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[]] + + +-- !query +desc formatted char_view c +-- !query schema +struct +-- !query output +col_name c +data_type char(5) +comment NULL +min NULL +max NULL +num_nulls NULL +distinct_count NULL +avg_col_len NULL +max_col_len NULL +histogram NULL + + +-- !query +show create table char_view +-- !query schema +struct +-- !query output +CREATE VIEW `default`.`char_view` ( + `c`, + `v`) +AS select * from char_tbl + + +-- !query +alter table char_tbl rename to char_tbl1 +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_tbl1 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_tbl1 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl1 + + +-- !query +alter table char_tbl1 change column c type char(6) +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +ALTER TABLE CHANGE COLUMN is not supported for changing column 'c' with type 'CharType(5)' to 'c' with type 'CharType(6)' + + +-- !query +alter table char_tbl1 change column c type char(5) +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_tbl1 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_tbl1 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl1 + + +-- !query +alter table char_tbl1 add columns (d char(5)) +-- !query schema +struct<> +-- !query output + + + +-- !query +desc 
formatted char_tbl1 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) +d char(5) + +# Detailed Table Information +Database default +Table char_tbl1 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl1 + + +-- !query +alter view char_view as select * from char_tbl2 +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_view +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_view +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type VIEW +View Text select * from char_tbl2 +View Original Text select * from char_tbl2 +View Catalog and Namespace spark_catalog.default +View Query Output Columns [c, v] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=default, view.query.out.col.0=c, view.query.out.col.1=v, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[]] + + +-- !query +alter table char_tbl1 SET TBLPROPERTIES('yes'='no') +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_tbl1 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) +d char(5) + +# Detailed Table Information +Database default +Table char_tbl1 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Table Properties [yes=no] +Location [not included in comparison]/{warehouse_dir}/char_tbl1 + + +-- !query +alter view char_view SET TBLPROPERTIES('yes'='no') +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_view +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_view +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type VIEW +View Text select * from char_tbl2 +View Original Text select * from char_tbl2 +View Catalog and Namespace spark_catalog.default +View Query Output Columns [c, v] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=default, view.query.out.col.0=c, view.query.out.col.1=v, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], yes=no] + + +-- !query +alter table char_tbl1 UNSET TBLPROPERTIES('yes') +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_tbl1 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) +d char(5) + +# Detailed Table Information +Database default +Table char_tbl1 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl1 + + +-- !query +alter view char_view UNSET TBLPROPERTIES('yes') +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_view +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) + +# Detailed Table Information +Database default +Table char_view +Created Time [not included in 
comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type VIEW +View Text select * from char_tbl2 +View Original Text select * from char_tbl2 +View Catalog and Namespace spark_catalog.default +View Query Output Columns [c, v] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=default, view.query.out.col.0=c, view.query.out.col.1=v, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[]] + + +-- !query +alter table char_tbl1 SET SERDEPROPERTIES('yes'='no') +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_tbl1 +-- !query schema +struct +-- !query output +c char(5) +v varchar(6) +d char(5) + +# Detailed Table Information +Database default +Table char_tbl1 +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_tbl1 +Storage Properties [yes=no] + + +-- !query +create table char_part(c1 char(5), c2 char(2), v1 varchar(6), v2 varchar(2)) using parquet partitioned by (v2, c2) +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_part +-- !query schema +struct +-- !query output +c1 char(5) +v1 varchar(6) +v2 varchar(2) +c2 char(2) +# Partition Information +# col_name data_type comment +v2 varchar(2) +c2 char(2) + +# Detailed Table Information +Database default +Table char_part +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_part +Partition Provider Catalog + + +-- !query +alter table char_part add partition (v2='ke', c2='nt') location 'loc1' +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_part +-- !query schema +struct +-- !query output +c1 char(5) +v1 varchar(6) +v2 varchar(2) +c2 char(2) +# Partition Information +# col_name data_type comment +v2 varchar(2) +c2 char(2) + +# Detailed Table Information +Database default +Table char_part +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_part +Partition Provider Catalog + + +-- !query +alter table char_part partition (v2='ke') rename to partition (v2='nt') +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Partition spec is invalid. 
The spec (v2) must match the partition spec (v2, c2) defined in table '`default`.`char_part`' + + +-- !query +desc formatted char_part +-- !query schema +struct +-- !query output +c1 char(5) +v1 varchar(6) +v2 varchar(2) +c2 char(2) +# Partition Information +# col_name data_type comment +v2 varchar(2) +c2 char(2) + +# Detailed Table Information +Database default +Table char_part +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_part +Partition Provider Catalog + + +-- !query +alter table char_part partition (v2='ke', c2='nt') set location 'loc2' +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_part +-- !query schema +struct +-- !query output +c1 char(5) +v1 varchar(6) +v2 varchar(2) +c2 char(2) +# Partition Information +# col_name data_type comment +v2 varchar(2) +c2 char(2) + +# Detailed Table Information +Database default +Table char_part +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_part +Partition Provider Catalog + + +-- !query +MSCK REPAIR TABLE char_part +-- !query schema +struct<> +-- !query output + + + +-- !query +desc formatted char_part +-- !query schema +struct +-- !query output +c1 char(5) +v1 varchar(6) +v2 varchar(2) +c2 char(2) +# Partition Information +# col_name data_type comment +v2 varchar(2) +c2 char(2) + +# Detailed Table Information +Database default +Table char_part +Created Time [not included in comparison] +Last Access [not included in comparison] +Created By [not included in comparison] +Type MANAGED +Provider parquet +Location [not included in comparison]/{warehouse_dir}/char_part +Partition Provider Catalog diff --git a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out index 04ddfe0ac128c..ea321638b219e 100644 --- a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out @@ -195,7 +195,7 @@ SELECT t1.x.y.* FROM t1 struct<> -- !query output org.apache.spark.sql.AnalysisException -cannot resolve 't1.x.y.*' given input columns 'i1'; +cannot resolve 't1.x.y.*' given input columns 'i1' -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/count.sql.out b/sql/core/src/test/resources/sql-tests/results/count.sql.out index c0cdd0d697538..ffd75d6a09e1c 100644 --- a/sql/core/src/test/resources/sql-tests/results/count.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/count.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 13 +-- Number of queries: 14 -- !query @@ -116,4 +116,13 @@ SELECT count(distinct 0.8), percentile_approx(distinct a, 0.8) FROM testData -- !query schema struct -- !query output -1 2 \ No newline at end of file +1 2 + + +-- !query +SELECT count() FROM testData +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +cannot resolve 'count()' due to data type mismatch: count requires at least one argument.; line 1 pos 7 diff --git a/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out 
index 1e3173172a528..2131487f3500a 100644 --- a/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out @@ -24,7 +24,7 @@ select from_csv('1', 1) struct<> -- !query output org.apache.spark.sql.AnalysisException -The expression '1' is not a valid schema string.;; line 1 pos 7 +The expression '1' is not a valid schema string.; line 1 pos 7 -- !query @@ -33,7 +33,14 @@ select from_csv('1', 'a InvalidType') struct<> -- !query output org.apache.spark.sql.AnalysisException +Cannot parse the data type: +extraneous input 'InvalidType' expecting (line 1, pos 2) +== SQL == +a InvalidType +--^^^ + +Failed fallback parsing: DataType invalidtype is not supported.(line 1, pos 2) == SQL == @@ -48,7 +55,7 @@ select from_csv('1', 'a INT', named_struct('mode', 'PERMISSIVE')) struct<> -- !query output org.apache.spark.sql.AnalysisException -Must use a map() function for options;; line 1 pos 7 +Must use a map() function for options; line 1 pos 7 -- !query @@ -57,7 +64,7 @@ select from_csv('1', 'a INT', map('mode', 1)) struct<> -- !query output org.apache.spark.sql.AnalysisException -A type of keys and values in map() must be string, but got map;; line 1 pos 7 +A type of keys and values in map() must be string, but got map; line 1 pos 7 -- !query @@ -82,7 +89,7 @@ select schema_of_csv('1|abc', map('delimiter', '|')) -- !query schema struct -- !query output -struct<_c0:int,_c1:string> +STRUCT<`_c0`: INT, `_c1`: STRING> -- !query @@ -141,7 +148,7 @@ select to_csv(named_struct('a', 1, 'b', 2), named_struct('mode', 'PERMISSIVE')) struct<> -- !query output org.apache.spark.sql.AnalysisException -Must use a map() function for options;; line 1 pos 7 +Must use a map() function for options; line 1 pos 7 -- !query @@ -150,4 +157,4 @@ select to_csv(named_struct('a', 1, 'b', 2), map('mode', 1)) struct<> -- !query output org.apache.spark.sql.AnalysisException -A type of keys and values in map() must be string, but got map;; line 1 pos 7 +A type of keys and values in map() must be string, but got map; line 1 pos 7 diff --git a/sql/core/src/test/resources/sql-tests/results/cte-nested.sql.out b/sql/core/src/test/resources/sql-tests/results/cte-nested.sql.out index 2f736c7b4978f..a8db4599dafcc 100644 --- a/sql/core/src/test/resources/sql-tests/results/cte-nested.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/cte-nested.sql.out @@ -48,7 +48,7 @@ SELECT * FROM t2 struct<> -- !query output org.apache.spark.sql.AnalysisException -Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. -- !query @@ -85,7 +85,7 @@ SELECT * FROM t2 struct<> -- !query output org.apache.spark.sql.AnalysisException -Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. 
If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. -- !query @@ -139,7 +139,7 @@ SELECT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. -- !query @@ -154,7 +154,7 @@ SELECT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. -- !query @@ -170,7 +170,7 @@ SELECT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. -- !query @@ -184,7 +184,7 @@ WHERE c IN ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name t is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. -- !query @@ -213,7 +213,7 @@ SELECT * FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Name aBc is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name aBc is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. -- !query @@ -226,4 +226,4 @@ SELECT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Name aBc is ambiguous in nested CTE. Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228.; +Name aBc is ambiguous in nested CTE. 
Please set spark.sql.legacy.ctePrecedencePolicy to CORRECTED so that name defined in inner CTE takes precedence. If set it to LEGACY, outer CTE definitions will take precedence. See more details in SPARK-28228. diff --git a/sql/core/src/test/resources/sql-tests/results/datetime-legacy.sql.out b/sql/core/src/test/resources/sql-tests/results/datetime-legacy.sql.out index 3806764856f5b..d93843b231804 100644 --- a/sql/core/src/test/resources/sql-tests/results/datetime-legacy.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/datetime-legacy.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 108 +-- Number of queries: 128 -- !query @@ -87,6 +87,46 @@ struct +-- !query output +1606833008 1606833008 NULL + + +-- !query +select UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MILLIS(null) +-- !query schema +struct +-- !query output +1606833008000 1606833008999 NULL + + +-- !query +select UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MICROS(null) +-- !query schema +struct +-- !query output +1606833008000000 1606833008999999 NULL + + +-- !query +select DATE_FROM_UNIX_DATE(0), DATE_FROM_UNIX_DATE(1000), DATE_FROM_UNIX_DATE(null) +-- !query schema +struct +-- !query output +1970-01-01 1972-09-27 NULL + + +-- !query +select UNIX_DATE(DATE('1970-01-01')), UNIX_DATE(DATE('2020-12-04')), UNIX_DATE(null) +-- !query schema +struct +-- !query output +0 18600 NULL + + -- !query select current_date = current_date(), current_timestamp = current_timestamp() -- !query schema @@ -390,7 +430,7 @@ select date_add('2011-11-11', '1.2') struct<> -- !query output org.apache.spark.sql.AnalysisException -The second argument of 'date_add' function needs to be an integer.; +The second argument of 'date_add' function needs to be an integer. -- !query @@ -431,7 +471,7 @@ select date_sub(date'2011-11-11', '1.2') struct<> -- !query output org.apache.spark.sql.AnalysisException -The second argument of 'date_sub' function needs to be an integer.; +The second argument of 'date_sub' function needs to be an integer. 
-- !query @@ -872,32 +912,152 @@ struct -- !query -select from_json('{"time":"26/October/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +select from_json('{"t":"26/October/2015"}', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +-- !query schema +struct> +-- !query output +{"t":2015-10-26 00:00:00} + + +-- !query +select from_json('{"d":"26/October/2015"}', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')) +-- !query schema +struct> +-- !query output +{"d":2015-10-26} + + +-- !query +select from_csv('26/October/2015', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +-- !query schema +struct> +-- !query output +{"t":2015-10-26 00:00:00} + + +-- !query +select from_csv('26/October/2015', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')) +-- !query schema +struct> +-- !query output +{"d":2015-10-26} + + +-- !query +select to_date("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select to_date("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select to_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") -- !query schema -struct> +struct -- !query output -{"time":2015-10-26 00:00:00} +NULL + + +-- !query +select to_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL -- !query -select from_json('{"date":"26/October/2015"}', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')) +select unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") -- !query schema -struct> +struct -- !query output -{"date":2015-10-26} +NULL -- !query -select from_csv('26/October/2015', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +select unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") -- !query schema -struct> +struct -- !query output -{"time":2015-10-26 00:00:00} +NULL + + +-- !query +select to_unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL -- !query -select from_csv('26/October/2015', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')) +select to_unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") -- !query schema -struct> +struct -- !query output -{"date":2015-10-26} +NULL + + +-- !query +select cast("Unparseable" as timestamp) +-- !query schema +struct +-- !query output +NULL + + +-- !query +select cast("Unparseable" as date) +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day("2015-07-23", "Mon") +-- !query schema +struct +-- !query output +2015-07-27 + + +-- !query +select next_day("2015-07-23", "xx") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day("xx", "Mon") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day(null, "Mon") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day(null, "xx") +-- !query schema +struct +-- !query output +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/datetime.sql.out b/sql/core/src/test/resources/sql-tests/results/datetime.sql.out index 5feeaa9addef7..b07b68ce2600d 100755 --- a/sql/core/src/test/resources/sql-tests/results/datetime.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/datetime.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 108 +-- Number of queries: 128 -- !query @@ -87,6 +87,46 @@ struct +-- !query output +1606833008 1606833008 NULL + + +-- !query +select 
UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MILLIS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MILLIS(null) +-- !query schema +struct +-- !query output +1606833008000 1606833008999 NULL + + +-- !query +select UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08Z')), UNIX_MICROS(TIMESTAMP('2020-12-01 14:30:08.999999Z')), UNIX_MICROS(null) +-- !query schema +struct +-- !query output +1606833008000000 1606833008999999 NULL + + +-- !query +select DATE_FROM_UNIX_DATE(0), DATE_FROM_UNIX_DATE(1000), DATE_FROM_UNIX_DATE(null) +-- !query schema +struct +-- !query output +1970-01-01 1972-09-27 NULL + + +-- !query +select UNIX_DATE(DATE('1970-01-01')), UNIX_DATE(DATE('2020-12-04')), UNIX_DATE(null) +-- !query schema +struct +-- !query output +0 18600 NULL + + -- !query select current_date = current_date(), current_timestamp = current_timestamp() -- !query schema @@ -390,7 +430,7 @@ select date_add('2011-11-11', '1.2') struct<> -- !query output org.apache.spark.sql.AnalysisException -The second argument of 'date_add' function needs to be an integer.; +The second argument of 'date_add' function needs to be an integer. -- !query @@ -431,7 +471,7 @@ select date_sub(date'2011-11-11', '1.2') struct<> -- !query output org.apache.spark.sql.AnalysisException -The second argument of 'date_sub' function needs to be an integer.; +The second argument of 'date_sub' function needs to be an integer. -- !query @@ -876,7 +916,7 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_json('{"time":"26/October/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +select from_json('{"t":"26/October/2015"}', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output @@ -885,7 +925,7 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_json('{"date":"26/October/2015"}', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')) +select from_json('{"d":"26/October/2015"}', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output @@ -894,7 +934,7 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_csv('26/October/2015', 'time Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) +select from_csv('26/October/2015', 't Timestamp', map('timestampFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output @@ -903,9 +943,129 @@ You may get a different result due to the upgrading of Spark 3.0: Fail to recogn -- !query -select from_csv('26/October/2015', 'date Date', map('dateFormat', 'dd/MMMMM/yyyy')) +select from_csv('26/October/2015', 'd Date', map('dateFormat', 'dd/MMMMM/yyyy')) -- !query schema struct<> -- !query output org.apache.spark.SparkUpgradeException You may get a different result due to the upgrading of Spark 3.0: Fail to recognize 'dd/MMMMM/yyyy' pattern in the DateTimeFormatter. 1) You can set spark.sql.legacy.timeParserPolicy to LEGACY to restore the behavior before Spark 3.0. 
2) You can form a valid datetime pattern with the guide from https://spark.apache.org/docs/latest/sql-ref-datetime-pattern.html + + +-- !query +select to_date("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select to_date("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select to_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select to_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select to_unix_timestamp("2020-01-27T20:06:11.847", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select to_unix_timestamp("Unparseable", "yyyy-MM-dd HH:mm:ss.SSS") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select cast("Unparseable" as timestamp) +-- !query schema +struct +-- !query output +NULL + + +-- !query +select cast("Unparseable" as date) +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day("2015-07-23", "Mon") +-- !query schema +struct +-- !query output +2015-07-27 + + +-- !query +select next_day("2015-07-23", "xx") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day("xx", "Mon") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day(null, "Mon") +-- !query schema +struct +-- !query output +NULL + + +-- !query +select next_day(null, "xx") +-- !query schema +struct +-- !query output +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out b/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out index c6d3d45879eb1..cc5b836b74109 100644 --- a/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out @@ -1,9 +1,9 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 28 +-- Number of queries: 30 -- !query -CREATE TEMPORARY VIEW desc_col_temp_view (key int COMMENT 'column_comment') USING PARQUET +CREATE TEMPORARY VIEW desc_col_temp_view (key int COMMENT 'column_comment', col struct) USING PARQUET -- !query schema struct<> -- !query output @@ -77,7 +77,16 @@ DESC desc_col_temp_view key1 struct<> -- !query output org.apache.spark.sql.AnalysisException -Column key1 does not exist; +Column key1 does not exist + + +-- !query +DESC desc_col_temp_view col.x +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +DESC TABLE COLUMN does not support nested column: col.x -- !query @@ -140,6 +149,15 @@ max_col_len 4 histogram NULL +-- !query +DESC desc_col_table key1 +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Column key1 does not exist + + -- !query CREATE TABLE desc_complex_col_table (`a.b` int, col struct) USING PARQUET -- !query schema @@ -188,7 +206,7 @@ DESC FORMATTED desc_complex_col_table col.x struct<> -- !query output org.apache.spark.sql.AnalysisException -DESC TABLE COLUMN command does not support nested data types: col.x; +DESC TABLE COLUMN does not support 
nested column: col.x -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/describe.sql.out b/sql/core/src/test/resources/sql-tests/results/describe.sql.out index a7de033e3a1ac..3b5d8a1396283 100644 --- a/sql/core/src/test/resources/sql-tests/results/describe.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/describe.sql.out @@ -130,7 +130,7 @@ Num Buckets 2 Bucket Columns [`a`] Sort Columns [`b`] Comment table_comment -Table Properties [t=test, e=3] +Table Properties [e=3, t=test] Location [not included in comparison]/{warehouse_dir}/t Storage Properties [a=1, b=2] Partition Provider Catalog @@ -162,7 +162,7 @@ Num Buckets 2 Bucket Columns [`a`] Sort Columns [`b`] Comment table_comment -Table Properties [t=test, e=3] +Table Properties [e=3, t=test] Location [not included in comparison]/{warehouse_dir}/t Storage Properties [a=1, b=2] Partition Provider Catalog @@ -332,7 +332,7 @@ struct<> org.apache.spark.sql.catalyst.analysis.NoSuchPartitionException Partition not found in table 't' database 'default': c -> Us -d -> 2; +d -> 2 -- !query @@ -341,7 +341,7 @@ DESC t PARTITION (c='Us') struct<> -- !query output org.apache.spark.sql.AnalysisException -Partition spec is invalid. The spec (c) must match the partition spec (c, d) defined in table '`default`.`t`'; +Partition spec is invalid. The spec (c) must match the partition spec (c, d) defined in table '`default`.`t`' -- !query @@ -431,7 +431,7 @@ DESC temp_v PARTITION (c='Us', d=1) struct<> -- !query output org.apache.spark.sql.AnalysisException -DESC PARTITION is not allowed on a temporary view: temp_v; +DESC PARTITION is not allowed on a temporary view: temp_v -- !query @@ -477,7 +477,7 @@ View Text SELECT * FROM t View Original Text SELECT * FROM t View Catalog and Namespace spark_catalog.default View Query Output Columns [a, b, c, d] -Table Properties [view.query.out.col.3=d, view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=4, view.query.out.col.1=b, view.catalogAndNamespace.part.0=spark_catalog, view.query.out.col.2=c, view.catalogAndNamespace.part.1=default] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=default, view.query.out.col.0=a, view.query.out.col.1=b, view.query.out.col.2=c, view.query.out.col.3=d, view.query.out.numCols=4, view.referredTempFunctionsNames=[], view.referredTempViewNames=[]] -- !query @@ -501,7 +501,7 @@ View Text SELECT * FROM t View Original Text SELECT * FROM t View Catalog and Namespace spark_catalog.default View Query Output Columns [a, b, c, d] -Table Properties [view.query.out.col.3=d, view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=4, view.query.out.col.1=b, view.catalogAndNamespace.part.0=spark_catalog, view.query.out.col.2=c, view.catalogAndNamespace.part.1=default] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=default, view.query.out.col.0=a, view.query.out.col.1=b, view.query.out.col.2=c, view.query.out.col.3=d, view.query.out.numCols=4, view.referredTempFunctionsNames=[], view.referredTempViewNames=[]] -- !query @@ -510,7 +510,7 @@ DESC v PARTITION (c='Us', d=1) struct<> -- !query output org.apache.spark.sql.AnalysisException -DESC PARTITION is not allowed on a view: v; +DESC PARTITION is not allowed on a view: v -- !query @@ -540,7 +540,7 @@ struct -- !query output == Parsed Logical Plan == 'DescribeRelation false -+- 
'UnresolvedTableOrView [t] ++- 'UnresolvedTableOrView [t], DESCRIBE TABLE, true == Analyzed Logical Plan == col_name: string, data_type: string, comment: string @@ -561,7 +561,7 @@ struct -- !query output == Physical Plan == Execute DescribeColumnCommand - +- DescribeColumnCommand `default`.`t`, [b], false + +- DescribeColumnCommand `default`.`t`, [spark_catalog, default, t, b], false -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/except-all.sql.out b/sql/core/src/test/resources/sql-tests/results/except-all.sql.out index 601ff8f024214..a1fe952e2c032 100644 --- a/sql/core/src/test/resources/sql-tests/results/except-all.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/except-all.sql.out @@ -141,7 +141,7 @@ SELECT array(1) struct<> -- !query output org.apache.spark.sql.AnalysisException -ExceptAll can only be performed on tables with the compatible column types. array <> int at the first column of the second table; +ExceptAll can only be performed on tables with the compatible column types. array <> int at the first column of the second table -- !query @@ -213,7 +213,7 @@ SELECT k, v FROM tab4 struct<> -- !query output org.apache.spark.sql.AnalysisException -ExceptAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns; +ExceptAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/explain-aqe.sql.out b/sql/core/src/test/resources/sql-tests/results/explain-aqe.sql.out index 5a59ffa03880f..d68989524d486 100644 --- a/sql/core/src/test/resources/sql-tests/results/explain-aqe.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/explain-aqe.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 23 +-- Number of queries: 24 -- !query @@ -42,6 +42,39 @@ struct spark.sql.codegen.wholeStage true +-- !query +EXPLAIN EXTENDED + SELECT sum(distinct val) + FROM explain_temp1 +-- !query schema +struct +-- !query output +== Parsed Logical Plan == +'Project [unresolvedalias('sum(distinct 'val), None)] ++- 'UnresolvedRelation [explain_temp1], [], false + +== Analyzed Logical Plan == +sum(DISTINCT val): bigint +Aggregate [sum(distinct cast(val#x as bigint)) AS sum(DISTINCT val)#xL] ++- SubqueryAlias spark_catalog.default.explain_temp1 + +- Relation[key#x,val#x] parquet + +== Optimized Logical Plan == +Aggregate [sum(distinct cast(val#x as bigint)) AS sum(DISTINCT val)#xL] ++- Project [val#x] + +- Relation[key#x,val#x] parquet + +== Physical Plan == +AdaptiveSparkPlan isFinalPlan=false ++- HashAggregate(keys=[], functions=[sum(distinct cast(val#x as bigint)#xL)], output=[sum(DISTINCT val)#xL]) + +- Exchange SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +- HashAggregate(keys=[], functions=[partial_sum(distinct cast(val#x as bigint)#xL)], output=[sum#xL]) + +- HashAggregate(keys=[cast(val#x as bigint)#xL], functions=[], output=[cast(val#x as bigint)#xL]) + +- Exchange hashpartitioning(cast(val#x as bigint)#xL, 4), ENSURE_REQUIREMENTS, [id=#x] + +- HashAggregate(keys=[cast(val#x as bigint) AS cast(val#x as bigint)#xL], functions=[], output=[cast(val#x as bigint)#xL]) + +- FileScan parquet default.explain_temp1[val#x] Batched: true, DataFilters: [], Format: Parquet, Location [not included in comparison]/{warehouse_dir}/explain_temp1], PartitionFilters: [], PushedFilters: [], ReadSchema: struct + + -- 
!query EXPLAIN FORMATTED SELECT key, max(val) @@ -54,16 +87,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (8) -+- == Current Plan == - Sort (7) - +- Exchange (6) - +- HashAggregate (5) - +- Exchange (4) - +- HashAggregate (3) - +- Filter (2) - +- Scan parquet default.explain_temp1 (1) -+- == Initial Plan == - Sort (7) ++- Sort (7) +- Exchange (6) +- HashAggregate (5) +- Exchange (4) @@ -92,7 +116,7 @@ Results [2]: [key#x, max#x] (4) Exchange Input [2]: [key#x, max#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (5) HashAggregate Input [2]: [key#x, max#x] @@ -103,7 +127,7 @@ Results [2]: [key#x, max(val#x)#x AS max(val)#x] (6) Exchange Input [2]: [key#x, max(val)#x] -Arguments: rangepartitioning(key#x ASC NULLS FIRST, 4), true, [id=#x] +Arguments: rangepartitioning(key#x ASC NULLS FIRST, 4), ENSURE_REQUIREMENTS, [id=#x] (7) Sort Input [2]: [key#x, max(val)#x] @@ -126,16 +150,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (8) -+- == Current Plan == - Project (7) - +- Filter (6) - +- HashAggregate (5) - +- Exchange (4) - +- HashAggregate (3) - +- Filter (2) - +- Scan parquet default.explain_temp1 (1) -+- == Initial Plan == - Project (7) ++- Project (7) +- Filter (6) +- HashAggregate (5) +- Exchange (4) @@ -164,7 +179,7 @@ Results [2]: [key#x, max#x] (4) Exchange Input [2]: [key#x, max#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (5) HashAggregate Input [2]: [key#x, max#x] @@ -196,17 +211,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (9) -+- == Current Plan == - HashAggregate (8) - +- Exchange (7) - +- HashAggregate (6) - +- Union (5) - :- Filter (2) - : +- Scan parquet default.explain_temp1 (1) - +- Filter (4) - +- Scan parquet default.explain_temp1 (3) -+- == Initial Plan == - HashAggregate (8) ++- HashAggregate (8) +- Exchange (7) +- HashAggregate (6) +- Union (5) @@ -249,7 +254,7 @@ Results [2]: [key#x, val#x] (7) Exchange Input [2]: [key#x, val#x] -Arguments: hashpartitioning(key#x, val#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, val#x, 4), ENSURE_REQUIREMENTS, [id=#x] (8) HashAggregate Input [2]: [key#x, val#x] @@ -274,15 +279,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (7) -+- == Current Plan == - BroadcastHashJoin Inner BuildRight (6) - :- Filter (2) - : +- Scan parquet default.explain_temp1 (1) - +- BroadcastExchange (5) - +- Filter (4) - +- Scan parquet default.explain_temp2 (3) -+- == Initial Plan == - BroadcastHashJoin Inner BuildRight (6) ++- BroadcastHashJoin Inner BuildRight (6) :- Filter (2) : +- Scan parquet default.explain_temp1 (1) +- BroadcastExchange (5) @@ -337,14 +334,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (6) -+- == Current Plan == - BroadcastHashJoin LeftOuter BuildRight (5) - :- Scan parquet default.explain_temp1 (1) - +- BroadcastExchange (4) - +- Filter (3) - +- Scan parquet default.explain_temp2 (2) -+- == Initial Plan == - BroadcastHashJoin LeftOuter BuildRight (5) ++- BroadcastHashJoin LeftOuter BuildRight (5) :- Scan parquet default.explain_temp1 (1) +- BroadcastExchange (4) +- Filter (3) @@ -398,11 +388,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (3) -+- == Current Plan == - Filter (2) - +- Scan parquet default.explain_temp1 (1) -+- == Initial Plan == - Filter (2) ++- Filter (2) +- Scan parquet default.explain_temp1 (1) @@ -421,6 +407,101 @@ Condition : 
(((isnotnull(key#x) AND isnotnull(val#x)) AND (key#x = Subquery subq Output [2]: [key#x, val#x] Arguments: isFinalPlan=false +===== Subqueries ===== + +Subquery:1 Hosting operator id = 2 Hosting Expression = Subquery subquery#x, [id=#x] +AdaptiveSparkPlan (10) ++- HashAggregate (9) + +- Exchange (8) + +- HashAggregate (7) + +- Project (6) + +- Filter (5) + +- Scan parquet default.explain_temp2 (4) + + +(4) Scan parquet default.explain_temp2 +Output [2]: [key#x, val#x] +Batched: true +Location [not included in comparison]/{warehouse_dir}/explain_temp2] +PushedFilters: [IsNotNull(key), IsNotNull(val), EqualTo(val,2)] +ReadSchema: struct + +(5) Filter +Input [2]: [key#x, val#x] +Condition : (((isnotnull(key#x) AND isnotnull(val#x)) AND (key#x = Subquery subquery#x, [id=#x])) AND (val#x = 2)) + +(6) Project +Output [1]: [key#x] +Input [2]: [key#x, val#x] + +(7) HashAggregate +Input [1]: [key#x] +Keys: [] +Functions [1]: [partial_max(key#x)] +Aggregate Attributes [1]: [max#x] +Results [1]: [max#x] + +(8) Exchange +Input [1]: [max#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +(9) HashAggregate +Input [1]: [max#x] +Keys: [] +Functions [1]: [max(key#x)] +Aggregate Attributes [1]: [max(key#x)#x] +Results [1]: [max(key#x)#x AS max(key)#x] + +(10) AdaptiveSparkPlan +Output [1]: [max(key)#x] +Arguments: isFinalPlan=false + +Subquery:2 Hosting operator id = 5 Hosting Expression = Subquery subquery#x, [id=#x] +AdaptiveSparkPlan (17) ++- HashAggregate (16) + +- Exchange (15) + +- HashAggregate (14) + +- Project (13) + +- Filter (12) + +- Scan parquet default.explain_temp3 (11) + + +(11) Scan parquet default.explain_temp3 +Output [2]: [key#x, val#x] +Batched: true +Location [not included in comparison]/{warehouse_dir}/explain_temp3] +PushedFilters: [IsNotNull(val), GreaterThan(val,0)] +ReadSchema: struct + +(12) Filter +Input [2]: [key#x, val#x] +Condition : (isnotnull(val#x) AND (val#x > 0)) + +(13) Project +Output [1]: [key#x] +Input [2]: [key#x, val#x] + +(14) HashAggregate +Input [1]: [key#x] +Keys: [] +Functions [1]: [partial_max(key#x)] +Aggregate Attributes [1]: [max#x] +Results [1]: [max#x] + +(15) Exchange +Input [1]: [max#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +(16) HashAggregate +Input [1]: [max#x] +Keys: [] +Functions [1]: [max(key#x)] +Aggregate Attributes [1]: [max(key#x)#x] +Results [1]: [max(key#x)#x AS max(key)#x] + +(17) AdaptiveSparkPlan +Output [1]: [max(key)#x] +Arguments: isFinalPlan=false -- !query EXPLAIN FORMATTED @@ -438,11 +519,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (3) -+- == Current Plan == - Filter (2) - +- Scan parquet default.explain_temp1 (1) -+- == Initial Plan == - Filter (2) ++- Filter (2) +- Scan parquet default.explain_temp1 (1) @@ -460,6 +537,101 @@ Condition : ((key#x = Subquery subquery#x, [id=#x]) OR (cast(key#x as double) = Output [2]: [key#x, val#x] Arguments: isFinalPlan=false +===== Subqueries ===== + +Subquery:1 Hosting operator id = 2 Hosting Expression = Subquery subquery#x, [id=#x] +AdaptiveSparkPlan (10) ++- HashAggregate (9) + +- Exchange (8) + +- HashAggregate (7) + +- Project (6) + +- Filter (5) + +- Scan parquet default.explain_temp2 (4) + + +(4) Scan parquet default.explain_temp2 +Output [2]: [key#x, val#x] +Batched: true +Location [not included in comparison]/{warehouse_dir}/explain_temp2] +PushedFilters: [IsNotNull(val), GreaterThan(val,0)] +ReadSchema: struct + +(5) Filter +Input [2]: [key#x, val#x] +Condition : (isnotnull(val#x) AND (val#x > 0)) + +(6) Project +Output [1]: 
[key#x] +Input [2]: [key#x, val#x] + +(7) HashAggregate +Input [1]: [key#x] +Keys: [] +Functions [1]: [partial_max(key#x)] +Aggregate Attributes [1]: [max#x] +Results [1]: [max#x] + +(8) Exchange +Input [1]: [max#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +(9) HashAggregate +Input [1]: [max#x] +Keys: [] +Functions [1]: [max(key#x)] +Aggregate Attributes [1]: [max(key#x)#x] +Results [1]: [max(key#x)#x AS max(key)#x] + +(10) AdaptiveSparkPlan +Output [1]: [max(key)#x] +Arguments: isFinalPlan=false + +Subquery:2 Hosting operator id = 2 Hosting Expression = Subquery subquery#x, [id=#x] +AdaptiveSparkPlan (17) ++- HashAggregate (16) + +- Exchange (15) + +- HashAggregate (14) + +- Project (13) + +- Filter (12) + +- Scan parquet default.explain_temp3 (11) + + +(11) Scan parquet default.explain_temp3 +Output [2]: [key#x, val#x] +Batched: true +Location [not included in comparison]/{warehouse_dir}/explain_temp3] +PushedFilters: [IsNotNull(val), GreaterThan(val,0)] +ReadSchema: struct + +(12) Filter +Input [2]: [key#x, val#x] +Condition : (isnotnull(val#x) AND (val#x > 0)) + +(13) Project +Output [1]: [key#x] +Input [2]: [key#x, val#x] + +(14) HashAggregate +Input [1]: [key#x] +Keys: [] +Functions [1]: [partial_avg(cast(key#x as bigint))] +Aggregate Attributes [2]: [sum#x, count#xL] +Results [2]: [sum#x, count#xL] + +(15) Exchange +Input [2]: [sum#x, count#xL] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +(16) HashAggregate +Input [2]: [sum#x, count#xL] +Keys: [] +Functions [1]: [avg(cast(key#x as bigint))] +Aggregate Attributes [1]: [avg(cast(key#x as bigint))#x] +Results [1]: [avg(cast(key#x as bigint))#x AS avg(key)#x] + +(17) AdaptiveSparkPlan +Output [1]: [avg(key)#x] +Arguments: isFinalPlan=false -- !query EXPLAIN FORMATTED @@ -470,11 +642,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (3) -+- == Current Plan == - Project (2) - +- Scan parquet default.explain_temp1 (1) -+- == Initial Plan == - Project (2) ++- Project (2) +- Scan parquet default.explain_temp1 (1) @@ -492,6 +660,79 @@ Input: [] Output [1]: [(scalarsubquery() + scalarsubquery())#x] Arguments: isFinalPlan=false +===== Subqueries ===== + +Subquery:1 Hosting operator id = 2 Hosting Expression = Subquery subquery#x, [id=#x] +AdaptiveSparkPlan (8) ++- HashAggregate (7) + +- Exchange (6) + +- HashAggregate (5) + +- Scan parquet default.explain_temp1 (4) + + +(4) Scan parquet default.explain_temp1 +Output [1]: [key#x] +Batched: true +Location [not included in comparison]/{warehouse_dir}/explain_temp1] +ReadSchema: struct + +(5) HashAggregate +Input [1]: [key#x] +Keys: [] +Functions [1]: [partial_avg(cast(key#x as bigint))] +Aggregate Attributes [2]: [sum#x, count#xL] +Results [2]: [sum#x, count#xL] + +(6) Exchange +Input [2]: [sum#x, count#xL] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +(7) HashAggregate +Input [2]: [sum#x, count#xL] +Keys: [] +Functions [1]: [avg(cast(key#x as bigint))] +Aggregate Attributes [1]: [avg(cast(key#x as bigint))#x] +Results [1]: [avg(cast(key#x as bigint))#x AS avg(key)#x] + +(8) AdaptiveSparkPlan +Output [1]: [avg(key)#x] +Arguments: isFinalPlan=false + +Subquery:2 Hosting operator id = 2 Hosting Expression = Subquery subquery#x, [id=#x] +AdaptiveSparkPlan (13) ++- HashAggregate (12) + +- Exchange (11) + +- HashAggregate (10) + +- Scan parquet default.explain_temp1 (9) + + +(9) Scan parquet default.explain_temp1 +Output [1]: [key#x] +Batched: true +Location [not included in comparison]/{warehouse_dir}/explain_temp1] +ReadSchema: struct 
+ +(10) HashAggregate +Input [1]: [key#x] +Keys: [] +Functions [1]: [partial_avg(cast(key#x as bigint))] +Aggregate Attributes [2]: [sum#x, count#xL] +Results [2]: [sum#x, count#xL] + +(11) Exchange +Input [2]: [sum#x, count#xL] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +(12) HashAggregate +Input [2]: [sum#x, count#xL] +Keys: [] +Functions [1]: [avg(cast(key#x as bigint))] +Aggregate Attributes [1]: [avg(cast(key#x as bigint))#x] +Results [1]: [avg(cast(key#x as bigint))#x AS avg(key)#x] + +(13) AdaptiveSparkPlan +Output [1]: [avg(key)#x] +Arguments: isFinalPlan=false -- !query EXPLAIN FORMATTED @@ -506,15 +747,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (7) -+- == Current Plan == - BroadcastHashJoin Inner BuildRight (6) - :- Filter (2) - : +- Scan parquet default.explain_temp1 (1) - +- BroadcastExchange (5) - +- Filter (4) - +- Scan parquet default.explain_temp1 (3) -+- == Initial Plan == - BroadcastHashJoin Inner BuildRight (6) ++- BroadcastHashJoin Inner BuildRight (6) :- Filter (2) : +- Scan parquet default.explain_temp1 (1) +- BroadcastExchange (5) @@ -572,21 +805,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (13) -+- == Current Plan == - BroadcastHashJoin Inner BuildRight (12) - :- HashAggregate (5) - : +- Exchange (4) - : +- HashAggregate (3) - : +- Filter (2) - : +- Scan parquet default.explain_temp1 (1) - +- BroadcastExchange (11) - +- HashAggregate (10) - +- Exchange (9) - +- HashAggregate (8) - +- Filter (7) - +- Scan parquet default.explain_temp1 (6) -+- == Initial Plan == - BroadcastHashJoin Inner BuildRight (12) ++- BroadcastHashJoin Inner BuildRight (12) :- HashAggregate (5) : +- Exchange (4) : +- HashAggregate (3) @@ -620,7 +839,7 @@ Results [2]: [key#x, max#x] (4) Exchange Input [2]: [key#x, max#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (5) HashAggregate Input [2]: [key#x, max#x] @@ -649,7 +868,7 @@ Results [2]: [key#x, max#x] (9) Exchange Input [2]: [key#x, max#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (10) HashAggregate Input [2]: [key#x, max#x] @@ -693,7 +912,7 @@ Output: [] Arguments: `default`.`explain_view`, SELECT key, val FROM explain_temp1, false, false, PersistedView (3) UnresolvedRelation -Arguments: [explain_temp1], [] +Arguments: [explain_temp1], [], false (4) Project Arguments: ['key, 'val] @@ -710,13 +929,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (5) -+- == Current Plan == - HashAggregate (4) - +- Exchange (3) - +- HashAggregate (2) - +- Scan parquet default.explain_temp1 (1) -+- == Initial Plan == - HashAggregate (4) ++- HashAggregate (4) +- Exchange (3) +- HashAggregate (2) +- Scan parquet default.explain_temp1 (1) @@ -737,7 +950,7 @@ Results [3]: [count#xL, sum#xL, count#xL] (3) Exchange Input [3]: [count#xL, sum#xL, count#xL] -Arguments: SinglePartition, true, [id=#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] (4) HashAggregate Input [3]: [count#xL, sum#xL, count#xL] @@ -761,13 +974,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (5) -+- == Current Plan == - ObjectHashAggregate (4) - +- Exchange (3) - +- ObjectHashAggregate (2) - +- Scan parquet default.explain_temp4 (1) -+- == Initial Plan == - ObjectHashAggregate (4) ++- ObjectHashAggregate (4) +- Exchange (3) +- ObjectHashAggregate (2) +- Scan parquet default.explain_temp4 (1) @@ -788,7 +995,7 @@ Results [2]: [key#x, 
buf#x] (3) Exchange Input [2]: [key#x, buf#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (4) ObjectHashAggregate Input [2]: [key#x, buf#x] @@ -812,15 +1019,7 @@ struct -- !query output == Physical Plan == AdaptiveSparkPlan (7) -+- == Current Plan == - SortAggregate (6) - +- Sort (5) - +- Exchange (4) - +- SortAggregate (3) - +- Sort (2) - +- Scan parquet default.explain_temp4 (1) -+- == Initial Plan == - SortAggregate (6) ++- SortAggregate (6) +- Sort (5) +- Exchange (4) +- SortAggregate (3) @@ -847,7 +1046,7 @@ Results [2]: [key#x, min#x] (4) Exchange Input [2]: [key#x, min#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (5) Sort Input [2]: [key#x, min#x] diff --git a/sql/core/src/test/resources/sql-tests/results/explain.sql.out b/sql/core/src/test/resources/sql-tests/results/explain.sql.out index f28c408407c3f..a4c92382750e8 100644 --- a/sql/core/src/test/resources/sql-tests/results/explain.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/explain.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 23 +-- Number of queries: 24 -- !query @@ -42,6 +42,39 @@ struct spark.sql.codegen.wholeStage true +-- !query +EXPLAIN EXTENDED + SELECT sum(distinct val) + FROM explain_temp1 +-- !query schema +struct +-- !query output +== Parsed Logical Plan == +'Project [unresolvedalias('sum(distinct 'val), None)] ++- 'UnresolvedRelation [explain_temp1], [], false + +== Analyzed Logical Plan == +sum(DISTINCT val): bigint +Aggregate [sum(distinct cast(val#x as bigint)) AS sum(DISTINCT val)#xL] ++- SubqueryAlias spark_catalog.default.explain_temp1 + +- Relation[key#x,val#x] parquet + +== Optimized Logical Plan == +Aggregate [sum(distinct cast(val#x as bigint)) AS sum(DISTINCT val)#xL] ++- Project [val#x] + +- Relation[key#x,val#x] parquet + +== Physical Plan == +*HashAggregate(keys=[], functions=[sum(distinct cast(val#x as bigint)#xL)], output=[sum(DISTINCT val)#xL]) ++- Exchange SinglePartition, ENSURE_REQUIREMENTS, [id=#x] + +- *HashAggregate(keys=[], functions=[partial_sum(distinct cast(val#x as bigint)#xL)], output=[sum#xL]) + +- *HashAggregate(keys=[cast(val#x as bigint)#xL], functions=[], output=[cast(val#x as bigint)#xL]) + +- Exchange hashpartitioning(cast(val#x as bigint)#xL, 4), ENSURE_REQUIREMENTS, [id=#x] + +- *HashAggregate(keys=[cast(val#x as bigint) AS cast(val#x as bigint)#xL], functions=[], output=[cast(val#x as bigint)#xL]) + +- *ColumnarToRow + +- FileScan parquet default.explain_temp1[val#x] Batched: true, DataFilters: [], Format: Parquet, Location [not included in comparison]/{warehouse_dir}/explain_temp1], PartitionFilters: [], PushedFilters: [], ReadSchema: struct + + -- !query EXPLAIN FORMATTED SELECT key, max(val) @@ -86,7 +119,7 @@ Results [2]: [key#x, max#x] (5) Exchange Input [2]: [key#x, max#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (6) HashAggregate [codegen id : 2] Input [2]: [key#x, max#x] @@ -97,7 +130,7 @@ Results [2]: [key#x, max(val#x)#x AS max(val)#x] (7) Exchange Input [2]: [key#x, max(val)#x] -Arguments: rangepartitioning(key#x ASC NULLS FIRST, 4), true, [id=#x] +Arguments: rangepartitioning(key#x ASC NULLS FIRST, 4), ENSURE_REQUIREMENTS, [id=#x] (8) Sort [codegen id : 3] Input [2]: [key#x, max(val)#x] @@ -148,7 +181,7 @@ Results [2]: [key#x, max#x] (5) Exchange Input [2]: [key#x, 
max#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (6) HashAggregate [codegen id : 2] Input [2]: [key#x, max#x] @@ -226,7 +259,7 @@ Results [2]: [key#x, val#x] (9) Exchange Input [2]: [key#x, val#x] -Arguments: hashpartitioning(key#x, val#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, val#x, 4), ENSURE_REQUIREMENTS, [id=#x] (10) HashAggregate [codegen id : 4] Input [2]: [key#x, val#x] @@ -419,7 +452,7 @@ Results [1]: [max#x] (9) Exchange Input [1]: [max#x] -Arguments: SinglePartition, true, [id=#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] (10) HashAggregate [codegen id : 2] Input [1]: [max#x] @@ -465,7 +498,7 @@ Results [1]: [max#x] (16) Exchange Input [1]: [max#x] -Arguments: SinglePartition, true, [id=#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] (17) HashAggregate [codegen id : 2] Input [1]: [max#x] @@ -547,7 +580,7 @@ Results [1]: [max#x] (9) Exchange Input [1]: [max#x] -Arguments: SinglePartition, true, [id=#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] (10) HashAggregate [codegen id : 2] Input [1]: [max#x] @@ -593,7 +626,7 @@ Results [2]: [sum#x, count#xL] (16) Exchange Input [2]: [sum#x, count#xL] -Arguments: SinglePartition, true, [id=#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] (17) HashAggregate [codegen id : 2] Input [2]: [sum#x, count#xL] @@ -657,7 +690,7 @@ Results [2]: [sum#x, count#xL] (7) Exchange Input [2]: [sum#x, count#xL] -Arguments: SinglePartition, true, [id=#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] (8) HashAggregate [codegen id : 2] Input [2]: [sum#x, count#xL] @@ -777,7 +810,7 @@ Results [2]: [key#x, max#x] (5) Exchange Input [2]: [key#x, max#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (6) HashAggregate [codegen id : 4] Input [2]: [key#x, max#x] @@ -827,7 +860,7 @@ Output: [] Arguments: `default`.`explain_view`, SELECT key, val FROM explain_temp1, false, false, PersistedView (3) UnresolvedRelation -Arguments: [explain_temp1], [] +Arguments: [explain_temp1], [], false (4) Project Arguments: ['key, 'val] @@ -845,7 +878,7 @@ struct == Physical Plan == * HashAggregate (5) +- Exchange (4) - +- HashAggregate (3) + +- * HashAggregate (3) +- * ColumnarToRow (2) +- Scan parquet default.explain_temp1 (1) @@ -859,7 +892,7 @@ ReadSchema: struct (2) ColumnarToRow [codegen id : 1] Input [2]: [key#x, val#x] -(3) HashAggregate +(3) HashAggregate [codegen id : 1] Input [2]: [key#x, val#x] Keys: [] Functions [3]: [partial_count(val#x), partial_sum(cast(key#x as bigint)), partial_count(key#x) FILTER (WHERE (val#x > 1))] @@ -868,7 +901,7 @@ Results [3]: [count#xL, sum#xL, count#xL] (4) Exchange Input [3]: [count#xL, sum#xL, count#xL] -Arguments: SinglePartition, true, [id=#x] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#x] (5) HashAggregate [codegen id : 2] Input [3]: [count#xL, sum#xL, count#xL] @@ -912,7 +945,7 @@ Results [2]: [key#x, buf#x] (4) Exchange Input [2]: [key#x, buf#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (5) ObjectHashAggregate Input [2]: [key#x, buf#x] @@ -962,7 +995,7 @@ Results [2]: [key#x, min#x] (5) Exchange Input [2]: [key#x, min#x] -Arguments: hashpartitioning(key#x, 4), true, [id=#x] +Arguments: hashpartitioning(key#x, 4), ENSURE_REQUIREMENTS, [id=#x] (6) Sort [codegen id : 2] Input [2]: [key#x, min#x] diff --git 
a/sql/core/src/test/resources/sql-tests/results/extract.sql.out b/sql/core/src/test/resources/sql-tests/results/extract.sql.out index 9d3fe5d17fafa..5415b2c30a308 100644 --- a/sql/core/src/test/resources/sql-tests/results/extract.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/extract.sql.out @@ -320,7 +320,7 @@ select extract(not_supported from c) from t struct<> -- !query output org.apache.spark.sql.AnalysisException -Literals of type 'not_supported' are currently not supported for the string type.;; line 1 pos 7 +Literals of type 'not_supported' are currently not supported for the string type.; line 1 pos 7 -- !query @@ -329,7 +329,7 @@ select extract(not_supported from i) from t struct<> -- !query output org.apache.spark.sql.AnalysisException -Literals of type 'not_supported' are currently not supported for the interval type.;; line 1 pos 7 +Literals of type 'not_supported' are currently not supported for the interval type.; line 1 pos 7 -- !query @@ -642,7 +642,7 @@ select date_part('not_supported', c) from t struct<> -- !query output org.apache.spark.sql.AnalysisException -Literals of type 'not_supported' are currently not supported for the string type.;; line 1 pos 7 +Literals of type 'not_supported' are currently not supported for the string type.; line 1 pos 7 -- !query @@ -651,7 +651,7 @@ select date_part(c, c) from t struct<> -- !query output org.apache.spark.sql.AnalysisException -The field parameter needs to be a foldable string value.;; line 1 pos 7 +The field parameter needs to be a foldable string value.; line 1 pos 7 -- !query @@ -668,7 +668,7 @@ select date_part(i, i) from t struct<> -- !query output org.apache.spark.sql.AnalysisException -The field parameter needs to be a foldable string value.;; line 1 pos 7 +The field parameter needs to be a foldable string value.; line 1 pos 7 -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out b/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out index c4f9ea1fe026a..b820fb49b09ba 100644 --- a/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/group-analytics.sql.out @@ -210,7 +210,7 @@ SELECT course, year, GROUPING(course) FROM courseSales GROUP BY course, year struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping() can only be used with GroupingSets/Cube/Rollup; +grouping() can only be used with GroupingSets/Cube/Rollup -- !query @@ -219,7 +219,7 @@ SELECT course, year, GROUPING_ID(course, year) FROM courseSales GROUP BY course, struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -255,7 +255,7 @@ SELECT course, year FROM courseSales GROUP BY course, year HAVING GROUPING(cours struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -264,7 +264,7 @@ SELECT course, year FROM courseSales GROUP BY course, year HAVING GROUPING_ID(co struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -319,7 +319,7 @@ SELECT course, year FROM courseSales GROUP BY course, year ORDER BY GROUPING(cou struct<> -- 
!query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -328,7 +328,7 @@ SELECT course, year FROM courseSales GROUP BY course, year ORDER BY GROUPING_ID( struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out b/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out index 89a4da116a6b3..55a41907dd3b4 100644 --- a/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/group-by-filter.sql.out @@ -51,7 +51,7 @@ SELECT a, COUNT(b) FILTER (WHERE a >= 2) FROM testData struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and 'testdata.`a`' is not an aggregate function. Wrap '(count(testdata.`b`) FILTER (WHERE (testdata.`a` >= 2)) AS `count(b) FILTER (WHERE (a >= 2))`)' in windowing function(s) or wrap 'testdata.`a`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and 'testdata.`a`' is not an aggregate function. Wrap '(count(testdata.`b`) FILTER (WHERE (testdata.`a` >= 2)) AS `count(b) FILTER (WHERE (a >= 2))`)' in windowing function(s) or wrap 'testdata.`a`' in first() (or first_value) if you don't care which value you get. -- !query @@ -231,7 +231,7 @@ SELECT a, COUNT(b) FILTER (WHERE a != 2) FROM testData GROUP BY b struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query @@ -711,7 +711,7 @@ SELECT a + 2, COUNT(b) FILTER (WHERE b IN (1, 2)) FROM testData GROUP BY a + 1 struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. 
-- !query @@ -795,14 +795,15 @@ IN/EXISTS predicate sub-queries can only be used in Filter/Join and a few comman : +- Project [state#x] : +- Filter (dept_id#x = outer(dept_id#x)) : +- SubqueryAlias dept -: +- Project [dept_id#x, dept_name#x, state#x] -: +- SubqueryAlias DEPT -: +- LocalRelation [dept_id#x, dept_name#x, state#x] +: +- View (`DEPT`, [dept_id#x,dept_name#x,state#x]) +: +- Project [dept_id#x, dept_name#x, state#x] +: +- SubqueryAlias DEPT +: +- LocalRelation [dept_id#x, dept_name#x, state#x] +- SubqueryAlias emp - +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] - +- SubqueryAlias EMP - +- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -; + +- View (`EMP`, [id#x,emp_name#x,hiredate#x,salary#x,dept_id#x]) + +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] + +- SubqueryAlias EMP + +- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -- !query @@ -821,14 +822,15 @@ IN/EXISTS predicate sub-queries can only be used in Filter/Join and a few comman : +- Project [state#x] : +- Filter (dept_id#x = outer(dept_id#x)) : +- SubqueryAlias dept -: +- Project [dept_id#x, dept_name#x, state#x] -: +- SubqueryAlias DEPT -: +- LocalRelation [dept_id#x, dept_name#x, state#x] +: +- View (`DEPT`, [dept_id#x,dept_name#x,state#x]) +: +- Project [dept_id#x, dept_name#x, state#x] +: +- SubqueryAlias DEPT +: +- LocalRelation [dept_id#x, dept_name#x, state#x] +- SubqueryAlias emp - +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] - +- SubqueryAlias EMP - +- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -; + +- View (`EMP`, [id#x,emp_name#x,hiredate#x,salary#x,dept_id#x]) + +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] + +- SubqueryAlias EMP + +- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -- !query @@ -846,14 +848,15 @@ IN/EXISTS predicate sub-queries can only be used in Filter/Join and a few comman : +- Distinct : +- Project [dept_id#x] : +- SubqueryAlias dept -: +- Project [dept_id#x, dept_name#x, state#x] -: +- SubqueryAlias DEPT -: +- LocalRelation [dept_id#x, dept_name#x, state#x] +: +- View (`DEPT`, [dept_id#x,dept_name#x,state#x]) +: +- Project [dept_id#x, dept_name#x, state#x] +: +- SubqueryAlias DEPT +: +- LocalRelation [dept_id#x, dept_name#x, state#x] +- SubqueryAlias emp - +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] - +- SubqueryAlias EMP - +- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -; + +- View (`EMP`, [id#x,emp_name#x,hiredate#x,salary#x,dept_id#x]) + +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] + +- SubqueryAlias EMP + +- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -- !query @@ -871,14 +874,15 @@ IN/EXISTS predicate sub-queries can only be used in Filter/Join and a few comman : +- Distinct : +- Project [dept_id#x] : +- SubqueryAlias dept -: +- Project [dept_id#x, dept_name#x, state#x] -: +- SubqueryAlias DEPT -: +- LocalRelation [dept_id#x, dept_name#x, state#x] +: +- View (`DEPT`, [dept_id#x,dept_name#x,state#x]) +: +- Project [dept_id#x, dept_name#x, state#x] +: +- SubqueryAlias DEPT +: +- LocalRelation [dept_id#x, dept_name#x, state#x] +- SubqueryAlias emp - +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] - +- SubqueryAlias EMP - +- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -; + +- View (`EMP`, [id#x,emp_name#x,hiredate#x,salary#x,dept_id#x]) + +- Project [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] + +- SubqueryAlias EMP + 
+- LocalRelation [id#x, emp_name#x, hiredate#x, salary#x, dept_id#x] -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out b/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out index bf9f606a2224e..fedc7205ae559 100644 --- a/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/group-by-ordinal.sql.out @@ -122,7 +122,7 @@ select a, b, sum(b) from data group by 3 struct<> -- !query output org.apache.spark.sql.AnalysisException -aggregate functions are not allowed in GROUP BY, but found sum(CAST(data.`b` AS BIGINT)); +aggregate functions are not allowed in GROUP BY, but found sum(CAST(data.`b` AS BIGINT)) -- !query @@ -131,7 +131,7 @@ select a, b, sum(b) + 2 from data group by 3 struct<> -- !query output org.apache.spark.sql.AnalysisException -aggregate functions are not allowed in GROUP BY, but found (sum(CAST(data.`b` AS BIGINT)) + CAST(2 AS BIGINT)); +aggregate functions are not allowed in GROUP BY, but found (sum(CAST(data.`b` AS BIGINT)) + CAST(2 AS BIGINT)) -- !query @@ -155,7 +155,7 @@ select * from data group by a, b, 1 struct<> -- !query output org.apache.spark.sql.AnalysisException -Star (*) is not allowed in select list when GROUP BY ordinal position is used; +Star (*) is not allowed in select list when GROUP BY ordinal position is used -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/group-by.sql.out b/sql/core/src/test/resources/sql-tests/results/group-by.sql.out index 50eb2a9f22f69..cc07cd64f3a89 100644 --- a/sql/core/src/test/resources/sql-tests/results/group-by.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/group-by.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 56 +-- Number of queries: 62 -- !query @@ -18,7 +18,7 @@ SELECT a, COUNT(b) FROM testData struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and 'testdata.`a`' is not an aggregate function. Wrap '(count(testdata.`b`) AS `count(b)`)' in windowing function(s) or wrap 'testdata.`a`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and 'testdata.`a`' is not an aggregate function. Wrap '(count(testdata.`b`) AS `count(b)`)' in windowing function(s) or wrap 'testdata.`a`' in first() (or first_value) if you don't care which value you get. -- !query @@ -46,7 +46,7 @@ SELECT a, COUNT(b) FROM testData GROUP BY b struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query @@ -110,7 +110,7 @@ SELECT a + 2, COUNT(b) FROM testData GROUP BY a + 1 struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. 
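The group-by-ordinal hunks above likewise only trim the trailing semicolon; the rule they record is that a GROUP BY ordinal may not point at an aggregate expression. A minimal sketch, assuming the data(a, b) view from group-by-ordinal.sql (illustrative, not part of the diff):

-- Fails: ordinal 3 resolves to the aggregate sum(b).
SELECT a, b, sum(b) FROM data GROUP BY 3;
-- Grouping by the non-aggregate ordinals is accepted.
SELECT a, b, sum(b) FROM data GROUP BY 1, 2;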
-- !query @@ -167,7 +167,7 @@ SELECT COUNT(b) AS k FROM testData GROUP BY k struct<> -- !query output org.apache.spark.sql.AnalysisException -aggregate functions are not allowed in GROUP BY, but found count(testdata.`b`); +aggregate functions are not allowed in GROUP BY, but found count(testdata.`b`) -- !query @@ -185,7 +185,7 @@ SELECT k AS a, COUNT(v) FROM testDataHasSameNameWithAlias GROUP BY a struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdatahassamenamewithalias.`k`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdatahassamenamewithalias.`k`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query @@ -274,7 +274,68 @@ SELECT id FROM range(10) HAVING id > 0 struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and '`id`' is not an aggregate function. Wrap '()' in windowing function(s) or wrap '`id`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and '`id`' is not an aggregate function. Wrap '()' in windowing function(s) or wrap '`id`' in first() (or first_value) if you don't care which value you get. + + +-- !query +SET spark.sql.legacy.parser.havingWithoutGroupByAsWhere=true +-- !query schema +struct +-- !query output +spark.sql.legacy.parser.havingWithoutGroupByAsWhere true + + +-- !query +SELECT 1 FROM range(10) HAVING true +-- !query schema +struct<1:int> +-- !query output +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + + +-- !query +SELECT 1 FROM range(10) HAVING MAX(id) > 0 +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException + +Aggregate/Window/Generate expressions are not valid in where clause of the query. +Expression in where clause: [(max(`id`) > CAST(0 AS BIGINT))] +Invalid expressions: [max(`id`)] + + +-- !query +SELECT id FROM range(10) HAVING id > 0 +-- !query schema +struct +-- !query output +1 +2 +3 +4 +5 +6 +7 +8 +9 + + +-- !query +SET spark.sql.legacy.parser.havingWithoutGroupByAsWhere=false +-- !query schema +struct +-- !query output +spark.sql.legacy.parser.havingWithoutGroupByAsWhere false -- !query @@ -548,7 +609,7 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. Expression in where clause: [(count(1) > 1L)] -Invalid expressions: [count(1)]; +Invalid expressions: [count(1)] -- !query @@ -560,7 +621,7 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. Expression in where clause: [((count(1) + 1L) > 1L)] -Invalid expressions: [count(1)]; +Invalid expressions: [count(1)] -- !query @@ -572,4 +633,12 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. 
Expression in where clause: [(((test_agg.`k` = 1) OR (test_agg.`k` = 2)) OR (((count(1) + 1L) > 1L) OR (max(test_agg.`k`) > 1)))] -Invalid expressions: [count(1), max(test_agg.`k`)]; +Invalid expressions: [count(1), max(test_agg.`k`)] + + +-- !query +SELECT AVG(DISTINCT decimal_col), SUM(DISTINCT decimal_col) FROM VALUES (CAST(1 AS DECIMAL(9, 0))) t(decimal_col) +-- !query schema +struct +-- !query output +1.0000 1 diff --git a/sql/core/src/test/resources/sql-tests/results/grouping_set.sql.out b/sql/core/src/test/resources/sql-tests/results/grouping_set.sql.out index 7089e10cdef27..e1f94ddd02fe5 100644 --- a/sql/core/src/test/resources/sql-tests/results/grouping_set.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/grouping_set.sql.out @@ -165,7 +165,7 @@ SELECT c1 FROM (values (1,2), (3,2)) t(c1, c2) GROUP BY GROUPING SETS (()) struct<> -- !query output org.apache.spark.sql.AnalysisException -expression '`c1`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression '`c1`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/having.sql.out b/sql/core/src/test/resources/sql-tests/results/having.sql.out index 1b3ac7865159f..237015d06ce81 100644 --- a/sql/core/src/test/resources/sql-tests/results/having.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/having.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 9 +-- Number of queries: 13 -- !query @@ -81,3 +81,35 @@ SELECT SUM(a) AS b FROM VALUES (1, 10), (2, 20) AS T(a, b) GROUP BY ROLLUP(a, b) struct -- !query output 2 + + +-- !query +SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY GROUPING SETS(t.c1) HAVING t.c1 = 1 +-- !query schema +struct +-- !query output +1 + + +-- !query +SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY CUBE(t.c1) HAVING t.c1 = 1 +-- !query schema +struct +-- !query output +1 + + +-- !query +SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY ROLLUP(t.c1) HAVING t.c1 = 1 +-- !query schema +struct +-- !query output +1 + + +-- !query +SELECT c1 FROM VALUES (1, 2) as t(c1, c2) GROUP BY t.c1 HAVING t.c1 = 1 +-- !query schema +struct +-- !query output +1 diff --git a/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out b/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out index b99f63393cc4d..caba8c6942c55 100644 --- a/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/intersect-all.sql.out @@ -98,7 +98,7 @@ SELECT array(1), 2 struct<> -- !query output org.apache.spark.sql.AnalysisException -IntersectAll can only be performed on tables with the compatible column types. array <> int at the first column of the second table; +IntersectAll can only be performed on tables with the compatible column types. 
array <> int at the first column of the second table -- !query @@ -109,7 +109,7 @@ SELECT k, v FROM tab2 struct<> -- !query output org.apache.spark.sql.AnalysisException -IntersectAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns; +IntersectAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out index 34a329627f5dd..b14e3e1558fb0 100644 --- a/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out @@ -72,7 +72,7 @@ select to_json(named_struct('a', 1, 'b', 2), named_struct('mode', 'PERMISSIVE')) struct<> -- !query output org.apache.spark.sql.AnalysisException -Must use a map() function for options;; line 1 pos 7 +Must use a map() function for options; line 1 pos 7 -- !query @@ -81,7 +81,7 @@ select to_json(named_struct('a', 1, 'b', 2), map('mode', 1)) struct<> -- !query output org.apache.spark.sql.AnalysisException -A type of keys and values in map() must be string, but got map;; line 1 pos 7 +A type of keys and values in map() must be string, but got map; line 1 pos 7 -- !query @@ -115,7 +115,7 @@ select from_json('{"a":1}', 1) struct<> -- !query output org.apache.spark.sql.AnalysisException -The expression '1' is not a valid schema string.;; line 1 pos 7 +The expression '1' is not a valid schema string.; line 1 pos 7 -- !query @@ -124,7 +124,14 @@ select from_json('{"a":1}', 'a InvalidType') struct<> -- !query output org.apache.spark.sql.AnalysisException +Cannot parse the data type: +extraneous input 'InvalidType' expecting (line 1, pos 2) +== SQL == +a InvalidType +--^^^ + +Failed fallback parsing: DataType invalidtype is not supported.(line 1, pos 2) == SQL == @@ -139,7 +146,7 @@ select from_json('{"a":1}', 'a INT', named_struct('mode', 'PERMISSIVE')) struct<> -- !query output org.apache.spark.sql.AnalysisException -Must use a map() function for options;; line 1 pos 7 +Must use a map() function for options; line 1 pos 7 -- !query @@ -148,7 +155,7 @@ select from_json('{"a":1}', 'a INT', map('mode', 1)) struct<> -- !query output org.apache.spark.sql.AnalysisException -A type of keys and values in map() must be string, but got map;; line 1 pos 7 +A type of keys and values in map() must be string, but got map; line 1 pos 7 -- !query @@ -213,7 +220,7 @@ select schema_of_json('{"c1":0, "c2":[1]}') -- !query schema struct -- !query output -struct> +STRUCT<`c1`: BIGINT, `c2`: ARRAY> -- !query @@ -352,7 +359,7 @@ select schema_of_json('{"c1":1}', map('primitivesAsString', 'true')) -- !query schema struct -- !query output -struct +STRUCT<`c1`: STRING> -- !query @@ -360,7 +367,7 @@ select schema_of_json('{"c1":01, "c2":0.1}', map('allowNumericLeadingZeros', 'tr -- !query schema struct -- !query output -struct +STRUCT<`c1`: BIGINT, `c2`: DECIMAL(1,1)> -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/limit.sql.out b/sql/core/src/test/resources/sql-tests/results/limit.sql.out index 074e7a6d28c47..8e324628c6299 100644 --- a/sql/core/src/test/resources/sql-tests/results/limit.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/limit.sql.out @@ -53,7 +53,7 @@ SELECT * FROM testdata LIMIT -1 struct<> -- !query output org.apache.spark.sql.AnalysisException -The 
limit expression must be equal to or greater than 0, but got -1; +The limit expression must be equal to or greater than 0, but got -1 -- !query @@ -62,7 +62,7 @@ SELECT * FROM testData TABLESAMPLE (-1 ROWS) struct<> -- !query output org.apache.spark.sql.AnalysisException -The limit expression must be equal to or greater than 0, but got -1; +The limit expression must be equal to or greater than 0, but got -1 -- !query @@ -79,7 +79,7 @@ SELECT * FROM testdata LIMIT CAST(NULL AS INT) struct<> -- !query output org.apache.spark.sql.AnalysisException -The evaluated limit expression must not be null, but got CAST(NULL AS INT); +The evaluated limit expression must not be null, but got CAST(NULL AS INT) -- !query @@ -88,7 +88,7 @@ SELECT * FROM testdata LIMIT key > 3 struct<> -- !query output org.apache.spark.sql.AnalysisException -The limit expression must evaluate to a constant value, but got (spark_catalog.default.testdata.`key` > 3); +The limit expression must evaluate to a constant value, but got (spark_catalog.default.testdata.`key` > 3) -- !query @@ -97,7 +97,7 @@ SELECT * FROM testdata LIMIT true struct<> -- !query output org.apache.spark.sql.AnalysisException -The limit expression must be integer type, but got boolean; +The limit expression must be integer type, but got boolean -- !query @@ -106,7 +106,7 @@ SELECT * FROM testdata LIMIT 'a' struct<> -- !query output org.apache.spark.sql.AnalysisException -The limit expression must be integer type, but got string; +The limit expression must be integer type, but got string -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/map.sql.out b/sql/core/src/test/resources/sql-tests/results/map.sql.out new file mode 100644 index 0000000000000..7a0c0d776ca2b --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/map.sql.out @@ -0,0 +1,18 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 2 + + +-- !query +select element_at(map(1, 'a', 2, 'b'), 5) +-- !query schema +struct +-- !query output +NULL + + +-- !query +select map(1, 'a', 2, 'b')[5] +-- !query schema +struct +-- !query output +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/misc-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/misc-functions.sql.out index bd8ffb82ee129..bf45ec3d10215 100644 --- a/sql/core/src/test/resources/sql-tests/results/misc-functions.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/misc-functions.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 7 +-- Number of queries: 16 -- !query @@ -56,3 +56,82 @@ select typeof(array(1, 2)), typeof(map(1, 2)), typeof(named_struct('a', 1, 'b', struct -- !query output array map struct + + +-- !query +SELECT assert_true(true), assert_true(boolean(1)) +-- !query schema +struct +-- !query output +NULL NULL + + +-- !query +SELECT assert_true(false) +-- !query schema +struct<> +-- !query output +java.lang.RuntimeException +'false' is not true! + + +-- !query +SELECT assert_true(boolean(0)) +-- !query schema +struct<> +-- !query output +java.lang.RuntimeException +'cast(0 as boolean)' is not true! + + +-- !query +SELECT assert_true(null) +-- !query schema +struct<> +-- !query output +java.lang.RuntimeException +'null' is not true! + + +-- !query +SELECT assert_true(boolean(null)) +-- !query schema +struct<> +-- !query output +java.lang.RuntimeException +'cast(null as boolean)' is not true! 
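The new misc-functions results above and below record the assert_true semantics: NULL when the predicate holds, java.lang.RuntimeException otherwise. A small illustrative usage, assumed rather than taken from the diff:

-- Returns NULL: the assertion passes.
SELECT assert_true(length('spark') = 5);
-- Raises java.lang.RuntimeException: the assertion fails.
SELECT assert_true(length('spark') = 4);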
+ + +-- !query +SELECT assert_true(false, 'custom error message') +-- !query schema +struct<> +-- !query output +java.lang.RuntimeException +custom error message + + +-- !query +CREATE TEMPORARY VIEW tbl_misc AS SELECT * FROM (VALUES (1), (8), (2)) AS T(v) +-- !query schema +struct<> +-- !query output + + + +-- !query +SELECT raise_error('error message') +-- !query schema +struct<> +-- !query output +java.lang.RuntimeException +error message + + +-- !query +SELECT if(v > 5, raise_error('too big: ' || v), v + 1) FROM tbl_misc +-- !query schema +struct<> +-- !query output +java.lang.RuntimeException +too big: 8 diff --git a/sql/core/src/test/resources/sql-tests/results/parse-schema-string.sql.out b/sql/core/src/test/resources/sql-tests/results/parse-schema-string.sql.out new file mode 100644 index 0000000000000..4440dd763bd2b --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/parse-schema-string.sql.out @@ -0,0 +1,34 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 4 + + +-- !query +select from_csv('1', 'create INT') +-- !query schema +struct> +-- !query output +{"create":1} + + +-- !query +select from_csv('1', 'cube INT') +-- !query schema +struct> +-- !query output +{"cube":1} + + +-- !query +select from_json('{"create":1}', 'create INT') +-- !query schema +struct> +-- !query output +{"create":1} + + +-- !query +select from_json('{"cube":1}', 'cube INT') +-- !query schema +struct> +-- !query output +{"cube":1} diff --git a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out index bb0d452fa04a1..968319fbb7efe 100644 --- a/sql/core/src/test/resources/sql-tests/results/pivot.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/pivot.sql.out @@ -202,7 +202,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Aggregate expression required for pivot, but 'coursesales.`earnings`' did not appear in any aggregate function.; +Aggregate expression required for pivot, but 'coursesales.`earnings`' did not appear in any aggregate function. -- !query @@ -217,7 +217,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Aggregate expression required for pivot, but '__auto_generated_subquery_name.`year`' did not appear in any aggregate function.; +Aggregate expression required for pivot, but '__auto_generated_subquery_name.`year`' did not appear in any aggregate function. -- !query @@ -262,7 +262,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.; +It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query. -- !query @@ -313,7 +313,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Invalid pivot value 'dotNET': value data type string does not match pivot column data type struct; +Invalid pivot value 'dotNET': value data type string does not match pivot column data type struct -- !query @@ -339,7 +339,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Literal expressions required for pivot values, found 'course#x'; +Literal expressions required for pivot values, found 'course#x' -- !query @@ -458,7 +458,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Invalid pivot column 'm#x'. 
Pivot columns must be comparable.; +Invalid pivot column 'm#x'. Pivot columns must be comparable. -- !query @@ -475,7 +475,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns must be comparable.; +Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns must be comparable. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out index f7bba96738eab..cc8f99ff4f453 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out @@ -143,7 +143,7 @@ SELECT var_pop(1.0), var_samp(2.0) -- !query schema struct -- !query output -0.0 NaN +0.0 NULL -- !query @@ -151,7 +151,7 @@ SELECT stddev_pop(CAST(3.0 AS Decimal(38,0))), stddev_samp(CAST(4.0 AS Decimal(3 -- !query schema struct -- !query output -0.0 NaN +0.0 NULL -- !query @@ -382,7 +382,7 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. Expression in where clause: [(sum(DISTINCT CAST((outer(a.`four`) + b.`four`) AS BIGINT)) = CAST(b.`four` AS BIGINT))] -Invalid expressions: [sum(DISTINCT CAST((outer(a.`four`) + b.`four`) AS BIGINT))]; +Invalid expressions: [sum(DISTINCT CAST((outer(a.`four`) + b.`four`) AS BIGINT))] -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out index e1f735e5fe1dc..86ebb575ebce9 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part3.sql.out @@ -8,7 +8,7 @@ select max(min(unique1)) from tenk1 struct<> -- !query output org.apache.spark.sql.AnalysisException -It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.; +It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query. 
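The aggregates_part3 hunk above keeps the nested-aggregate restriction and only trims the trailing semicolon. A sketch of the rewrite the message asks for, assuming the tenk1 table (columns unique1, four) used throughout these postgreSQL golden files; illustrative, not part of the diff:

-- Fails: min() is nested directly inside max().
select max(min(unique1)) from tenk1;
-- Compute the inner aggregate in a sub-query instead, as the message suggests.
select max(m) from (select four, min(unique1) as m from tenk1 group by four) t;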
-- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out index 1b002c3f48ae2..0006768dbcb0f 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/case.sql.out @@ -176,28 +176,28 @@ struct -- !query SELECT CASE WHEN 1=0 THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END -- !query schema -struct +struct<> -- !query output -1.0 +java.lang.ArithmeticException +divide by zero -- !query SELECT CASE 1 WHEN 0 THEN 1/0 WHEN 1 THEN 1 ELSE 2/0 END -- !query schema -struct +struct<> -- !query output -1.0 +java.lang.ArithmeticException +divide by zero -- !query SELECT CASE WHEN i > 100 THEN 1/0 ELSE 0 END FROM case_tbl -- !query schema -struct 100) THEN (CAST(1 AS DOUBLE) / CAST(0 AS DOUBLE)) ELSE CAST(0 AS DOUBLE) END:double> +struct<> -- !query output -0.0 -0.0 -0.0 -0.0 +java.lang.ArithmeticException +divide by zero -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out index ae1cb2f171704..c05c9abbcee31 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out @@ -56,7 +56,7 @@ CREATE VIEW key_dependent_view AS struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'spark_catalog.default.view_base_table.`data`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'spark_catalog.default.view_base_table.`data`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query @@ -257,7 +257,7 @@ View Text SELECT * FROM base_table View Original Text SELECT * FROM base_table View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [a, id] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=id, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=a, view.query.out.col.1=id, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -266,7 +266,7 @@ CREATE VIEW v1_temp AS SELECT * FROM temp_table struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v1_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v1_temp` by referencing a temporary view temp_table. 
Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -313,7 +313,7 @@ View Text SELECT * FROM base_table View Original Text SELECT * FROM base_table View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [a, id] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=id, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=a, view.query.out.col.1=id, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -322,7 +322,7 @@ CREATE VIEW temp_view_test.v3_temp AS SELECT * FROM temp_table struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v3_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v3_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -359,7 +359,7 @@ View Original Text SELECT t1.a AS t1_a, t2.a AS t2_a WHERE t1.id = t2.id View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [t1_a, t2_a] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=t1_a, view.query.out.numCols=2, view.query.out.col.1=t2_a, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=t1_a, view.query.out.col.1=t2_a, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -371,7 +371,7 @@ CREATE VIEW v4_temp AS struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v4_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v4_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -383,7 +383,7 @@ CREATE VIEW v5_temp AS struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v5_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v5_temp` by referencing a temporary view temp_table. 
Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -413,7 +413,7 @@ View Text SELECT * FROM base_table WHERE id IN (SELECT id FROM base_t View Original Text SELECT * FROM base_table WHERE id IN (SELECT id FROM base_table2) View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [a, id] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=id, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=a, view.query.out.col.1=id, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -443,7 +443,7 @@ View Text SELECT t1.id, t2.a FROM base_table t1, (SELECT * FROM base_ View Original Text SELECT t1.id, t2.a FROM base_table t1, (SELECT * FROM base_table2) t2 View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [id, a] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=id, view.query.out.numCols=2, view.query.out.col.1=a, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=id, view.query.out.col.1=a, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -473,7 +473,7 @@ View Text SELECT * FROM base_table WHERE EXISTS (SELECT 1 FROM base_t View Original Text SELECT * FROM base_table WHERE EXISTS (SELECT 1 FROM base_table2) View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [a, id] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=id, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=a, view.query.out.col.1=id, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -503,7 +503,7 @@ View Text SELECT * FROM base_table WHERE NOT EXISTS (SELECT 1 FROM ba View Original Text SELECT * FROM base_table WHERE NOT EXISTS (SELECT 1 FROM base_table2) View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [a, id] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=id, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=a, view.query.out.col.1=id, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -533,7 +533,7 @@ View Text SELECT * FROM base_table WHERE EXISTS (SELECT 1) View Original Text SELECT * FROM base_table WHERE 
EXISTS (SELECT 1) View Catalog and Namespace spark_catalog.temp_view_test View Query Output Columns [a, id] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=id, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=temp_view_test, view.query.out.col.0=a, view.query.out.col.1=id, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -542,7 +542,7 @@ CREATE VIEW v6_temp AS SELECT * FROM base_table WHERE id IN (SELECT id FROM temp struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v6_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v6_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -551,7 +551,7 @@ CREATE VIEW v7_temp AS SELECT t1.id, t2.a FROM base_table t1, (SELECT * FROM tem struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v7_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v7_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -560,7 +560,7 @@ CREATE VIEW v8_temp AS SELECT * FROM base_table WHERE EXISTS (SELECT 1 FROM temp struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v8_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v8_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -569,7 +569,7 @@ CREATE VIEW v9_temp AS SELECT * FROM base_table WHERE NOT EXISTS (SELECT 1 FROM struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `temp_view_test`.`v9_temp` by referencing a temporary view temp_table. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `temp_view_test`.`v9_temp` by referencing a temporary view temp_table. 
Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -669,7 +669,7 @@ View Text SELECT * FROM t1 CROSS JOIN t2 View Original Text SELECT * FROM t1 CROSS JOIN t2 View Catalog and Namespace spark_catalog.testviewschm2 View Query Output Columns [num, name, num2, value] -Table Properties [view.query.out.col.3=value, view.catalogAndNamespace.numParts=2, view.query.out.col.0=num, view.query.out.numCols=4, view.query.out.col.1=name, view.catalogAndNamespace.part.0=spark_catalog, view.query.out.col.2=num2, view.catalogAndNamespace.part.1=testviewschm2] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2, view.query.out.col.0=num, view.query.out.col.1=name, view.query.out.col.2=num2, view.query.out.col.3=value, view.query.out.numCols=4, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -678,7 +678,7 @@ CREATE VIEW temporal1 AS SELECT * FROM t1 CROSS JOIN tt struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `testviewschm2`.`temporal1` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `testviewschm2`.`temporal1` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -710,7 +710,7 @@ View Text SELECT * FROM t1 INNER JOIN t2 ON t1.num = t2.num2 View Original Text SELECT * FROM t1 INNER JOIN t2 ON t1.num = t2.num2 View Catalog and Namespace spark_catalog.testviewschm2 View Query Output Columns [num, name, num2, value] -Table Properties [view.query.out.col.3=value, view.catalogAndNamespace.numParts=2, view.query.out.col.0=num, view.query.out.numCols=4, view.query.out.col.1=name, view.catalogAndNamespace.part.0=spark_catalog, view.query.out.col.2=num2, view.catalogAndNamespace.part.1=testviewschm2] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2, view.query.out.col.0=num, view.query.out.col.1=name, view.query.out.col.2=num2, view.query.out.col.3=value, view.query.out.numCols=4, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -719,7 +719,7 @@ CREATE VIEW temporal2 AS SELECT * FROM t1 INNER JOIN tt ON t1.num = tt.num2 struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `testviewschm2`.`temporal2` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `testviewschm2`.`temporal2` by referencing a temporary view tt. 
Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -751,7 +751,7 @@ View Text SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num2 View Original Text SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num2 View Catalog and Namespace spark_catalog.testviewschm2 View Query Output Columns [num, name, num2, value] -Table Properties [view.query.out.col.3=value, view.catalogAndNamespace.numParts=2, view.query.out.col.0=num, view.query.out.numCols=4, view.query.out.col.1=name, view.catalogAndNamespace.part.0=spark_catalog, view.query.out.col.2=num2, view.catalogAndNamespace.part.1=testviewschm2] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2, view.query.out.col.0=num, view.query.out.col.1=name, view.query.out.col.2=num2, view.query.out.col.3=value, view.query.out.numCols=4, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -760,7 +760,7 @@ CREATE VIEW temporal3 AS SELECT * FROM t1 LEFT JOIN tt ON t1.num = tt.num2 struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `testviewschm2`.`temporal3` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `testviewschm2`.`temporal3` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -792,7 +792,7 @@ View Text SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num2 AND t2.va View Original Text SELECT * FROM t1 LEFT JOIN t2 ON t1.num = t2.num2 AND t2.value = 'xxx' View Catalog and Namespace spark_catalog.testviewschm2 View Query Output Columns [num, name, num2, value] -Table Properties [view.query.out.col.3=value, view.catalogAndNamespace.numParts=2, view.query.out.col.0=num, view.query.out.numCols=4, view.query.out.col.1=name, view.catalogAndNamespace.part.0=spark_catalog, view.query.out.col.2=num2, view.catalogAndNamespace.part.1=testviewschm2] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2, view.query.out.col.0=num, view.query.out.col.1=name, view.query.out.col.2=num2, view.query.out.col.3=value, view.query.out.numCols=4, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -801,7 +801,7 @@ CREATE VIEW temporal4 AS SELECT * FROM t1 LEFT JOIN tt ON t1.num = tt.num2 AND t struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `testviewschm2`.`temporal4` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `testviewschm2`.`temporal4` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -810,7 +810,7 @@ CREATE VIEW temporal5 AS SELECT * FROM t1 WHERE num IN (SELECT num FROM t1 WHERE struct<> -- !query output org.apache.spark.sql.AnalysisException -Not allowed to create a permanent view `testviewschm2`.`temporal5` by referencing a temporary view tt. Please create a temp view instead by CREATE TEMP VIEW; +Not allowed to create a permanent view `testviewschm2`.`temporal5` by referencing a temporary view tt. 
Please create a temp view instead by CREATE TEMP VIEW -- !query @@ -894,7 +894,7 @@ BETWEEN (SELECT d FROM tbl2 WHERE c = 1) AND (SELECT e FROM tbl3 WHERE f = 2) AND EXISTS (SELECT g FROM tbl4 LEFT JOIN tbl3 ON tbl4.h = tbl3.f) View Catalog and Namespace spark_catalog.testviewschm2 View Query Output Columns [a, b] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=b, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2, view.query.out.col.0=a, view.query.out.col.1=b, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query @@ -933,7 +933,7 @@ AND EXISTS (SELECT g FROM tbl4 LEFT JOIN tbl3 ON tbl4.h = tbl3.f) AND NOT EXISTS (SELECT g FROM tbl4 LEFT JOIN tmptbl ON tbl4.h = tmptbl.j) View Catalog and Namespace spark_catalog.testviewschm2 View Query Output Columns [a, b] -Table Properties [view.catalogAndNamespace.numParts=2, view.query.out.col.0=a, view.query.out.numCols=2, view.query.out.col.1=b, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2] +Table Properties [view.catalogAndNamespace.numParts=2, view.catalogAndNamespace.part.0=spark_catalog, view.catalogAndNamespace.part.1=testviewschm2, view.query.out.col.0=a, view.query.out.col.1=b, view.query.out.numCols=2, view.referredTempFunctionsNames=[], view.referredTempViewNames=[], view.sqlConfig.spark.sql.ansi.enabled=true] -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out index 151fa1e28d725..a959284750483 100755 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/date.sql.out @@ -590,25 +590,28 @@ struct -- !query select make_date(2013, 2, 30) -- !query schema -struct +struct<> -- !query output -NULL +java.time.DateTimeException +Invalid date 'FEBRUARY 30' -- !query select make_date(2013, 13, 1) -- !query schema -struct +struct<> -- !query output -NULL +java.time.DateTimeException +Invalid value for MonthOfYear (valid values 1 - 12): 13 -- !query select make_date(2013, 11, -1) -- !query schema -struct +struct<> -- !query output -NULL +java.time.DateTimeException +Invalid value for DayOfMonth (valid values 1 - 28/31): -1 -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out index 18b0c821ae70f..6f98e2f9eeee7 100755 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/int8.sql.out @@ -569,25 +569,28 @@ struct -- !query select bigint('9223372036854775800') / bigint('0') -- !query schema -struct<(CAST(CAST(9223372036854775800 AS BIGINT) AS DOUBLE) / CAST(CAST(0 AS BIGINT) AS DOUBLE)):double> +struct<> -- !query output -NULL +java.lang.ArithmeticException +divide by zero -- !query select bigint('-9223372036854775808') / smallint('0') -- !query schema -struct<(CAST(CAST(-9223372036854775808 AS BIGINT) AS DOUBLE) / CAST(CAST(0 AS SMALLINT) AS DOUBLE)):double> +struct<> -- !query output -NULL +java.lang.ArithmeticException +divide by zero -- !query select 
smallint('100') / bigint('0') -- !query schema -struct<(CAST(CAST(100 AS SMALLINT) AS DOUBLE) / CAST(CAST(0 AS BIGINT) AS DOUBLE)):double> +struct<> -- !query output -NULL +java.lang.ArithmeticException +divide by zero -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out index 2c8bc31dbc6ca..b0f3482f0a282 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/limit.sql.out @@ -59,7 +59,7 @@ select * from int8_tbl limit (case when random() < 0.5 then bigint(null) end) struct<> -- !query output org.apache.spark.sql.AnalysisException -The limit expression must evaluate to a constant value, but got CASE WHEN (`_nondeterministic` < CAST(0.5BD AS DOUBLE)) THEN CAST(NULL AS BIGINT) END; +The limit expression must evaluate to a constant value, but got CASE WHEN (`_nondeterministic` < CAST(0.5BD AS DOUBLE)) THEN CAST(NULL AS BIGINT) END -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/numeric.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/numeric.sql.out index d97853d5fc6d0..fdad837e14b61 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/numeric.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/numeric.sql.out @@ -3830,7 +3830,7 @@ INSERT INTO num_result SELECT t1.id, t2.id, t1.val, t2.val, t1.val * t2.val struct<> -- !query output org.apache.spark.sql.AnalysisException -`default`.`num_result` requires that the data to be inserted have the same number of columns as the target table: target table has 3 column(s) but the inserted data has 5 column(s), including 0 partition column(s) having constant value(s).; +`default`.`num_result` requires that the data to be inserted have the same number of columns as the target table: target table has 3 column(s) but the inserted data has 5 column(s), including 0 partition column(s) having constant value(s). 
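The int8 results above (together with the case.sql.out and select_having.sql.out results elsewhere in this diff) turn NULL answers for division by zero into java.lang.ArithmeticException, consistent with the spark.sql.ansi.enabled=true setting recorded in the regenerated view properties. An illustrative sketch, not part of the diff:

SET spark.sql.ansi.enabled=true;
-- Now fails with java.lang.ArithmeticException: divide by zero.
SELECT smallint('100') / bigint('0');
SET spark.sql.ansi.enabled=false;
-- Pre-ANSI behaviour, as in the removed golden lines: the result is NULL.
SELECT smallint('100') / bigint('0');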
-- !query @@ -4673,7 +4673,7 @@ struct<(CAST(CAST(999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) div -- !query select mod(cast(999999999999999999999 as decimal(38, 0)),1000000000000000000000) -- !query schema -struct<(CAST(CAST(999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) % CAST(1000000000000000000000 AS DECIMAL(38,0))):decimal(22,0)> +struct -- !query output 999999999999999999999 @@ -4689,7 +4689,7 @@ struct<(CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) di -- !query select mod(cast(-9999999999999999999999 as decimal(38, 0)),1000000000000000000000) -- !query schema -struct<(CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) % CAST(1000000000000000000000 AS DECIMAL(38,0))):decimal(22,0)> +struct -- !query output -999999999999999999999 @@ -4697,7 +4697,7 @@ struct<(CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) % -- !query select div(cast(-9999999999999999999999 as decimal(38, 0)),1000000000000000000000)*1000000000000000000000 + mod(cast(-9999999999999999999999 as decimal(38, 0)),1000000000000000000000) -- !query schema -struct<(CAST((CAST(CAST((CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) div CAST(1000000000000000000000 AS DECIMAL(38,0))) AS DECIMAL(20,0)) AS DECIMAL(22,0)) * CAST(1000000000000000000000 AS DECIMAL(22,0))) AS DECIMAL(38,0)) + CAST((CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) % CAST(1000000000000000000000 AS DECIMAL(38,0))) AS DECIMAL(38,0))):decimal(38,0)> +struct<(CAST((CAST(CAST((CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)) div CAST(1000000000000000000000 AS DECIMAL(38,0))) AS DECIMAL(20,0)) AS DECIMAL(22,0)) * CAST(1000000000000000000000 AS DECIMAL(22,0))) AS DECIMAL(38,0)) + CAST(mod(CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS DECIMAL(38,0)), CAST(1000000000000000000000 AS DECIMAL(38,0))) AS DECIMAL(38,0))):decimal(38,0)> -- !query output -9999999999999999999999 @@ -4705,7 +4705,7 @@ struct<(CAST((CAST(CAST((CAST(CAST(-9999999999999999999999 AS DECIMAL(38,0)) AS -- !query select mod (70.0,70) -- !query schema -struct<(CAST(70.0 AS DECIMAL(3,1)) % CAST(CAST(70 AS DECIMAL(2,0)) AS DECIMAL(3,1))):decimal(3,1)> +struct -- !query output 0.0 diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out index d8d33d92a7cc4..f504e4b6c6dad 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/select_having.sql.out @@ -143,7 +143,7 @@ SELECT a FROM test_having HAVING min(a) < max(a) struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and 'spark_catalog.default.test_having.`a`' is not an aggregate function. Wrap '(min(spark_catalog.default.test_having.`a`) AS `min(a#x)`, max(spark_catalog.default.test_having.`a`) AS `max(a#x)`)' in windowing function(s) or wrap 'spark_catalog.default.test_having.`a`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and 'spark_catalog.default.test_having.`a`' is not an aggregate function. Wrap '(min(spark_catalog.default.test_having.`a`) AS `min(a#x)`, max(spark_catalog.default.test_having.`a`) AS `max(a#x)`)' in windowing function(s) or wrap 'spark_catalog.default.test_having.`a`' in first() (or first_value) if you don't care which value you get. 
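The select_having hunk above is again only a semicolon trim; the query it covers fails because nothing is grouped and the projected column is not aggregated. A sketch of the accepted form the message points to, assuming the test_having table from select_having.sql (illustrative, not part of the diff):

-- Fails: no GROUP BY, and 'a' is not an aggregate.
SELECT a FROM test_having HAVING min(a) < max(a);
-- Accepted: aggregate the projected column, as the message suggests.
SELECT first(a) FROM test_having HAVING min(a) < max(a);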
-- !query @@ -174,9 +174,10 @@ struct -- !query SELECT 1 AS one FROM test_having WHERE 1/a = 1 HAVING 1 < 2 -- !query schema -struct +struct<> -- !query output -1 +java.lang.ArithmeticException +divide by zero -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/strings.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/strings.sql.out index e8a3a9b9731a6..13cc8a8754025 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/strings.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/strings.sql.out @@ -446,7 +446,7 @@ SELECT 'maca' LIKE 'm%aca' ESCAPE '%' AS `true` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'm%aca' is invalid, the escape character is not allowed to precede 'a'; +the pattern 'm%aca' is invalid, the escape character is not allowed to precede 'a' -- !query @@ -455,7 +455,7 @@ SELECT 'maca' NOT LIKE 'm%aca' ESCAPE '%' AS `false` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'm%aca' is invalid, the escape character is not allowed to precede 'a'; +the pattern 'm%aca' is invalid, the escape character is not allowed to precede 'a' -- !query @@ -464,7 +464,7 @@ SELECT 'ma%a' LIKE 'm%a%%a' ESCAPE '%' AS `true` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'm%a%%a' is invalid, the escape character is not allowed to precede 'a'; +the pattern 'm%a%%a' is invalid, the escape character is not allowed to precede 'a' -- !query @@ -473,7 +473,7 @@ SELECT 'ma%a' NOT LIKE 'm%a%%a' ESCAPE '%' AS `false` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'm%a%%a' is invalid, the escape character is not allowed to precede 'a'; +the pattern 'm%a%%a' is invalid, the escape character is not allowed to precede 'a' -- !query @@ -482,7 +482,7 @@ SELECT 'bear' LIKE 'b_ear' ESCAPE '_' AS `true` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'b_ear' is invalid, the escape character is not allowed to precede 'e'; +the pattern 'b_ear' is invalid, the escape character is not allowed to precede 'e' -- !query @@ -491,7 +491,7 @@ SELECT 'bear' NOT LIKE 'b_ear' ESCAPE '_' AS `false` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'b_ear' is invalid, the escape character is not allowed to precede 'e'; +the pattern 'b_ear' is invalid, the escape character is not allowed to precede 'e' -- !query @@ -500,7 +500,7 @@ SELECT 'be_r' LIKE 'b_e__r' ESCAPE '_' AS `true` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'b_e__r' is invalid, the escape character is not allowed to precede 'e'; +the pattern 'b_e__r' is invalid, the escape character is not allowed to precede 'e' -- !query @@ -509,7 +509,7 @@ SELECT 'be_r' NOT LIKE 'b_e__r' ESCAPE '_' AS `false` struct<> -- !query output org.apache.spark.sql.AnalysisException -the pattern 'b_e__r' is invalid, the escape character is not allowed to precede 'e'; +the pattern 'b_e__r' is invalid, the escape character is not allowed to precede 'e' -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out index 08eba6797b01d..88aee38c4504e 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite 
--- Number of queries: 29 +-- Number of queries: 30 -- !query @@ -71,7 +71,8 @@ insert into datetimes values -- !query schema struct<> -- !query output - +org.apache.spark.sql.AnalysisException +failed to evaluate expression CAST('11:00 BST' AS TIMESTAMP): Cannot cast 11:00 BST to TimestampType.; line 1 pos 22 -- !query @@ -294,7 +295,7 @@ SELECT * FROM empsalary WHERE row_number() OVER (ORDER BY salary) < 10 struct<> -- !query output org.apache.spark.sql.AnalysisException -It is not allowed to use window functions inside WHERE clause; +It is not allowed to use window functions inside WHERE clause -- !query @@ -306,7 +307,7 @@ org.apache.spark.sql.AnalysisException The query operator `Join` contains one or more unsupported expression types Aggregate, Window or Generate. -Invalid expressions: [row_number() OVER (ORDER BY spark_catalog.default.empsalary.`salary` ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)]; +Invalid expressions: [row_number() OVER (ORDER BY spark_catalog.default.empsalary.`salary` ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] -- !query @@ -318,7 +319,7 @@ org.apache.spark.sql.AnalysisException The query operator `Aggregate` contains one or more unsupported expression types Aggregate, Window or Generate. -Invalid expressions: [RANK() OVER (ORDER BY 1 ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)]; +Invalid expressions: [RANK() OVER (ORDER BY 1 ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)] -- !query @@ -341,7 +342,7 @@ SELECT * FROM empsalary WHERE (rank() OVER (ORDER BY random())) > 10 struct<> -- !query output org.apache.spark.sql.AnalysisException -It is not allowed to use window functions inside WHERE clause; +It is not allowed to use window functions inside WHERE clause -- !query @@ -350,7 +351,7 @@ SELECT * FROM empsalary WHERE rank() OVER (ORDER BY random()) struct<> -- !query output org.apache.spark.sql.AnalysisException -It is not allowed to use window functions inside WHERE clause; +It is not allowed to use window functions inside WHERE clause -- !query @@ -385,6 +386,15 @@ org.apache.spark.sql.AnalysisException cannot resolve 'ntile(0)' due to data type mismatch: Buckets expression must be positive, but got: 0; line 1 pos 7 +-- !query +SELECT nth_value(four, 0) OVER (ORDER BY ten), ten, four FROM tenk1 +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +cannot resolve 'nth_value(spark_catalog.default.tenk1.`four`, 0)' due to data type mismatch: The 'offset' argument of nth_value must be greater than zero but it is 0.; line 1 pos 7 + + -- !query DROP TABLE empsalary -- !query schema diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out index 4dd4712345a89..f7439d873b4eb 100644 --- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out @@ -195,7 +195,7 @@ struct -- !query output org.apache.spark.sql.AnalysisException -Table not found: test; +Table not found: test -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/regexp-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/regexp-functions.sql.out index 2eef926f63e37..60b3e7dbb74f1 100644 --- a/sql/core/src/test/resources/sql-tests/results/regexp-functions.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/regexp-functions.sql.out @@ -1,5 
+1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 30 +-- Number of queries: 42 -- !query @@ -252,3 +252,101 @@ SELECT regexp_extract_all('a 2b 14m', '(\\d+)?([a-z]+)', 1) struct> -- !query output ["","2","14"] + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something') +-- !query schema +struct +-- !query output +something, something, and wise + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', -2) +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +cannot resolve 'regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', -2)' due to data type mismatch: Position expression must be positive, but got: -2; line 1 pos 7 + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 0) +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +cannot resolve 'regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 0)' due to data type mismatch: Position expression must be positive, but got: 0; line 1 pos 7 + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 1) +-- !query schema +struct +-- !query output +something, something, and wise + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 2) +-- !query schema +struct +-- !query output +hsomething, something, and wise + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 8) +-- !query schema +struct +-- !query output +healthy, something, and wise + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', 26) +-- !query schema +struct +-- !query output +healthy, wealthy, and wissomething + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', 27) +-- !query schema +struct +-- !query output +healthy, wealthy, and wise + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', 30) +-- !query schema +struct +-- !query output +healthy, wealthy, and wise + + +-- !query +SELECT regexp_replace('healthy, wealthy, and wise', '\\w', 'something', null) +-- !query schema +struct +-- !query output +NULL + + +-- !query +SELECT regexp_like('1a 2b 14m', '\\d+b') +-- !query schema +struct +-- !query output +true + + +-- !query +SELECT regexp_like('1a 2b 14m', '[a-z]+b') +-- !query schema +struct +-- !query output +false \ No newline at end of file diff --git a/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out b/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out index a95b02c7f7743..611b0b750c2cd 100644 --- a/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/show-tables.sql.out @@ -206,7 +206,7 @@ SHOW TABLE EXTENDED LIKE 'show_t*' PARTITION(c='Us', d=1) struct<> -- !query output org.apache.spark.sql.catalyst.analysis.NoSuchTableException -Table or view 'show_t*' not found in database 'showdb'; +Table or view 'show_t*' not found in database 'showdb' -- !query @@ -215,7 +215,7 @@ SHOW TABLE EXTENDED LIKE 'show_t1' PARTITION(c='Us') struct<> -- !query output org.apache.spark.sql.AnalysisException -Partition spec is invalid. The spec (c) must match the partition spec (c, d) defined in table '`showdb`.`show_t1`'; +Partition spec is invalid. 
The spec (c) must match the partition spec (c, d) defined in table '`showdb`.`show_t1`' -- !query @@ -224,7 +224,7 @@ SHOW TABLE EXTENDED LIKE 'show_t1' PARTITION(a='Us', d=1) struct<> -- !query output org.apache.spark.sql.AnalysisException -Partition spec is invalid. The spec (a, d) must match the partition spec (c, d) defined in table '`showdb`.`show_t1`'; +a is not a valid partition column in table `showdb`.`show_t1`. -- !query @@ -235,7 +235,7 @@ struct<> org.apache.spark.sql.catalyst.analysis.NoSuchPartitionException Partition not found in table 'show_t1' database 'showdb': c -> Ch -d -> 1; +d -> 1 -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/show-tblproperties.sql.out b/sql/core/src/test/resources/sql-tests/results/show-tblproperties.sql.out index eaaf894590d35..3fb948056dc01 100644 --- a/sql/core/src/test/resources/sql-tests/results/show-tblproperties.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/show-tblproperties.sql.out @@ -64,6 +64,8 @@ view.catalogAndNamespace.part.0 spark_catalog view.catalogAndNamespace.part.1 default view.query.out.col.0 c1 view.query.out.numCols 1 +view.referredTempFunctionsNames [] +view.referredTempViewNames [] -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/show-views.sql.out b/sql/core/src/test/resources/sql-tests/results/show-views.sql.out index d88790d8b5ec8..c80f8fab433fb 100644 --- a/sql/core/src/test/resources/sql-tests/results/show-views.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/show-views.sql.out @@ -142,7 +142,7 @@ SHOW VIEWS IN wrongdb LIKE 'view_*' struct<> -- !query output org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException -Database 'wrongdb' not found; +Database 'wrongdb' not found -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/show_columns.sql.out b/sql/core/src/test/resources/sql-tests/results/show_columns.sql.out index 4f5db7f6c6b2f..3535b30d29c44 100644 --- a/sql/core/src/test/resources/sql-tests/results/show_columns.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/show_columns.sql.out @@ -93,8 +93,8 @@ SHOW COLUMNS IN badtable FROM showdb -- !query schema struct<> -- !query output -org.apache.spark.sql.catalyst.analysis.NoSuchTableException -Table or view 'badtable' not found in database 'showdb'; +org.apache.spark.sql.AnalysisException +Table or view not found: showdb.badtable; line 1 pos 0 -- !query @@ -112,7 +112,7 @@ SHOW COLUMNS IN showdb.showcolumn1 FROM baddb struct<> -- !query output org.apache.spark.sql.AnalysisException -SHOW COLUMNS with conflicting databases: 'baddb' != 'showdb'; +SHOW COLUMNS with conflicting databases: 'baddb' != 'showdb' -- !query @@ -129,8 +129,8 @@ SHOW COLUMNS IN showdb.showcolumn3 -- !query schema struct<> -- !query output -org.apache.spark.sql.catalyst.analysis.NoSuchTableException -Table or view 'showcolumn3' not found in database 'showdb'; +org.apache.spark.sql.AnalysisException +Table or view not found: showdb.showcolumn3; line 1 pos 0 -- !query @@ -138,8 +138,8 @@ SHOW COLUMNS IN showcolumn3 FROM showdb -- !query schema struct<> -- !query output -org.apache.spark.sql.catalyst.analysis.NoSuchTableException -Table or view 'showcolumn3' not found in database 'showdb'; +org.apache.spark.sql.AnalysisException +Table or view not found: showdb.showcolumn3; line 1 pos 0 -- !query @@ -147,8 +147,8 @@ SHOW COLUMNS IN showcolumn4 -- !query schema struct<> -- !query output -org.apache.spark.sql.catalyst.analysis.NoSuchTableException -Table or view 'showcolumn4' not found in database 
'showdb'; +org.apache.spark.sql.AnalysisException +Table or view not found: showcolumn4; line 1 pos 0 -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out index 20c31b140b009..74627e7786997 100644 --- a/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/string-functions.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 36 +-- Number of queries: 44 -- !query @@ -290,3 +290,69 @@ SELECT rpad('hi', 'invalid_length') struct -- !query output NULL + + +-- !query +select decode() +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Invalid number of arguments for function decode. Expected: 2; Found: 0; line 1 pos 7 + + +-- !query +select decode(encode('abc', 'utf-8')) +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.AnalysisException +Invalid number of arguments for function decode. Expected: 2; Found: 1; line 1 pos 7 + + +-- !query +select decode(encode('abc', 'utf-8'), 'utf-8') +-- !query schema +struct +-- !query output +abc + + +-- !query +select decode(1, 1, 'Southlake') +-- !query schema +struct +-- !query output +Southlake + + +-- !query +select decode(2, 1, 'Southlake') +-- !query schema +struct +-- !query output +NULL + + +-- !query +select decode(2, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle', 'Non domestic') +-- !query schema +struct +-- !query output +San Francisco + + +-- !query +select decode(6, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle', 'Non domestic') +-- !query schema +struct +-- !query output +Non domestic + + +-- !query +select decode(6, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle') +-- !query schema +struct +-- !query output +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/subexp-elimination.sql.out b/sql/core/src/test/resources/sql-tests/results/subexp-elimination.sql.out new file mode 100644 index 0000000000000..d161eef79b984 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/subexp-elimination.sql.out @@ -0,0 +1,116 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 10 + + +-- !query +CREATE OR REPLACE TEMPORARY VIEW testData AS SELECT * FROM VALUES +('{"a":1, "b":"2"}', '[{"a": 1, "b":2}, {"a":2, "b":2}]'), ('{"a":1, "b":"2"}', null), ('{"a":2, "b":"3"}', '[{"a": 3, "b":4}, {"a":4, "b":5}]'), ('{"a":5, "b":"6"}', '[{"a": 6, "b":7}, {"a":8, "b":9}]'), (null, '[{"a": 1, "b":2}, {"a":2, "b":2}]') +AS testData(a, b) +-- !query schema +struct<> +-- !query output + + + +-- !query +SELECT from_json(a, 'struct').a, from_json(a, 'struct').b, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].b FROM testData +-- !query schema +struct +-- !query output +1 2 1 2 +1 2 NULL NULL +2 3 3 4 +5 6 6 7 +NULL NULL 1 2 + + +-- !query +SELECT if(from_json(a, 'struct').a > 1, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].a + 1) FROM testData +-- !query schema +struct<(IF((from_json(a).a > 1), from_json(b)[0].a, (from_json(b)[0].a + 1))):int> +-- !query output +2 +2 +3 +6 +NULL + + +-- !query +SELECT if(isnull(from_json(a, 'struct').a), from_json(b, 'array>')[0].b + 1, from_json(b, 'array>')[0].b) FROM testData +-- !query schema +struct<(IF((from_json(a).a IS NULL), (from_json(b)[0].b + 1), from_json(b)[0].b)):int> +-- !query output +2 +3 +4 +7 +NULL + + +-- !query +SELECT case when 
from_json(a, 'struct').a > 5 then from_json(a, 'struct').b when from_json(a, 'struct').a > 4 then from_json(a, 'struct').b + 1 else from_json(a, 'struct').b + 2 end FROM testData +-- !query schema +struct 5) THEN from_json(a).b WHEN (from_json(a).a > 4) THEN CAST((CAST(from_json(a).b AS DOUBLE) + CAST(1 AS DOUBLE)) AS STRING) ELSE CAST((CAST(from_json(a).b AS DOUBLE) + CAST(2 AS DOUBLE)) AS STRING) END:string> +-- !query output +4.0 +4.0 +5.0 +7.0 +NULL + + +-- !query +SELECT case when from_json(a, 'struct').a > 5 then from_json(b, 'array>')[0].b when from_json(a, 'struct').a > 4 then from_json(b, 'array>')[0].b + 1 else from_json(b, 'array>')[0].b + 2 end FROM testData +-- !query schema +struct 5) THEN from_json(b)[0].b WHEN (from_json(a).a > 4) THEN (from_json(b)[0].b + 1) ELSE (from_json(b)[0].b + 2) END:int> +-- !query output +4 +4 +6 +8 +NULL + + +-- !query +SELECT from_json(a, 'struct').a + random() > 2, from_json(a, 'struct').b, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].b + + random() > 2 FROM testData +-- !query schema +struct<((CAST(from_json(a).a AS DOUBLE) + rand()) > CAST(2 AS DOUBLE)):boolean,from_json(a).b:string,from_json(b)[0].a:int,((CAST(from_json(b)[0].b AS DOUBLE) + (+ rand())) > CAST(2 AS DOUBLE)):boolean> +-- !query output +NULL NULL 1 true +false 2 1 true +false 2 NULL NULL +true 3 3 true +true 6 6 true + + +-- !query +SELECT if(from_json(a, 'struct').a + random() > 5, from_json(b, 'array>')[0].a, from_json(b, 'array>')[0].a + 1) FROM testData +-- !query schema +struct<(IF(((CAST(from_json(a).a AS DOUBLE) + rand()) > CAST(5 AS DOUBLE)), from_json(b)[0].a, (from_json(b)[0].a + 1))):int> +-- !query output +2 +2 +4 +6 +NULL + + +-- !query +SELECT case when from_json(a, 'struct').a > 5 then from_json(a, 'struct').b + random() > 5 when from_json(a, 'struct').a > 4 then from_json(a, 'struct').b + 1 + random() > 2 else from_json(a, 'struct').b + 2 + random() > 5 end FROM testData +-- !query schema +struct 5) THEN ((CAST(from_json(a).b AS DOUBLE) + rand()) > CAST(5 AS DOUBLE)) WHEN (from_json(a).a > 4) THEN (((CAST(from_json(a).b AS DOUBLE) + CAST(1 AS DOUBLE)) + rand()) > CAST(2 AS DOUBLE)) ELSE (((CAST(from_json(a).b AS DOUBLE) + CAST(2 AS DOUBLE)) + rand()) > CAST(5 AS DOUBLE)) END:boolean> +-- !query output +NULL +false +false +true +true + + +-- !query +DROP VIEW IF EXISTS testData +-- !query schema +struct<> +-- !query output + diff --git a/sql/core/src/test/resources/sql-tests/results/subquery/in-subquery/in-basic.sql.out b/sql/core/src/test/resources/sql-tests/results/subquery/in-subquery/in-basic.sql.out index a33f78abf27f9..639fe1775d2dc 100644 --- a/sql/core/src/test/resources/sql-tests/results/subquery/in-subquery/in-basic.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/subquery/in-subquery/in-basic.sql.out @@ -49,7 +49,7 @@ number of columns in the output of subquery. Left side columns: [tab_a.`a1`, tab_a.`b1`]. Right side columns: -[`named_struct(a2, a2, b2, b2)`].; +[`named_struct(a2, a2, b2, b2)`]. 
-- !query diff --git a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out index d703d4e9112e9..e77afd886aeab 100644 --- a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/invalid-correlation.sql.out @@ -46,7 +46,7 @@ AND t2b = (SELECT max(avg) struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and 't2.`t2b`' is not an aggregate function. Wrap '(avg(CAST(t2.`t2b` AS BIGINT)) AS `avg`)' in windowing function(s) or wrap 't2.`t2b`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and 't2.`t2b`' is not an aggregate function. Wrap '(avg(CAST(t2.`t2b` AS BIGINT)) AS `avg`)' in windowing function(s) or wrap 't2.`t2b`' in first() (or first_value) if you don't care which value you get. -- !query @@ -63,7 +63,7 @@ WHERE t1a IN (SELECT min(t2a) struct<> -- !query output org.apache.spark.sql.AnalysisException -Resolved attribute(s) t2b#x missing from min(t2a)#x,t2c#x in operator !Filter t2c#x IN (list#x [t2b#x]).; +Resolved attribute(s) t2b#x missing from min(t2a)#x,t2c#x in operator !Filter t2c#x IN (list#x [t2b#x]). -- !query @@ -78,7 +78,7 @@ HAVING EXISTS (SELECT t2a struct<> -- !query output org.apache.spark.sql.AnalysisException -Found an aggregate expression in a correlated predicate that has both outer and local references, which is not supported yet. Aggregate expression: min((t1.`t1a` + t2.`t2a`)), Outer references: t1.`t1a`, Local references: t2.`t2a`.; +Found an aggregate expression in a correlated predicate that has both outer and local references, which is not supported yet. Aggregate expression: min((t1.`t1a` + t2.`t2a`)), Outer references: t1.`t1a`, Local references: t2.`t2a`. -- !query @@ -94,7 +94,7 @@ WHERE t1a IN (SELECT t2a struct<> -- !query output org.apache.spark.sql.AnalysisException -Found an aggregate expression in a correlated predicate that has both outer and local references, which is not supported yet. Aggregate expression: min((t2.`t2a` + t3.`t3a`)), Outer references: t2.`t2a`, Local references: t3.`t3a`.; +Found an aggregate expression in a correlated predicate that has both outer and local references, which is not supported yet. Aggregate expression: min((t2.`t2a` + t3.`t3a`)), Outer references: t2.`t2a`, Local references: t3.`t3a`. 
-- !query @@ -111,7 +111,7 @@ org.apache.spark.sql.AnalysisException Expressions referencing the outer query are not supported outside of WHERE/HAVING clauses: Aggregate [min(outer(t2a#x)) AS min(outer(t2.`t2a`))#x] +- SubqueryAlias t3 - +- Project [t3a#x, t3b#x, t3c#x] - +- SubqueryAlias t3 - +- LocalRelation [t3a#x, t3b#x, t3c#x] -; + +- View (`t3`, [t3a#x,t3b#x,t3c#x]) + +- Project [t3a#x, t3b#x, t3c#x] + +- SubqueryAlias t3 + +- LocalRelation [t3a#x, t3b#x, t3c#x] diff --git a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/subq-input-typecheck.sql.out b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/subq-input-typecheck.sql.out index 776598127075b..a470775308092 100644 --- a/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/subq-input-typecheck.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/subquery/negative-cases/subq-input-typecheck.sql.out @@ -64,7 +64,7 @@ FROM t1 struct<> -- !query output org.apache.spark.sql.AnalysisException -Scalar subquery must return only one column, but got 2; +Scalar subquery must return only one column, but got 2 -- !query @@ -79,7 +79,7 @@ FROM t1 struct<> -- !query output org.apache.spark.sql.AnalysisException -Scalar subquery must return only one column, but got 2; +Scalar subquery must return only one column, but got 2 -- !query @@ -100,7 +100,7 @@ number of columns in the output of subquery. Left side columns: [t1.`t1a`]. Right side columns: -[t2.`t2a`, t2.`t2b`].; +[t2.`t2a`, t2.`t2b`]. -- !query @@ -121,7 +121,7 @@ number of columns in the output of subquery. Left side columns: [t1.`t1a`, t1.`t1b`]. Right side columns: -[t2.`t2a`].; +[t2.`t2a`]. -- !query @@ -143,4 +143,4 @@ Mismatched columns: Left side: [double, string, string]. Right side: -[timestamp, string, bigint].; +[timestamp, string, bigint]. 
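The golden files above also pin down the behaviour of the new string functions added in this patch: the position argument of regexp_replace, regexp_like, and the Oracle-style decode. Below is a minimal sketch of that behaviour, assuming a spark-sql session with this patch applied; the queries and the expected results are copied directly from the result files above, so anything beyond them (the session itself, the trailing semicolons) is illustrative only.

-- regexp_replace with a 1-based position: replacement only starts at that offset,
-- so the first match ('healthy') is left untouched here
SELECT regexp_replace('healthy, wealthy, and wise', '\\w+thy', 'something', 8);
-- healthy, something, and wise

-- regexp_like returns a boolean match instead of extracting groups
SELECT regexp_like('1a 2b 14m', '\\d+b');
-- true

-- decode compares the first argument against each search value and
-- falls through to the trailing default when nothing matches
SELECT decode(6, 1, 'Southlake', 2, 'San Francisco', 3, 'New Jersey', 4, 'Seattle', 'Non domestic');
-- Non domestic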
diff --git a/sql/core/src/test/resources/sql-tests/results/transform.sql.out b/sql/core/src/test/resources/sql-tests/results/transform.sql.out new file mode 100644 index 0000000000000..3267a7625a7d9 --- /dev/null +++ b/sql/core/src/test/resources/sql-tests/results/transform.sql.out @@ -0,0 +1,335 @@ +-- Automatically generated by SQLQueryTestSuite +-- Number of queries: 16 + + +-- !query +CREATE OR REPLACE TEMPORARY VIEW t AS SELECT * FROM VALUES +('1', true, unhex('537061726B2053514C'), tinyint(1), 1, smallint(100), bigint(1), float(1.0), 1.0, Decimal(1.0), timestamp('1997-01-02'), date('2000-04-01')), +('2', false, unhex('537061726B2053514C'), tinyint(2), 2, smallint(200), bigint(2), float(2.0), 2.0, Decimal(2.0), timestamp('1997-01-02 03:04:05'), date('2000-04-02')), +('3', true, unhex('537061726B2053514C'), tinyint(3), 3, smallint(300), bigint(3), float(3.0), 3.0, Decimal(3.0), timestamp('1997-02-10 17:32:01-08'), date('2000-04-03')) +AS t(a, b, c, d, e, f, g, h, i, j, k, l) +-- !query schema +struct<> +-- !query output + + + +-- !query +SELECT TRANSFORM(a) +USING 'cat' AS (a) +FROM t +-- !query schema +struct +-- !query output +1 +2 +3 + + +-- !query +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + USING 'cat' AS ( + a string, + b boolean, + c binary, + d tinyint, + e int, + f smallint, + g long, + h float, + i double, + j decimal(38, 18), + k timestamp, + l date) + FROM t +) tmp +-- !query schema +struct +-- !query output +1 true Spark SQL 1 1 100 1 1.0 1.0 1.000000000000000000 1997-01-02 00:00:00 2000-04-01 +2 false Spark SQL 2 2 200 2 2.0 2.0 2.000000000000000000 1997-01-02 03:04:05 2000-04-02 +3 true Spark SQL 3 3 300 3 3.0 3.0 3.000000000000000000 1997-02-10 17:32:01 2000-04-03 + + +-- !query +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + USING 'cat' AS ( + a string, + b string, + c string, + d string, + e string, + f string, + g string, + h string, + i string, + j string, + k string, + l string) + FROM t +) tmp +-- !query schema +struct +-- !query output +1 true Spark SQL 1 1 100 1 1.0 1.0 1 1997-01-02 00:00:00 2000-04-01 +2 false Spark SQL 2 2 200 2 2.0 2.0 2 1997-01-02 03:04:05 2000-04-02 +3 true Spark SQL 3 3 300 3 3.0 3.0 3 1997-02-10 17:32:01 2000-04-03 + + +-- !query +SELECT TRANSFORM(a) +USING 'cat' +FROM t +-- !query schema +struct +-- !query output +1 NULL +2 NULL +3 NULL + + +-- !query +SELECT TRANSFORM(a, b) +USING 'cat' +FROM t +-- !query schema +struct +-- !query output +1 true +2 false +3 true + + +-- !query +SELECT TRANSFORM(a, b, c) +USING 'cat' +FROM t +-- !query schema +struct +-- !query output +1 true +2 false +3 true + + +-- !query +SELECT TRANSFORM(a, b, c, d, e, f, g, h, i) +USING 'cat' AS (a int, b short, c long, d byte, e float, f double, g decimal(38, 18), h date, i timestamp) +FROM VALUES +('a','','1231a','a','213.21a','213.21a','0a.21d','2000-04-01123','1997-0102 00:00:') tmp(a, b, c, d, e, f, g, h, i) +-- !query schema +struct +-- !query output +NULL NULL NULL NULL NULL NULL NULL NULL NULL + + +-- !query +SELECT TRANSFORM(b, max(a), sum(f)) +USING 'cat' AS (a, b) +FROM t +GROUP BY b +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.catalyst.parser.ParseException + +mismatched input 'GROUP' expecting {, ';'}(line 4, pos 0) + +== SQL == +SELECT TRANSFORM(b, max(a), sum(f)) +USING 'cat' AS (a, b) +FROM t +GROUP BY b +^^^ + + +-- !query +MAP a, b USING 'cat' AS (a, b) FROM t +-- !query 
schema +struct +-- !query output +1 true +2 false +3 true + + +-- !query +REDUCE a, b USING 'cat' AS (a, b) FROM t +-- !query schema +struct +-- !query output +1 true +2 false +3 true + + +-- !query +SELECT TRANSFORM(a, b, c, null) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +USING 'cat' AS (a, b, c, d) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +FROM t +-- !query schema +struct +-- !query output +1 true Spark SQL null +2 false Spark SQL null +3 true Spark SQL null + + +-- !query +SELECT TRANSFORM(a, b, c, null) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +USING 'cat' AS (d) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +FROM t +-- !query schema +struct +-- !query output +1 +2 +3 + + +-- !query +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + USING 'cat' AS ( + a string, + b boolean, + c binary, + d tinyint, + e int, + f smallint, + g long, + h float, + i double, + j decimal(38, 18), + k timestamp, + l date) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + FROM t +) tmp +-- !query schema +struct +-- !query output +1 true Spark SQL 1 1 100 1 1.0 1.0 1.000000000000000000 1997-01-02 00:00:00 2000-04-01 +2 false Spark SQL 2 2 200 2 2.0 2.0 2.000000000000000000 1997-01-02 03:04:05 2000-04-02 +3 true Spark SQL 3 3 300 3 3.0 3.0 3.000000000000000000 1997-02-10 17:32:01 2000-04-03 + + +-- !query +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + USING 'cat' AS ( + a string, + b long, + c binary, + d tinyint, + e int, + f smallint, + g long, + h float, + i double, + j decimal(38, 18), + k int, + l long) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + FROM t +) tmp +-- !query schema +struct +-- !query output +1 NULL Spark SQL 1 1 100 1 1.0 1.0 1.000000000000000000 NULL NULL +2 NULL Spark SQL 2 2 200 2 2.0 2.0 2.000000000000000000 NULL NULL +3 NULL Spark SQL 3 3 300 3 3.0 3.0 3.000000000000000000 NULL NULL + + +-- !query +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '@' + NULL DEFINED AS 'NULL' + USING 'cat' AS ( + a string, + b string, + c string, + d string, + e string, + f string, + g string, + h string, + i string, + j string, + k string, + l string) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '@' + NULL DEFINED AS 'NULL' + FROM t +) tmp +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.catalyst.parser.ParseException + +LINES TERMINATED BY only supports newline '\n' right now: @(line 3, pos 4) + +== SQL == +SELECT a, b, decode(c, 'UTF-8'), d, e, f, g, h, i, j, k, l FROM ( + SELECT TRANSFORM(a, b, c, d, e, f, g, h, i, j, k, l) + ROW FORMAT DELIMITED +----^^^ + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '@' + NULL DEFINED AS 'NULL' + USING 'cat' AS ( + a string, + b string, + c string, + d string, + e string, + f string, + 
g string, + h string, + i string, + j string, + k string, + l string) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '@' + NULL DEFINED AS 'NULL' + FROM t +) tmp diff --git a/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/promoteStrings.sql.out b/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/promoteStrings.sql.out index b8c190beeae19..08941f2890cf2 100644 --- a/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/promoteStrings.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/promoteStrings.sql.out @@ -2526,7 +2526,7 @@ SELECT stddev_samp('1') FROM t -- !query schema struct -- !query output -NaN +NULL -- !query @@ -2558,7 +2558,7 @@ SELECT var_samp('1') FROM t -- !query schema struct -- !query output -NaN +NULL -- !query @@ -2566,7 +2566,7 @@ SELECT skewness('1') FROM t -- !query schema struct -- !query output -NaN +NULL -- !query @@ -2574,4 +2574,4 @@ SELECT kurtosis('1') FROM t -- !query schema struct -- !query output -NaN +NULL diff --git a/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out b/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out index 89b1cdb3e353d..a527b20dc04ff 100644 --- a/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/typeCoercion/native/widenSetOperationTypes.sql.out @@ -88,7 +88,7 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. binary <> tinyint at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> tinyint at the first column of the second table -- !query @@ -97,7 +97,7 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> tinyint at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> tinyint at the first column of the second table -- !query @@ -106,7 +106,7 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as ti struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> tinyint at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> tinyint at the first column of the second table -- !query @@ -115,7 +115,7 @@ SELECT cast(1 as tinyint) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> tinyint at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> tinyint at the first column of the second table -- !query @@ -196,7 +196,7 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. 
binary <> smallint at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> smallint at the first column of the second table -- !query @@ -205,7 +205,7 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> smallint at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> smallint at the first column of the second table -- !query @@ -214,7 +214,7 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> smallint at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> smallint at the first column of the second table -- !query @@ -223,7 +223,7 @@ SELECT cast(1 as smallint) FROM t UNION SELECT cast('2017-12-11 09:30:00' as dat struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> smallint at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> smallint at the first column of the second table -- !query @@ -304,7 +304,7 @@ SELECT cast(1 as int) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. binary <> int at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> int at the first column of the second table -- !query @@ -313,7 +313,7 @@ SELECT cast(1 as int) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> int at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> int at the first column of the second table -- !query @@ -322,7 +322,7 @@ SELECT cast(1 as int) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as timest struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> int at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> int at the first column of the second table -- !query @@ -331,7 +331,7 @@ SELECT cast(1 as int) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date) FR struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> int at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> int at the first column of the second table -- !query @@ -412,7 +412,7 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. 
binary <> bigint at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> bigint at the first column of the second table -- !query @@ -421,7 +421,7 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> bigint at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> bigint at the first column of the second table -- !query @@ -430,7 +430,7 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as tim struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> bigint at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> bigint at the first column of the second table -- !query @@ -439,7 +439,7 @@ SELECT cast(1 as bigint) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date) struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> bigint at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> bigint at the first column of the second table -- !query @@ -520,7 +520,7 @@ SELECT cast(1 as float) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. binary <> float at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> float at the first column of the second table -- !query @@ -529,7 +529,7 @@ SELECT cast(1 as float) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> float at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> float at the first column of the second table -- !query @@ -538,7 +538,7 @@ SELECT cast(1 as float) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as time struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> float at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> float at the first column of the second table -- !query @@ -547,7 +547,7 @@ SELECT cast(1 as float) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date) struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> float at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> float at the first column of the second table -- !query @@ -628,7 +628,7 @@ SELECT cast(1 as double) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. 
binary <> double at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> double at the first column of the second table -- !query @@ -637,7 +637,7 @@ SELECT cast(1 as double) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> double at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> double at the first column of the second table -- !query @@ -646,7 +646,7 @@ SELECT cast(1 as double) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as tim struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> double at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> double at the first column of the second table -- !query @@ -655,7 +655,7 @@ SELECT cast(1 as double) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date) struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> double at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> double at the first column of the second table -- !query @@ -736,7 +736,7 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. binary <> decimal(10,0) at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> decimal(10,0) at the first column of the second table -- !query @@ -745,7 +745,7 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> decimal(10,0) at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> decimal(10,0) at the first column of the second table -- !query @@ -754,7 +754,7 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast('2017-12-11 09:30:00.0 struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> decimal(10,0) at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> decimal(10,0) at the first column of the second table -- !query @@ -763,7 +763,7 @@ SELECT cast(1 as decimal(10, 0)) FROM t UNION SELECT cast('2017-12-11 09:30:00' struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> decimal(10,0) at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> decimal(10,0) at the first column of the second table -- !query @@ -844,7 +844,7 @@ SELECT cast(1 as string) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. 
binary <> string at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> string at the first column of the second table -- !query @@ -853,7 +853,7 @@ SELECT cast(1 as string) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> string at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> string at the first column of the second table -- !query @@ -880,7 +880,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as tinyint) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. tinyint <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. tinyint <> binary at the first column of the second table -- !query @@ -889,7 +889,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as smallint) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. smallint <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. smallint <> binary at the first column of the second table -- !query @@ -898,7 +898,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as int) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. int <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. int <> binary at the first column of the second table -- !query @@ -907,7 +907,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as bigint) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. bigint <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. bigint <> binary at the first column of the second table -- !query @@ -916,7 +916,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as float) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. float <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. float <> binary at the first column of the second table -- !query @@ -925,7 +925,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as double) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. double <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. double <> binary at the first column of the second table -- !query @@ -934,7 +934,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as decimal(10, 0)) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. 
decimal(10,0) <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. decimal(10,0) <> binary at the first column of the second table -- !query @@ -943,7 +943,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as string) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. string <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. string <> binary at the first column of the second table -- !query @@ -961,7 +961,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast(2 as boolean) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> binary at the first column of the second table -- !query @@ -970,7 +970,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> binary at the first column of the second table -- !query @@ -979,7 +979,7 @@ SELECT cast('1' as binary) FROM t UNION SELECT cast('2017-12-11 09:30:00' as dat struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> binary at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> binary at the first column of the second table -- !query @@ -988,7 +988,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as tinyint) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. tinyint <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. tinyint <> boolean at the first column of the second table -- !query @@ -997,7 +997,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as smallint) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. smallint <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. smallint <> boolean at the first column of the second table -- !query @@ -1006,7 +1006,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as int) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. int <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. int <> boolean at the first column of the second table -- !query @@ -1015,7 +1015,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as bigint) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. 
bigint <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. bigint <> boolean at the first column of the second table -- !query @@ -1024,7 +1024,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as float) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. float <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. float <> boolean at the first column of the second table -- !query @@ -1033,7 +1033,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as double) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. double <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. double <> boolean at the first column of the second table -- !query @@ -1042,7 +1042,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as decimal(10, 0)) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. decimal(10,0) <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. decimal(10,0) <> boolean at the first column of the second table -- !query @@ -1051,7 +1051,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast(2 as string) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. string <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. string <> boolean at the first column of the second table -- !query @@ -1060,7 +1060,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast('2' as binary) FROM t struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. binary <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> boolean at the first column of the second table -- !query @@ -1077,7 +1077,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast('2017-12-11 09:30:00.0' as ti struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. timestamp <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. timestamp <> boolean at the first column of the second table -- !query @@ -1086,7 +1086,7 @@ SELECT cast(1 as boolean) FROM t UNION SELECT cast('2017-12-11 09:30:00' as date struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. date <> boolean at the first column of the second table; +Union can only be performed on tables with the compatible column types. date <> boolean at the first column of the second table -- !query @@ -1095,7 +1095,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. 
tinyint <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. tinyint <> timestamp at the first column of the second table -- !query @@ -1104,7 +1104,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. smallint <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. smallint <> timestamp at the first column of the second table -- !query @@ -1113,7 +1113,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. int <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. int <> timestamp at the first column of the second table -- !query @@ -1122,7 +1122,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. bigint <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. bigint <> timestamp at the first column of the second table -- !query @@ -1131,7 +1131,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. float <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. float <> timestamp at the first column of the second table -- !query @@ -1140,7 +1140,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. double <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. double <> timestamp at the first column of the second table -- !query @@ -1149,7 +1149,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. decimal(10,0) <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. decimal(10,0) <> timestamp at the first column of the second table -- !query @@ -1167,7 +1167,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast('2' a struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. binary <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. 
binary <> timestamp at the first column of the second table -- !query @@ -1176,7 +1176,7 @@ SELECT cast('2017-12-12 09:30:00.0' as timestamp) FROM t UNION SELECT cast(2 as struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> timestamp at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> timestamp at the first column of the second table -- !query @@ -1203,7 +1203,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as tinyint struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. tinyint <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. tinyint <> date at the first column of the second table -- !query @@ -1212,7 +1212,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as smallin struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. smallint <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. smallint <> date at the first column of the second table -- !query @@ -1221,7 +1221,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as int) FR struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. int <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. int <> date at the first column of the second table -- !query @@ -1230,7 +1230,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as bigint) struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. bigint <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. bigint <> date at the first column of the second table -- !query @@ -1239,7 +1239,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as float) struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. float <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. float <> date at the first column of the second table -- !query @@ -1248,7 +1248,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as double) struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. double <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. double <> date at the first column of the second table -- !query @@ -1257,7 +1257,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as decimal struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. decimal(10,0) <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. 
decimal(10,0) <> date at the first column of the second table -- !query @@ -1275,7 +1275,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast('2' as binar struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. binary <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. binary <> date at the first column of the second table -- !query @@ -1284,7 +1284,7 @@ SELECT cast('2017-12-12 09:30:00' as date) FROM t UNION SELECT cast(2 as boolean struct<> -- !query output org.apache.spark.sql.AnalysisException -Union can only be performed on tables with the compatible column types. boolean <> date at the first column of the second table; +Union can only be performed on tables with the compatible column types. boolean <> date at the first column of the second table -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out index 76637bf578e6f..0eb21d386378d 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out @@ -143,7 +143,7 @@ SELECT udf(var_pop(1.0)), var_samp(udf(2.0)) -- !query schema struct -- !query output -0.0 NaN +0.0 NULL -- !query @@ -151,7 +151,7 @@ SELECT stddev_pop(udf(CAST(3.0 AS Decimal(38,0)))), stddev_samp(CAST(udf(4.0) AS -- !query schema struct -- !query output -0.0 NaN +0.0 NULL -- !query @@ -373,7 +373,7 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. Expression in where clause: [(sum(DISTINCT CAST((outer(a.`four`) + b.`four`) AS BIGINT)) = CAST(CAST(udf(ansi_cast(four as string)) AS INT) AS BIGINT))] -Invalid expressions: [sum(DISTINCT CAST((outer(a.`four`) + b.`four`) AS BIGINT))]; +Invalid expressions: [sum(DISTINCT CAST((outer(a.`four`) + b.`four`) AS BIGINT))] -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out index f491d9b9ba3a8..17b77a8a7aea9 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part3.sql.out @@ -8,7 +8,7 @@ select udf(max(min(unique1))) from tenk1 struct<> -- !query output org.apache.spark.sql.AnalysisException -It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.; +It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query. 
-- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out index 6c733e916d734..2f31d2684ca22 100755 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-case.sql.out @@ -176,28 +176,28 @@ struct -- !query SELECT CASE WHEN udf(1=0) THEN 1/0 WHEN 1=1 THEN 1 ELSE 2/0 END -- !query schema -struct +struct<> -- !query output -1.0 +java.lang.ArithmeticException +divide by zero -- !query SELECT CASE 1 WHEN 0 THEN 1/udf(0) WHEN 1 THEN 1 ELSE 2/0 END -- !query schema -struct +struct<> -- !query output -1.0 +java.lang.ArithmeticException +divide by zero -- !query SELECT CASE WHEN i > 100 THEN udf(1/0) ELSE udf(0) END FROM case_tbl -- !query schema -struct 100) THEN CAST(udf(ansi_cast((ansi_cast(1 as double) / ansi_cast(0 as double)) as string)) AS DOUBLE) ELSE CAST(CAST(udf(ansi_cast(0 as string)) AS INT) AS DOUBLE) END:double> +struct<> -- !query output -0.0 -0.0 -0.0 -0.0 +java.lang.ArithmeticException +divide by zero -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out index 50b6e60086747..e3d7eb169e818 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-select_having.sql.out @@ -143,7 +143,7 @@ SELECT udf(a) FROM test_having HAVING udf(min(a)) < udf(max(a)) struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and 'spark_catalog.default.test_having.`a`' is not an aggregate function. Wrap '(min(spark_catalog.default.test_having.`a`) AS `min(a#x)`, max(spark_catalog.default.test_having.`a`) AS `max(a#x)`)' in windowing function(s) or wrap 'spark_catalog.default.test_having.`a`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and 'spark_catalog.default.test_having.`a`' is not an aggregate function. Wrap '(min(spark_catalog.default.test_having.`a`) AS `min(a#x)`, max(spark_catalog.default.test_having.`a`) AS `max(a#x)`)' in windowing function(s) or wrap 'spark_catalog.default.test_having.`a`' in first() (or first_value) if you don't care which value you get. -- !query @@ -174,9 +174,10 @@ struct -- !query SELECT 1 AS one FROM test_having WHERE 1/udf(a) = 1 HAVING 1 < 2 -- !query schema -struct +struct<> -- !query output -1 +java.lang.ArithmeticException +divide by zero -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out index 2613120e004df..7a4ae72fac97b 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-except-all.sql.out @@ -141,7 +141,7 @@ SELECT array(1) struct<> -- !query output org.apache.spark.sql.AnalysisException -ExceptAll can only be performed on tables with the compatible column types. array <> int at the first column of the second table; +ExceptAll can only be performed on tables with the compatible column types. 
array <> int at the first column of the second table -- !query @@ -213,7 +213,7 @@ SELECT k, v FROM tab4 struct<> -- !query output org.apache.spark.sql.AnalysisException -ExceptAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns; +ExceptAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out index f4cf4196298c1..15620e34f2be8 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-analytics.sql.out @@ -210,7 +210,7 @@ SELECT course, udf(year), GROUPING(course) FROM courseSales GROUP BY course, udf struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping() can only be used with GroupingSets/Cube/Rollup; +grouping() can only be used with GroupingSets/Cube/Rollup -- !query @@ -219,7 +219,7 @@ SELECT course, udf(year), GROUPING_ID(course, year) FROM courseSales GROUP BY ud struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -255,7 +255,7 @@ SELECT course, udf(year) FROM courseSales GROUP BY udf(course), year HAVING GROU struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -264,7 +264,7 @@ SELECT course, udf(udf(year)) FROM courseSales GROUP BY course, year HAVING GROU struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -319,7 +319,7 @@ SELECT course, udf(year) FROM courseSales GROUP BY course, udf(year) ORDER BY GR struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query @@ -328,7 +328,7 @@ SELECT course, udf(year) FROM courseSales GROUP BY course, udf(year) ORDER BY GR struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup; +grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out index da5256f5c0453..18a7708c40685 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-group-by.sql.out @@ -18,7 +18,7 @@ SELECT udf(a), udf(COUNT(b)) FROM testData struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and 'testdata.`a`' is not an aggregate function. 
Wrap '(CAST(udf(cast(count(b) as string)) AS BIGINT) AS `CAST(udf(cast(count(b) as string)) AS BIGINT)`)' in windowing function(s) or wrap 'testdata.`a`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and 'testdata.`a`' is not an aggregate function. Wrap '(CAST(udf(cast(count(b) as string)) AS BIGINT) AS `CAST(udf(cast(count(b) as string)) AS BIGINT)`)' in windowing function(s) or wrap 'testdata.`a`' in first() (or first_value) if you don't care which value you get. -- !query @@ -46,7 +46,7 @@ SELECT udf(a), udf(COUNT(udf(b))) FROM testData GROUP BY b struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query @@ -110,7 +110,7 @@ SELECT udf(a + 2), udf(COUNT(b)) FROM testData GROUP BY a + 1 struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdata.`a`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query @@ -167,7 +167,7 @@ SELECT udf(COUNT(b)) AS k FROM testData GROUP BY k struct<> -- !query output org.apache.spark.sql.AnalysisException -aggregate functions are not allowed in GROUP BY, but found CAST(udf(cast(count(b) as string)) AS BIGINT); +aggregate functions are not allowed in GROUP BY, but found CAST(udf(cast(count(b) as string)) AS BIGINT) -- !query @@ -185,7 +185,7 @@ SELECT k AS a, udf(COUNT(udf(v))) FROM testDataHasSameNameWithAlias GROUP BY udf struct<> -- !query output org.apache.spark.sql.AnalysisException -expression 'testdatahassamenamewithalias.`k`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get.; +expression 'testdatahassamenamewithalias.`k`' is neither present in the group by, nor is it an aggregate function. Add to group by or wrap in first() (or first_value) if you don't care which value you get. -- !query @@ -274,7 +274,7 @@ SELECT udf(id) FROM range(10) HAVING id > 0 struct<> -- !query output org.apache.spark.sql.AnalysisException -grouping expressions sequence is empty, and '`id`' is not an aggregate function. Wrap '()' in windowing function(s) or wrap '`id`' in first() (or first_value) if you don't care which value you get.; +grouping expressions sequence is empty, and '`id`' is not an aggregate function. Wrap '()' in windowing function(s) or wrap '`id`' in first() (or first_value) if you don't care which value you get. -- !query @@ -496,7 +496,7 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. Expression in where clause: [(count(1) > 1L)] -Invalid expressions: [count(1)]; +Invalid expressions: [count(1)] -- !query @@ -508,7 +508,7 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. 
Expression in where clause: [((count(1) + 1L) > 1L)] -Invalid expressions: [count(1)]; +Invalid expressions: [count(1)] -- !query @@ -520,4 +520,4 @@ org.apache.spark.sql.AnalysisException Aggregate/Window/Generate expressions are not valid in where clause of the query. Expression in where clause: [(((test_agg.`k` = 1) OR (test_agg.`k` = 2)) OR (((count(1) + 1L) > 1L) OR (max(test_agg.`k`) > 1)))] -Invalid expressions: [count(1), max(test_agg.`k`)]; +Invalid expressions: [count(1), max(test_agg.`k`)] diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out index b3735ae153267..e225a3df596c0 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-intersect-all.sql.out @@ -98,7 +98,7 @@ SELECT array(1), udf(2) struct<> -- !query output org.apache.spark.sql.AnalysisException -IntersectAll can only be performed on tables with the compatible column types. array <> int at the first column of the second table; +IntersectAll can only be performed on tables with the compatible column types. array <> int at the first column of the second table -- !query @@ -109,7 +109,7 @@ SELECT udf(k), udf(v) FROM tab2 struct<> -- !query output org.apache.spark.sql.AnalysisException -IntersectAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns; +IntersectAll can only be performed on tables with the same number of columns, but the first table has 1 columns and the second table has 2 columns -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out index 414435e6b781d..bcec61470d4a4 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-pivot.sql.out @@ -202,7 +202,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Aggregate expression required for pivot, but 'coursesales.`earnings`' did not appear in any aggregate function.; +Aggregate expression required for pivot, but 'coursesales.`earnings`' did not appear in any aggregate function. -- !query @@ -217,7 +217,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Aggregate expression required for pivot, but '__auto_generated_subquery_name.`year`' did not appear in any aggregate function.; +Aggregate expression required for pivot, but '__auto_generated_subquery_name.`year`' did not appear in any aggregate function. -- !query @@ -262,7 +262,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query.; +It is not allowed to use an aggregate function in the argument of another aggregate function. Please use the inner aggregate function in a sub-query. 
-- !query @@ -313,7 +313,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Invalid pivot value 'dotNET': value data type string does not match pivot column data type struct; +Invalid pivot value 'dotNET': value data type string does not match pivot column data type struct -- !query @@ -339,7 +339,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Literal expressions required for pivot values, found 'course#x'; +Literal expressions required for pivot values, found 'course#x' -- !query @@ -424,7 +424,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Invalid pivot column 'm#x'. Pivot columns must be comparable.; +Invalid pivot column 'm#x'. Pivot columns must be comparable. -- !query @@ -441,7 +441,7 @@ PIVOT ( struct<> -- !query output org.apache.spark.sql.AnalysisException -Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns must be comparable.; +Invalid pivot column 'named_struct(course, course#x, m, m#x)'. Pivot columns must be comparable. -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/udf/udf-window.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/udf-window.sql.out index a84070535b658..6d97800904971 100644 --- a/sql/core/src/test/resources/sql-tests/results/udf/udf-window.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/udf/udf-window.sql.out @@ -289,13 +289,13 @@ ORDER BY cate, udf(val) struct,collect_set:array,skewness:double,kurtosis:double> -- !query output NULL NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1 0.5 0.0 1 1 NULL NULL 0 NULL NULL NULL NULL [] [] NULL NULL -3 NULL 3 3 3 1 3 3.0 NaN NULL 3 NULL 3 3 3 2 2 1.0 1.0 2 2 0.0 NaN 1 0.0 NaN NaN 0.0 [3] [3] NaN NaN -NULL a NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1 0.25 0.0 1 1 NULL NULL 0 NULL NULL NULL NULL [] [] NaN NaN +3 NULL 3 3 3 1 3 3.0 NULL NULL 3 NULL 3 3 3 2 2 1.0 1.0 2 2 0.0 NULL 1 0.0 NULL NULL 0.0 [3] [3] NULL NULL +NULL a NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1 0.25 0.0 1 1 NULL NULL 0 NULL NULL NULL NULL [] [] NULL NULL 1 a 1 1 1 2 2 1.0 0.0 NULL 1 NULL 1 1 1 2 2 0.75 0.3333333333333333 1 2 0.0 0.0 1 0.0 NULL 0.0 0.0 [1,1] [1] 0.7071067811865476 -1.5 1 a 1 1 1 2 2 1.0 0.0 NULL 1 NULL 1 1 1 2 2 0.75 0.3333333333333333 2 3 0.0 0.0 1 0.0 NULL 0.0 0.0 [1,1] [1] 0.7071067811865476 -1.5 2 a 2 1 1 3 4 1.3333333333333333 0.5773502691896258 NULL 1 NULL 2 2 2 4 3 1.0 1.0 2 4 0.22222222222222224 0.33333333333333337 2 4.772185885555555E8 1.0 0.5773502691896258 0.4714045207910317 [1,1,2] [1,2] 1.1539890888012805 -0.6672217220327235 -1 b 1 1 1 1 1 1.0 NaN 1 1 1 1 1 1 1 1 0.3333333333333333 0.0 1 1 0.0 NaN 1 NULL NULL NaN 0.0 [1] [1] NaN NaN -2 b 2 1 1 2 3 1.5 0.7071067811865476 1 1 1 2 2 2 2 2 0.6666666666666666 0.5 1 2 0.25 0.5 2 0.0 NaN 0.7071067811865476 0.5 [1,2] [1,2] 0.0 -2.0000000000000013 +1 b 1 1 1 1 1 1.0 NULL 1 1 1 1 1 1 1 1 0.3333333333333333 0.0 1 1 0.0 NULL 1 NULL NULL NULL 0.0 [1] [1] NULL NULL +2 b 2 1 1 2 3 1.5 0.7071067811865476 1 1 1 2 2 2 2 2 0.6666666666666666 0.5 1 2 0.25 0.5 2 0.0 NULL 0.7071067811865476 0.5 [1,2] [1,2] 0.0 -2.0000000000000013 3 b 3 1 1 3 6 2.0 1.0 1 1 1 3 3 3 3 3 1.0 1.0 2 3 0.6666666666666666 1.0 3 5.3687091175E8 1.0 1.0 0.816496580927726 [1,2,3] [1,2,3] 0.7057890433107311 -1.4999999999999984 @@ -321,7 +321,7 @@ SELECT udf(val), cate, row_number() OVER(PARTITION BY cate) FROM testData ORDER struct<> -- !query output org.apache.spark.sql.AnalysisException -Window 
function row_number() requires window to be ordered, please add ORDER BY clause. For example SELECT row_number()(value_expr) OVER (PARTITION BY window_partition ORDER BY window_ordering) from table; +Window function row_number() requires window to be ordered, please add ORDER BY clause. For example SELECT row_number()(value_expr) OVER (PARTITION BY window_partition ORDER BY window_ordering) from table -- !query diff --git a/sql/core/src/test/resources/sql-tests/results/window.sql.out b/sql/core/src/test/resources/sql-tests/results/window.sql.out index ede044a44fdaa..e3fd0cd77cb6f 100644 --- a/sql/core/src/test/resources/sql-tests/results/window.sql.out +++ b/sql/core/src/test/resources/sql-tests/results/window.sql.out @@ -1,5 +1,5 @@ -- Automatically generated by SQLQueryTestSuite --- Number of queries: 24 +-- Number of queries: 46 -- !query @@ -20,6 +20,50 @@ struct<> +-- !query +CREATE OR REPLACE TEMPORARY VIEW basic_pays AS SELECT * FROM VALUES +('Diane Murphy','Accounting',8435), +('Mary Patterson','Accounting',9998), +('Jeff Firrelli','Accounting',8992), +('William Patterson','Accounting',8870), +('Gerard Bondur','Accounting',11472), +('Anthony Bow','Accounting',6627), +('Leslie Jennings','IT',8113), +('Leslie Thompson','IT',5186), +('Julie Firrelli','Sales',9181), +('Steve Patterson','Sales',9441), +('Foon Yue Tseng','Sales',6660), +('George Vanauf','Sales',10563), +('Loui Bondur','SCM',10449), +('Gerard Hernandez','SCM',6949), +('Pamela Castillo','SCM',11303), +('Larry Bott','SCM',11798), +('Barry Jones','SCM',10586) +AS basic_pays(employee_name, department, salary) +-- !query schema +struct<> +-- !query output + + + +-- !query +CREATE OR REPLACE TEMPORARY VIEW test_ignore_null AS SELECT * FROM VALUES +('a', 0, null), +('a', 1, 'x'), +('b', 2, null), +('c', 3, null), +('a', 4, 'y'), +('b', 5, null), +('a', 6, 'z'), +('a', 7, 'v'), +('a', 8, null) +AS test_ignore_null(content, id, v) +-- !query schema +struct<> +-- !query output + + + -- !query SELECT val, cate, count(val) OVER(PARTITION BY cate ORDER BY val ROWS CURRENT ROW) FROM testData ORDER BY cate, val @@ -289,13 +333,13 @@ ORDER BY cate, val struct,collect_set:array,skewness:double,kurtosis:double> -- !query output NULL NULL NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1 0.5 0.0 1 1 NULL NULL 0 NULL NULL NULL NULL [] [] NULL NULL -3 NULL 3 3 3 1 3 3.0 NaN NULL 3 NULL 3 3 3 2 2 1.0 1.0 2 2 0.0 NaN 1 0.0 NaN NaN 0.0 [3] [3] NaN NaN -NULL a NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1 0.25 0.0 1 1 NULL NULL 0 NULL NULL NULL NULL [] [] NaN NaN +3 NULL 3 3 3 1 3 3.0 NULL NULL 3 NULL 3 3 3 2 2 1.0 1.0 2 2 0.0 NULL 1 0.0 NULL NULL 0.0 [3] [3] NULL NULL +NULL a NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL NULL NULL 1 1 0.25 0.0 1 1 NULL NULL 0 NULL NULL NULL NULL [] [] NULL NULL 1 a 1 1 1 2 2 1.0 0.0 NULL 1 NULL 1 1 1 2 2 0.75 0.3333333333333333 1 2 0.0 0.0 1 0.0 NULL 0.0 0.0 [1,1] [1] 0.7071067811865476 -1.5 1 a 1 1 1 2 2 1.0 0.0 NULL 1 NULL 1 1 1 2 2 0.75 0.3333333333333333 2 3 0.0 0.0 1 0.0 NULL 0.0 0.0 [1,1] [1] 0.7071067811865476 -1.5 2 a 2 1 1 3 4 1.3333333333333333 0.5773502691896258 NULL 1 NULL 2 2 2 4 3 1.0 1.0 2 4 0.22222222222222224 0.33333333333333337 2 4.772185885555555E8 1.0 0.5773502691896258 0.4714045207910317 [1,1,2] [1,2] 1.1539890888012805 -0.6672217220327235 -1 b 1 1 1 1 1 1.0 NaN 1 1 1 1 1 1 1 1 0.3333333333333333 0.0 1 1 0.0 NaN 1 NULL NULL NaN 0.0 [1] [1] NaN NaN -2 b 2 1 1 2 3 1.5 0.7071067811865476 1 1 1 2 2 2 2 2 0.6666666666666666 0.5 1 2 0.25 0.5 2 0.0 NaN 
0.7071067811865476 0.5 [1,2] [1,2] 0.0 -2.0000000000000013 +1 b 1 1 1 1 1 1.0 NULL 1 1 1 1 1 1 1 1 0.3333333333333333 0.0 1 1 0.0 NULL 1 NULL NULL NULL 0.0 [1] [1] NULL NULL +2 b 2 1 1 2 3 1.5 0.7071067811865476 1 1 1 2 2 2 2 2 0.6666666666666666 0.5 1 2 0.25 0.5 2 0.0 NULL 0.7071067811865476 0.5 [1,2] [1,2] 0.0 -2.0000000000000013 3 b 3 1 1 3 6 2.0 1.0 1 1 1 3 3 3 3 3 1.0 1.0 2 3 0.6666666666666666 1.0 3 5.3687091175E8 1.0 1.0 0.816496580927726 [1,2,3] [1,2,3] 0.7057890433107311 -1.4999999999999984 @@ -321,7 +365,7 @@ SELECT val, cate, row_number() OVER(PARTITION BY cate) FROM testData ORDER BY ca struct<> -- !query output org.apache.spark.sql.AnalysisException -Window function row_number() requires window to be ordered, please add ORDER BY clause. For example SELECT row_number()(value_expr) OVER (PARTITION BY window_partition ORDER BY window_ordering) from table; +Window function row_number() requires window to be ordered, please add ORDER BY clause. For example SELECT row_number()(value_expr) OVER (PARTITION BY window_partition ORDER BY window_ordering) from table -- !query @@ -390,4 +434,623 @@ FROM testData ORDER BY cate, val struct<> -- !query output org.apache.spark.sql.AnalysisException -window aggregate function with filter predicate is not supported yet.; +window aggregate function with filter predicate is not supported yet. + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC) +ORDER BY salary DESC +-- !query schema +struct +-- !query output +Larry Bott 11798 Larry Bott NULL +Gerard Bondur 11472 Larry Bott Gerard Bondur +Pamela Castillo 11303 Larry Bott Gerard Bondur +Barry Jones 10586 Larry Bott Gerard Bondur +George Vanauf 10563 Larry Bott Gerard Bondur +Loui Bondur 10449 Larry Bott Gerard Bondur +Mary Patterson 9998 Larry Bott Gerard Bondur +Steve Patterson 9441 Larry Bott Gerard Bondur +Julie Firrelli 9181 Larry Bott Gerard Bondur +Jeff Firrelli 8992 Larry Bott Gerard Bondur +William Patterson 8870 Larry Bott Gerard Bondur +Diane Murphy 8435 Larry Bott Gerard Bondur +Leslie Jennings 8113 Larry Bott Gerard Bondur +Gerard Hernandez 6949 Larry Bott Gerard Bondur +Foon Yue Tseng 6660 Larry Bott Gerard Bondur +Anthony Bow 6627 Larry Bott Gerard Bondur +Leslie Thompson 5186 Larry Bott Gerard Bondur + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY salary DESC +-- !query schema +struct +-- !query output +Larry Bott 11798 Larry Bott NULL +Gerard Bondur 11472 Larry Bott Gerard Bondur +Pamela Castillo 11303 Larry Bott Gerard Bondur +Barry Jones 10586 Larry Bott Gerard Bondur +George Vanauf 10563 Larry Bott Gerard Bondur +Loui Bondur 10449 Larry Bott Gerard Bondur +Mary Patterson 9998 Larry Bott Gerard Bondur +Steve Patterson 9441 Larry Bott Gerard Bondur +Julie Firrelli 9181 Larry Bott Gerard Bondur +Jeff Firrelli 8992 Larry Bott Gerard Bondur +William Patterson 8870 Larry Bott Gerard Bondur +Diane Murphy 8435 Larry Bott Gerard Bondur +Leslie Jennings 8113 Larry Bott Gerard Bondur +Gerard Hernandez 6949 Larry Bott Gerard Bondur +Foon Yue Tseng 6660 Larry Bott Gerard Bondur +Anthony Bow 6627 Larry Bott Gerard Bondur +Leslie Thompson 5186 Larry Bott Gerard Bondur + + +-- !query +SELECT + employee_name, + 
salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY salary DESC +-- !query schema +struct +-- !query output +Larry Bott 11798 Larry Bott NULL +Gerard Bondur 11472 Larry Bott Gerard Bondur +Pamela Castillo 11303 Larry Bott Gerard Bondur +Barry Jones 10586 Larry Bott Gerard Bondur +George Vanauf 10563 Larry Bott Gerard Bondur +Loui Bondur 10449 Larry Bott Gerard Bondur +Mary Patterson 9998 Larry Bott Gerard Bondur +Steve Patterson 9441 Larry Bott Gerard Bondur +Julie Firrelli 9181 Larry Bott Gerard Bondur +Jeff Firrelli 8992 Larry Bott Gerard Bondur +William Patterson 8870 Larry Bott Gerard Bondur +Diane Murphy 8435 Larry Bott Gerard Bondur +Leslie Jennings 8113 Larry Bott Gerard Bondur +Gerard Hernandez 6949 Larry Bott Gerard Bondur +Foon Yue Tseng 6660 Larry Bott Gerard Bondur +Anthony Bow 6627 Larry Bott Gerard Bondur +Leslie Thompson 5186 Larry Bott Gerard Bondur + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary RANGE BETWEEN 2000 PRECEDING AND 1000 FOLLOWING) +ORDER BY salary +-- !query schema +struct +-- !query output +Leslie Thompson 5186 Leslie Thompson NULL +Anthony Bow 6627 Leslie Thompson Anthony Bow +Foon Yue Tseng 6660 Leslie Thompson Anthony Bow +Gerard Hernandez 6949 Leslie Thompson Anthony Bow +Leslie Jennings 8113 Anthony Bow Foon Yue Tseng +Diane Murphy 8435 Anthony Bow Foon Yue Tseng +William Patterson 8870 Gerard Hernandez Leslie Jennings +Jeff Firrelli 8992 Leslie Jennings Diane Murphy +Julie Firrelli 9181 Leslie Jennings Diane Murphy +Steve Patterson 9441 Leslie Jennings Diane Murphy +Mary Patterson 9998 Leslie Jennings Diane Murphy +Loui Bondur 10449 William Patterson Jeff Firrelli +George Vanauf 10563 William Patterson Jeff Firrelli +Barry Jones 10586 William Patterson Jeff Firrelli +Pamela Castillo 11303 Steve Patterson Mary Patterson +Gerard Bondur 11472 Mary Patterson Loui Bondur +Larry Bott 11798 Mary Patterson Loui Bondur + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +ORDER BY salary DESC +-- !query schema +struct +-- !query output +Larry Bott 11798 Larry Bott Gerard Bondur +Gerard Bondur 11472 Larry Bott Gerard Bondur +Pamela Castillo 11303 Larry Bott Gerard Bondur +Barry Jones 10586 Gerard Bondur Pamela Castillo +George Vanauf 10563 Pamela Castillo Barry Jones +Loui Bondur 10449 Barry Jones George Vanauf +Mary Patterson 9998 George Vanauf Loui Bondur +Steve Patterson 9441 Loui Bondur Mary Patterson +Julie Firrelli 9181 Mary Patterson Steve Patterson +Jeff Firrelli 8992 Steve Patterson Julie Firrelli +William Patterson 8870 Julie Firrelli Jeff Firrelli +Diane Murphy 8435 Jeff Firrelli William Patterson +Leslie Jennings 8113 William Patterson Diane Murphy +Gerard Hernandez 6949 Diane Murphy Leslie Jennings +Foon Yue Tseng 6660 Leslie Jennings Gerard Hernandez +Anthony Bow 6627 Gerard Hernandez Foon Yue Tseng +Leslie Thompson 5186 Foon Yue Tseng Anthony Bow + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + 
basic_pays +WINDOW w AS (ORDER BY salary DESC RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) +ORDER BY salary DESC +-- !query schema +struct +-- !query output +Larry Bott 11798 Larry Bott Gerard Bondur +Gerard Bondur 11472 Gerard Bondur Pamela Castillo +Pamela Castillo 11303 Pamela Castillo Barry Jones +Barry Jones 10586 Barry Jones George Vanauf +George Vanauf 10563 George Vanauf Loui Bondur +Loui Bondur 10449 Loui Bondur Mary Patterson +Mary Patterson 9998 Mary Patterson Steve Patterson +Steve Patterson 9441 Steve Patterson Julie Firrelli +Julie Firrelli 9181 Julie Firrelli Jeff Firrelli +Jeff Firrelli 8992 Jeff Firrelli William Patterson +William Patterson 8870 William Patterson Diane Murphy +Diane Murphy 8435 Diane Murphy Leslie Jennings +Leslie Jennings 8113 Leslie Jennings Gerard Hernandez +Gerard Hernandez 6949 Gerard Hernandez Foon Yue Tseng +Foon Yue Tseng 6660 Foon Yue Tseng Anthony Bow +Anthony Bow 6627 Anthony Bow Leslie Thompson +Leslie Thompson 5186 Leslie Thompson NULL + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY salary DESC +-- !query schema +struct +-- !query output +Larry Bott 11798 Larry Bott Gerard Bondur +Gerard Bondur 11472 Larry Bott Gerard Bondur +Pamela Castillo 11303 Larry Bott Gerard Bondur +Barry Jones 10586 Larry Bott Gerard Bondur +George Vanauf 10563 Larry Bott Gerard Bondur +Loui Bondur 10449 Larry Bott Gerard Bondur +Mary Patterson 9998 Larry Bott Gerard Bondur +Steve Patterson 9441 Larry Bott Gerard Bondur +Julie Firrelli 9181 Larry Bott Gerard Bondur +Jeff Firrelli 8992 Larry Bott Gerard Bondur +William Patterson 8870 Larry Bott Gerard Bondur +Diane Murphy 8435 Larry Bott Gerard Bondur +Leslie Jennings 8113 Larry Bott Gerard Bondur +Gerard Hernandez 6949 Larry Bott Gerard Bondur +Foon Yue Tseng 6660 Larry Bott Gerard Bondur +Anthony Bow 6627 Larry Bott Gerard Bondur +Leslie Thompson 5186 Larry Bott Gerard Bondur + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY salary DESC +-- !query schema +struct +-- !query output +Larry Bott 11798 Larry Bott Gerard Bondur +Gerard Bondur 11472 Larry Bott Gerard Bondur +Pamela Castillo 11303 Larry Bott Gerard Bondur +Barry Jones 10586 Larry Bott Gerard Bondur +George Vanauf 10563 Larry Bott Gerard Bondur +Loui Bondur 10449 Larry Bott Gerard Bondur +Mary Patterson 9998 Larry Bott Gerard Bondur +Steve Patterson 9441 Larry Bott Gerard Bondur +Julie Firrelli 9181 Larry Bott Gerard Bondur +Jeff Firrelli 8992 Larry Bott Gerard Bondur +William Patterson 8870 Larry Bott Gerard Bondur +Diane Murphy 8435 Larry Bott Gerard Bondur +Leslie Jennings 8113 Larry Bott Gerard Bondur +Gerard Hernandez 6949 Larry Bott Gerard Bondur +Foon Yue Tseng 6660 Larry Bott Gerard Bondur +Anthony Bow 6627 Larry Bott Gerard Bondur +Leslie Thompson 5186 Larry Bott Gerard Bondur + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) +ORDER BY salary DESC +-- !query schema +struct 
+-- !query output +Larry Bott 11798 Larry Bott Gerard Bondur +Gerard Bondur 11472 Larry Bott Gerard Bondur +Pamela Castillo 11303 Larry Bott Gerard Bondur +Barry Jones 10586 Larry Bott Gerard Bondur +George Vanauf 10563 Larry Bott Gerard Bondur +Loui Bondur 10449 Larry Bott Gerard Bondur +Mary Patterson 9998 Larry Bott Gerard Bondur +Steve Patterson 9441 Larry Bott Gerard Bondur +Julie Firrelli 9181 Larry Bott Gerard Bondur +Jeff Firrelli 8992 Larry Bott Gerard Bondur +William Patterson 8870 Larry Bott Gerard Bondur +Diane Murphy 8435 Larry Bott Gerard Bondur +Leslie Jennings 8113 Larry Bott Gerard Bondur +Gerard Hernandez 6949 Larry Bott Gerard Bondur +Foon Yue Tseng 6660 Larry Bott Gerard Bondur +Anthony Bow 6627 Larry Bott Gerard Bondur +Leslie Thompson 5186 Larry Bott Gerard Bondur + + +-- !query +SELECT + employee_name, + department, + salary, + FIRST_VALUE(employee_name) OVER w highest_salary, + NTH_VALUE(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW w AS ( + PARTITION BY department + ORDER BY salary DESC + RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING +) +ORDER BY department +-- !query schema +struct +-- !query output +Gerard Bondur Accounting 11472 Gerard Bondur Mary Patterson +Mary Patterson Accounting 9998 Gerard Bondur Mary Patterson +Jeff Firrelli Accounting 8992 Gerard Bondur Mary Patterson +William Patterson Accounting 8870 Gerard Bondur Mary Patterson +Diane Murphy Accounting 8435 Gerard Bondur Mary Patterson +Anthony Bow Accounting 6627 Gerard Bondur Mary Patterson +Leslie Jennings IT 8113 Leslie Jennings Leslie Thompson +Leslie Thompson IT 5186 Leslie Jennings Leslie Thompson +Larry Bott SCM 11798 Larry Bott Pamela Castillo +Pamela Castillo SCM 11303 Larry Bott Pamela Castillo +Barry Jones SCM 10586 Larry Bott Pamela Castillo +Loui Bondur SCM 10449 Larry Bott Pamela Castillo +Gerard Hernandez SCM 6949 Larry Bott Pamela Castillo +George Vanauf Sales 10563 George Vanauf Steve Patterson +Steve Patterson Sales 9441 George Vanauf Steve Patterson +Julie Firrelli Sales 9181 George Vanauf Steve Patterson +Foon Yue Tseng Sales 6660 George Vanauf Steve Patterson + + +-- !query +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW + w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING), + w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) +ORDER BY salary DESC +-- !query schema +struct<> +-- !query output +org.apache.spark.sql.catalyst.parser.ParseException + +The definition of window 'w' is repetitive(line 8, pos 0) + +== SQL == +SELECT + employee_name, + salary, + first_value(employee_name) OVER w highest_salary, + nth_value(employee_name, 2) OVER w second_highest_salary +FROM + basic_pays +WINDOW +^^^ + w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING), + w AS (ORDER BY salary DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) +ORDER BY salary DESC + + +-- !query +SELECT + content, + id, + v, + lead(v, 0) IGNORE NULLS OVER w lead_0, + lead(v, 1) IGNORE NULLS OVER w lead_1, + lead(v, 2) IGNORE NULLS OVER w lead_2, + lead(v, 3) IGNORE NULLS OVER w lead_3, + lag(v, 0) IGNORE NULLS OVER w lag_0, + lag(v, 1) IGNORE NULLS OVER w lag_1, + lag(v, 2) IGNORE NULLS OVER w lag_2, + lag(v, 3) IGNORE NULLS OVER w lag_3, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w 
nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL NULL x y z NULL NULL NULL NULL NULL NULL NULL NULL NULL +a 1 x x y z v x NULL NULL NULL x NULL NULL x x +b 2 NULL NULL y z v NULL x NULL NULL x NULL NULL x x +c 3 NULL NULL y z v NULL x NULL NULL x NULL NULL x x +a 4 y y z v NULL y x NULL NULL x y NULL x y +b 5 NULL NULL z v NULL NULL y x NULL x y NULL x y +a 6 z z v NULL NULL z y x NULL x y z x z +a 7 v v NULL NULL NULL v z y x x y z x v +a 8 NULL NULL NULL NULL NULL NULL v z y x y z x v + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL NULL NULL NULL NULL NULL +a 1 x x NULL NULL x x +b 2 NULL x NULL NULL x x +c 3 NULL x NULL NULL x x +a 4 y x y NULL x y +b 5 NULL x y NULL x y +a 6 z x y z x z +a 7 v x y z x v +a 8 NULL x y z x v + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL NULL NULL NULL NULL NULL +a 1 x x NULL NULL x x +b 2 NULL x NULL NULL x x +c 3 NULL x NULL NULL x x +a 4 y x y NULL x y +b 5 NULL x y NULL x y +a 6 z x y z x z +a 7 v x y z x v +a 8 NULL x y z x v + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL x NULL NULL x x +a 1 x x NULL NULL x x +b 2 NULL x y NULL x y +c 3 NULL x y NULL x y +a 4 y y z NULL y z +b 5 NULL y z v y v +a 6 z y z v y v +a 7 v z v NULL z v +a 8 NULL z v NULL z v + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL x NULL NULL x x +a 1 x x NULL NULL x x +b 2 NULL x y NULL x y +c 3 NULL x y NULL x y +a 4 y y z NULL y z +b 5 NULL y z v y v +a 6 z y z v y v +a 7 v z v NULL z v +a 8 NULL z v NULL z v + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS 
OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL x y z x v +a 1 x x y z x v +b 2 NULL y z v y v +c 3 NULL y z v y v +a 4 y y z v y v +b 5 NULL z v NULL z v +a 6 z z v NULL z v +a 7 v v NULL NULL v v +a 8 NULL NULL NULL NULL NULL NULL + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL x y z x v +a 1 x x y z x v +b 2 NULL x y z x v +c 3 NULL x y z x v +a 4 y x y z x v +b 5 NULL x y z x v +a 6 z x y z x v +a 7 v x y z x v +a 8 NULL x y z x v + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL x y z x v +a 1 x x y z x v +b 2 NULL x y z x v +c 3 NULL x y z x v +a 4 y x y z x v +b 5 NULL x y z x v +a 6 z x y z x v +a 7 v x y z x v +a 8 NULL x y z x v + + +-- !query +SELECT + content, + id, + v, + nth_value(v, 1) IGNORE NULLS OVER w nth_value_1, + nth_value(v, 2) IGNORE NULLS OVER w nth_value_2, + nth_value(v, 3) IGNORE NULLS OVER w nth_value_3, + first_value(v) IGNORE NULLS OVER w first_value, + last_value(v) IGNORE NULLS OVER w last_value +FROM + test_ignore_null +WINDOW w AS (ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING) +ORDER BY id +-- !query schema +struct +-- !query output +a 0 NULL x NULL NULL x x +a 1 x x NULL NULL x x +b 2 NULL x NULL NULL x x +c 3 NULL x y NULL x y +a 4 y x y NULL x y +b 5 NULL x y z x z +a 6 z x y z x v +a 7 v x y z x v +a 8 NULL x y z x v \ No newline at end of file diff --git a/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/commits/0 b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/commits/0 new file mode 100644 index 0000000000000..9c1e3021c3ead --- /dev/null +++ b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/commits/0 @@ -0,0 +1,2 @@ +v1 +{"nextBatchWatermarkMs":0} \ No newline at end of file diff --git a/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/metadata b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/metadata new file mode 100644 index 0000000000000..df5937f800382 --- /dev/null +++ b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/metadata @@ -0,0 +1 @@ +{"id":"6bcf6671-d23e-4ad8-824f-98aa5924ce6d"} \ No newline at end of file diff --git a/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/offsets/0 b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/offsets/0 new 
file mode 100644 index 0000000000000..d12f52147dd6a --- /dev/null +++ b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/offsets/0 @@ -0,0 +1,3 @@ +v1 +{"batchWatermarkMs":0,"batchTimestampMs":1603918440918,"conf":{"spark.sql.streaming.stateStore.providerClass":"org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider","spark.sql.streaming.join.stateFormatVersion":"2","spark.sql.streaming.flatMapGroupsWithState.stateFormatVersion":"2","spark.sql.streaming.multipleWatermarkPolicy":"min","spark.sql.streaming.aggregation.stateFormatVersion":"2","spark.sql.shuffle.partitions":"1"}} +0 \ No newline at end of file diff --git a/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/state/0/0/1.delta b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/state/0/0/1.delta new file mode 100644 index 0000000000000..8de7bc89a5de8 Binary files /dev/null and b/sql/core/src/test/resources/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/state/0/0/1.delta differ diff --git a/sql/core/src/test/resources/test-data/percentile_approx-input.csv.bz2 b/sql/core/src/test/resources/test-data/percentile_approx-input.csv.bz2 new file mode 100644 index 0000000000000..f85e2896b3a89 Binary files /dev/null and b/sql/core/src/test/resources/test-data/percentile_approx-input.csv.bz2 differ diff --git a/sql/core/src/test/resources/test_script.py b/sql/core/src/test/resources/test_script.py index 82ef7b38f0c1b..4fcd483f44d43 100644 --- a/sql/core/src/test/resources/test_script.py +++ b/sql/core/src/test/resources/test_script.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt index fa01042350149..b3b11b60ded0b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/explain.txt @@ -11,15 +11,15 @@ TakeOrderedAndProject (77) : : :- * Project (17) : : : +- * BroadcastHashJoin Inner BuildRight (16) : : : :- * Project (10) - : : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : : :- BroadcastExchange (5) - : : : : : +- * Project (4) - : : : : : +- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.date_dim (1) - : : : : +- * Filter (8) - : : : : +- * ColumnarToRow (7) - : : : : +- Scan parquet default.store_sales (6) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.date_dim (4) : : : +- BroadcastExchange (15) : : : +- * Project (14) : : : +- * Filter (13) @@ -43,11 +43,11 @@ TakeOrderedAndProject (77) : : :- * Project (45) : : : +- * BroadcastHashJoin Inner BuildRight (44) : : : :- * Project (38) - : : : : +- * BroadcastHashJoin Inner BuildLeft (37) - : : : : :- ReusedExchange (33) - : : : : +- * Filter (36) - : : : : +- * ColumnarToRow (35) - : : : : +- Scan parquet default.store_sales (34) + : : : : +- * BroadcastHashJoin Inner BuildRight (37) + : : : : :- * Filter (35) + : : : : : +- * ColumnarToRow (34) + : : : : : +- Scan parquet default.store_sales (33) + : : : : +- ReusedExchange (36) : : : +- BroadcastExchange (43) : : : +- * Project (42) : : : +- * Filter (41) @@ -65,11 +65,11 @@ TakeOrderedAndProject (77) : :- * Project (63) : : +- * BroadcastHashJoin Inner BuildRight (62) : : :- * Project (60) - : : : +- * BroadcastHashJoin Inner BuildLeft (59) - : : : :- ReusedExchange (55) - : : : +- * Filter (58) - : : : +- * ColumnarToRow (57) - : : : +- Scan parquet default.store_sales (56) + : : : +- * BroadcastHashJoin Inner BuildRight (59) + : : : :- * Filter (57) + : : : : +- * ColumnarToRow (56) + : : : : +- Scan parquet default.store_sales (55) + : : : +- ReusedExchange (58) : : +- ReusedExchange (61) : +- ReusedExchange (64) +- BroadcastExchange (70) @@ -78,50 +78,50 @@ TakeOrderedAndProject (77) +- Scan parquet default.item (67) -(1) Scan parquet default.date_dim -Output [2]: [d_date_sk#1, d_year#2] +(1) Scan parquet default.store_sales +Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), GreaterThanOrEqual(d_date_sk,2451545), LessThanOrEqual(d_date_sk,2451910), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [2]: [d_date_sk#1, d_year#2] +(2) ColumnarToRow [codegen id : 
5] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] -(3) Filter [codegen id : 1] -Input [2]: [d_date_sk#1, d_year#2] -Condition : ((((isnotnull(d_year#2) AND (d_year#2 = 2000)) AND (d_date_sk#1 >= 2451545)) AND (d_date_sk#1 <= 2451910)) AND isnotnull(d_date_sk#1)) +(3) Filter [codegen id : 5] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451545)) AND (ss_sold_date_sk#1 <= 2451910)) AND isnotnull(ss_cdemo_sk#3)) AND isnotnull(ss_store_sk#4)) AND isnotnull(ss_item_sk#2)) -(4) Project [codegen id : 1] -Output [1]: [d_date_sk#1] -Input [2]: [d_date_sk#1, d_year#2] +(4) Scan parquet default.date_dim +Output [2]: [d_date_sk#9, d_year#10] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), GreaterThanOrEqual(d_date_sk,2451545), LessThanOrEqual(d_date_sk,2451910), IsNotNull(d_date_sk)] +ReadSchema: struct -(5) BroadcastExchange -Input [1]: [d_date_sk#1] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] +(5) ColumnarToRow [codegen id : 1] +Input [2]: [d_date_sk#9, d_year#10] -(6) Scan parquet default.store_sales -Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] -ReadSchema: struct +(6) Filter [codegen id : 1] +Input [2]: [d_date_sk#9, d_year#10] +Condition : ((((isnotnull(d_year#10) AND (d_year#10 = 2000)) AND (d_date_sk#9 >= 2451545)) AND (d_date_sk#9 <= 2451910)) AND isnotnull(d_date_sk#9)) -(7) ColumnarToRow -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +(7) Project [codegen id : 1] +Output [1]: [d_date_sk#9] +Input [2]: [d_date_sk#9, d_year#10] -(8) Filter -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Condition : (((((isnotnull(ss_sold_date_sk#4) AND (ss_sold_date_sk#4 >= 2451545)) AND (ss_sold_date_sk#4 <= 2451910)) AND isnotnull(ss_cdemo_sk#6)) AND isnotnull(ss_store_sk#7)) AND isnotnull(ss_item_sk#5)) +(8) BroadcastExchange +Input [1]: [d_date_sk#9] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#11] (9) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [d_date_sk#1] -Right keys [1]: [ss_sold_date_sk#4] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#9] Join condition: None (10) Project [codegen id : 5] -Output [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [9]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +Output [7]: [ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, 
ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, d_date_sk#9] (11) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#12, cd_gender#13, cd_marital_status#14, cd_education_status#15] @@ -146,13 +146,13 @@ Input [1]: [cd_demo_sk#12] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] (16) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_cdemo_sk#6] +Left keys [1]: [ss_cdemo_sk#3] Right keys [1]: [cd_demo_sk#12] Join condition: None (17) Project [codegen id : 5] -Output [6]: [ss_item_sk#5, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [8]: [ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, cd_demo_sk#12] +Output [6]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [8]: [ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, cd_demo_sk#12] (18) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] @@ -173,13 +173,13 @@ Input [2]: [s_store_sk#17, s_state#18] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#19] (22) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_store_sk#7] +Left keys [1]: [ss_store_sk#4] Right keys [1]: [s_store_sk#17] Join condition: None (23) Project [codegen id : 5] -Output [6]: [ss_item_sk#5, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, s_state#18] -Input [8]: [ss_item_sk#5, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, s_store_sk#17, s_state#18] +Output [6]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, s_state#18] +Input [8]: [ss_item_sk#2, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, s_store_sk#17, s_state#18] (24) Scan parquet default.item Output [2]: [i_item_sk#20, i_item_id#21] @@ -200,13 +200,13 @@ Input [2]: [i_item_sk#20, i_item_id#21] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#22] (28) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_item_sk#5] +Left keys [1]: [ss_item_sk#2] Right keys [1]: [i_item_sk#20] Join condition: None (29) Project [codegen id : 5] -Output [6]: [i_item_id#21, s_state#18, ss_quantity#8 AS agg1#23, ss_list_price#9 AS agg2#24, ss_coupon_amt#11 AS agg3#25, ss_sales_price#10 AS agg4#26] -Input [8]: [ss_item_sk#5, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, s_state#18, i_item_sk#20, i_item_id#21] +Output [6]: [i_item_id#21, s_state#18, ss_quantity#5 AS agg1#23, ss_list_price#6 AS agg2#24, ss_coupon_amt#8 AS agg3#25, ss_sales_price#7 AS agg4#26] +Input [8]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, s_state#18, i_item_sk#20, i_item_id#21] (30) HashAggregate [codegen id : 5] Input [6]: [i_item_id#21, s_state#18, agg1#23, agg2#24, agg3#25, agg4#26] @@ -226,31 +226,31 @@ Functions [4]: [avg(cast(agg1#23 as bigint)), avg(UnscaledValue(agg2#24)), avg(U Aggregate Attributes [4]: [avg(cast(agg1#23 as bigint))#44, avg(UnscaledValue(agg2#24))#45, avg(UnscaledValue(agg3#25))#46, avg(UnscaledValue(agg4#26))#47] Results [7]: [i_item_id#21, s_state#18, 0 AS g_state#48, avg(cast(agg1#23 as bigint))#44 AS agg1#49, cast((avg(UnscaledValue(agg2#24))#45 / 100.0) as decimal(11,6)) AS agg2#50, cast((avg(UnscaledValue(agg3#25))#46 / 
100.0) as decimal(11,6)) AS agg3#51, cast((avg(UnscaledValue(agg4#26))#47 / 100.0) as decimal(11,6)) AS agg4#52] -(33) ReusedExchange [Reuses operator id: 5] -Output [1]: [d_date_sk#1] - -(34) Scan parquet default.store_sales -Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +(33) Scan parquet default.store_sales +Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct -(35) ColumnarToRow -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +(34) ColumnarToRow [codegen id : 11] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] + +(35) Filter [codegen id : 11] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451545)) AND (ss_sold_date_sk#1 <= 2451910)) AND isnotnull(ss_cdemo_sk#3)) AND isnotnull(ss_store_sk#4)) AND isnotnull(ss_item_sk#2)) -(36) Filter -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Condition : (((((isnotnull(ss_sold_date_sk#4) AND (ss_sold_date_sk#4 >= 2451545)) AND (ss_sold_date_sk#4 <= 2451910)) AND isnotnull(ss_cdemo_sk#6)) AND isnotnull(ss_store_sk#7)) AND isnotnull(ss_item_sk#5)) +(36) ReusedExchange [Reuses operator id: 8] +Output [1]: [d_date_sk#9] (37) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [d_date_sk#1] -Right keys [1]: [ss_sold_date_sk#4] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#9] Join condition: None (38) Project [codegen id : 11] -Output [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [9]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +Output [7]: [ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, d_date_sk#9] (39) Scan parquet default.store Output [2]: [s_store_sk#17, s_state#18] @@ -275,37 +275,37 @@ Input [1]: [s_store_sk#17] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#53] (44) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [ss_store_sk#7] +Left keys [1]: [ss_store_sk#4] Right keys [1]: [s_store_sk#17] Join condition: None (45) Project [codegen id : 11] -Output [6]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [8]: [ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, s_store_sk#17] +Output [6]: [ss_item_sk#2, ss_cdemo_sk#3, ss_quantity#5, ss_list_price#6, 
ss_sales_price#7, ss_coupon_amt#8] +Input [8]: [ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, s_store_sk#17] (46) ReusedExchange [Reuses operator id: 15] Output [1]: [cd_demo_sk#12] (47) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [ss_cdemo_sk#6] +Left keys [1]: [ss_cdemo_sk#3] Right keys [1]: [cd_demo_sk#12] Join condition: None (48) Project [codegen id : 11] -Output [5]: [ss_item_sk#5, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, cd_demo_sk#12] +Output [5]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [7]: [ss_item_sk#2, ss_cdemo_sk#3, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, cd_demo_sk#12] (49) ReusedExchange [Reuses operator id: 27] Output [2]: [i_item_sk#20, i_item_id#21] (50) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [ss_item_sk#5] +Left keys [1]: [ss_item_sk#2] Right keys [1]: [i_item_sk#20] Join condition: None (51) Project [codegen id : 11] -Output [5]: [i_item_id#21, ss_quantity#8 AS agg1#23, ss_list_price#9 AS agg2#24, ss_coupon_amt#11 AS agg3#25, ss_sales_price#10 AS agg4#26] -Input [7]: [ss_item_sk#5, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, i_item_sk#20, i_item_id#21] +Output [5]: [i_item_id#21, ss_quantity#5 AS agg1#23, ss_list_price#6 AS agg2#24, ss_coupon_amt#8 AS agg3#25, ss_sales_price#7 AS agg4#26] +Input [7]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, i_item_sk#20, i_item_id#21] (52) HashAggregate [codegen id : 11] Input [5]: [i_item_id#21, agg1#23, agg2#24, agg3#25, agg4#26] @@ -325,55 +325,55 @@ Functions [4]: [avg(cast(agg1#23 as bigint)), avg(UnscaledValue(agg2#24)), avg(U Aggregate Attributes [4]: [avg(cast(agg1#23 as bigint))#71, avg(UnscaledValue(agg2#24))#72, avg(UnscaledValue(agg3#25))#73, avg(UnscaledValue(agg4#26))#74] Results [7]: [i_item_id#21, null AS s_state#75, 1 AS g_state#76, avg(cast(agg1#23 as bigint))#71 AS agg1#77, cast((avg(UnscaledValue(agg2#24))#72 / 100.0) as decimal(11,6)) AS agg2#78, cast((avg(UnscaledValue(agg3#25))#73 / 100.0) as decimal(11,6)) AS agg3#79, cast((avg(UnscaledValue(agg4#26))#74 / 100.0) as decimal(11,6)) AS agg4#80] -(55) ReusedExchange [Reuses operator id: 5] -Output [1]: [d_date_sk#1] - -(56) Scan parquet default.store_sales -Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +(55) Scan parquet default.store_sales +Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2451545), LessThanOrEqual(ss_sold_date_sk,2451910), IsNotNull(ss_cdemo_sk), IsNotNull(ss_store_sk), IsNotNull(ss_item_sk)] ReadSchema: struct -(57) ColumnarToRow -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +(56) ColumnarToRow [codegen id : 17] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] + +(57) Filter [codegen id : 17] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, 
ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2451545)) AND (ss_sold_date_sk#1 <= 2451910)) AND isnotnull(ss_cdemo_sk#3)) AND isnotnull(ss_store_sk#4)) AND isnotnull(ss_item_sk#2)) -(58) Filter -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Condition : (((((isnotnull(ss_sold_date_sk#4) AND (ss_sold_date_sk#4 >= 2451545)) AND (ss_sold_date_sk#4 <= 2451910)) AND isnotnull(ss_cdemo_sk#6)) AND isnotnull(ss_store_sk#7)) AND isnotnull(ss_item_sk#5)) +(58) ReusedExchange [Reuses operator id: 8] +Output [1]: [d_date_sk#9] (59) BroadcastHashJoin [codegen id : 17] -Left keys [1]: [d_date_sk#1] -Right keys [1]: [ss_sold_date_sk#4] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#9] Join condition: None (60) Project [codegen id : 17] -Output [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [9]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +Output [7]: [ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, d_date_sk#9] (61) ReusedExchange [Reuses operator id: 43] Output [1]: [s_store_sk#17] (62) BroadcastHashJoin [codegen id : 17] -Left keys [1]: [ss_store_sk#7] +Left keys [1]: [ss_store_sk#4] Right keys [1]: [s_store_sk#17] Join condition: None (63) Project [codegen id : 17] -Output [6]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [8]: [ss_item_sk#5, ss_cdemo_sk#6, ss_store_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, s_store_sk#17] +Output [6]: [ss_item_sk#2, ss_cdemo_sk#3, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [8]: [ss_item_sk#2, ss_cdemo_sk#3, ss_store_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, s_store_sk#17] (64) ReusedExchange [Reuses operator id: 15] Output [1]: [cd_demo_sk#12] (65) BroadcastHashJoin [codegen id : 17] -Left keys [1]: [ss_cdemo_sk#6] +Left keys [1]: [ss_cdemo_sk#3] Right keys [1]: [cd_demo_sk#12] Join condition: None (66) Project [codegen id : 17] -Output [5]: [ss_item_sk#5, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, cd_demo_sk#12] +Output [5]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [7]: [ss_item_sk#2, ss_cdemo_sk#3, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, cd_demo_sk#12] (67) Scan parquet default.item Output [1]: [i_item_sk#20] @@ -394,13 +394,13 @@ Input [1]: [i_item_sk#20] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#81] (71) BroadcastHashJoin [codegen id : 17] -Left keys [1]: [ss_item_sk#5] +Left keys [1]: [ss_item_sk#2] Right keys [1]: [i_item_sk#20] Join condition: None (72) Project [codegen id : 17] -Output [4]: [ss_quantity#8 AS agg1#23, ss_list_price#9 AS agg2#24, ss_coupon_amt#11 AS agg3#25, ss_sales_price#10 AS agg4#26] -Input [6]: [ss_item_sk#5, ss_quantity#8, 
ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, i_item_sk#20] +Output [4]: [ss_quantity#5 AS agg1#23, ss_list_price#6 AS agg2#24, ss_coupon_amt#8 AS agg3#25, ss_sales_price#7 AS agg4#26] +Input [6]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, i_item_sk#20] (73) HashAggregate [codegen id : 17] Input [4]: [agg1#23, agg2#24, agg3#25, agg4#26] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/simplified.txt index fc7202e739bcc..d14061de1d1f4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q27.sf100/simplified.txt @@ -13,7 +13,11 @@ TakeOrderedAndProject [i_item_id,s_state,g_state,agg1,agg2,agg3,agg4] Project [ss_item_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] BroadcastHashJoin [ss_cdemo_sk,cd_demo_sk] Project [ss_item_sk,ss_cdemo_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] - BroadcastHashJoin [d_date_sk,ss_sold_date_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_cdemo_sk,ss_store_sk,ss_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_cdemo_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] InputAdapter BroadcastExchange #2 WholeStageCodegen (1) @@ -22,10 +26,6 @@ TakeOrderedAndProject [i_item_id,s_state,g_state,agg1,agg2,agg3,agg4] ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_year] - Filter [ss_sold_date_sk,ss_cdemo_sk,ss_store_sk,ss_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_cdemo_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] InputAdapter BroadcastExchange #3 WholeStageCodegen (2) @@ -61,13 +61,13 @@ TakeOrderedAndProject [i_item_id,s_state,g_state,agg1,agg2,agg3,agg4] Project [ss_item_sk,ss_cdemo_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] BroadcastHashJoin [ss_store_sk,s_store_sk] Project [ss_item_sk,ss_cdemo_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] - BroadcastHashJoin [d_date_sk,ss_sold_date_sk] - InputAdapter - ReusedExchange [d_date_sk] #2 + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] Filter [ss_sold_date_sk,ss_cdemo_sk,ss_store_sk,ss_item_sk] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_cdemo_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] + InputAdapter + ReusedExchange [d_date_sk] #2 InputAdapter BroadcastExchange #7 WholeStageCodegen (8) @@ -93,13 +93,13 @@ TakeOrderedAndProject [i_item_id,s_state,g_state,agg1,agg2,agg3,agg4] Project [ss_item_sk,ss_cdemo_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] BroadcastHashJoin [ss_store_sk,s_store_sk] Project [ss_item_sk,ss_cdemo_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] - BroadcastHashJoin [d_date_sk,ss_sold_date_sk] - InputAdapter - ReusedExchange [d_date_sk] #2 + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] Filter [ss_sold_date_sk,ss_cdemo_sk,ss_store_sk,ss_item_sk] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_cdemo_sk,ss_store_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] + InputAdapter + ReusedExchange [d_date_sk] #2 
InputAdapter ReusedExchange [s_store_sk] #7 InputAdapter diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt index ac1fca4f67a02..547806128e64a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/explain.txt @@ -120,7 +120,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -128,7 +128,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.2)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.2)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -156,7 +156,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 5] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -171,7 +171,7 @@ Condition : ((cnt#22 >= 15) AND (cnt#22 <= 20)) (29) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] -Arguments: hashpartitioning(ss_customer_sk#2, 5), true, [id=#23] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#23] (30) Sort [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] @@ -193,7 +193,7 @@ Condition : isnotnull(c_customer_sk#24) (34) Exchange Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] -Arguments: hashpartitioning(c_customer_sk#24, 5), true, [id=#29] +Arguments: hashpartitioning(c_customer_sk#24, 5), ENSURE_REQUIREMENTS, [id=#29] (35) Sort [codegen id : 8] Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] @@ -210,7 +210,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#24, c_sa (38) Exchange Input [6]: 
[c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(c_last_name#27 ASC NULLS FIRST, c_first_name#26 ASC NULLS FIRST, c_salutation#25 ASC NULLS FIRST, c_preferred_cust_flag#28 DESC NULLS LAST, 5), true, [id=#30] +Arguments: rangepartitioning(c_last_name#27 ASC NULLS FIRST, c_first_name#26 ASC NULLS FIRST, c_salutation#25 ASC NULLS FIRST, c_preferred_cust_flag#28 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#30] (39) Sort [codegen id : 10] Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/simplified.txt index d9b416ddba9ef..c9945cda67746 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34.sf100/simplified.txt @@ -47,7 +47,7 @@ WholeStageCodegen (10) BroadcastExchange #6 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt index 898d37403d6a0..74bbb52c55fbc 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/explain.txt @@ -117,7 +117,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -125,7 +125,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.2)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.2)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -153,7 +153,7 
@@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -195,7 +195,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#23, c_sa (35) Exchange Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(c_last_name#26 ASC NULLS FIRST, c_first_name#25 ASC NULLS FIRST, c_salutation#24 ASC NULLS FIRST, c_preferred_cust_flag#27 DESC NULLS LAST, 5), true, [id=#29] +Arguments: rangepartitioning(c_last_name#26 ASC NULLS FIRST, c_first_name#25 ASC NULLS FIRST, c_salutation#24 ASC NULLS FIRST, c_preferred_cust_flag#27 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 7] Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/simplified.txt index 5af07f1d4ddef..4484587f65355 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q34/simplified.txt @@ -41,7 +41,7 @@ WholeStageCodegen (7) BroadcastExchange #5 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt index 6071139e809cf..220d661fd45e9 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/explain.txt @@ -10,15 +10,15 @@ TakeOrderedAndProject (34) : :- * Project (17) : : +- * BroadcastHashJoin Inner BuildRight (16) : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : :- BroadcastExchange (5) - : : : : +- * Project (4) - : : : : +- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.date_dim (1) - : : : +- * Filter (8) - : : : +- * ColumnarToRow (7) - : : : +- Scan parquet default.store_sales (6) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.store_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) : : +- BroadcastExchange (15) : : +- * Project (14) : : +- * Filter (13) @@ -35,50 +35,50 @@ TakeOrderedAndProject (34) +- Scan parquet default.item (25) -(1) Scan parquet default.date_dim -Output [2]: [d_date_sk#1, d_year#2] +(1) Scan parquet default.store_sales +Output [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, 
ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), GreaterThanOrEqual(d_date_sk,2450815), LessThanOrEqual(d_date_sk,2451179), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450815), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_cdemo_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [2]: [d_date_sk#1, d_year#2] +(2) ColumnarToRow [codegen id : 5] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] -(3) Filter [codegen id : 1] -Input [2]: [d_date_sk#1, d_year#2] -Condition : ((((isnotnull(d_year#2) AND (d_year#2 = 1998)) AND (d_date_sk#1 >= 2450815)) AND (d_date_sk#1 <= 2451179)) AND isnotnull(d_date_sk#1)) +(3) Filter [codegen id : 5] +Input [8]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Condition : (((((isnotnull(ss_sold_date_sk#1) AND (ss_sold_date_sk#1 >= 2450815)) AND (ss_sold_date_sk#1 <= 2451179)) AND isnotnull(ss_cdemo_sk#3)) AND isnotnull(ss_item_sk#2)) AND isnotnull(ss_promo_sk#4)) -(4) Project [codegen id : 1] -Output [1]: [d_date_sk#1] -Input [2]: [d_date_sk#1, d_year#2] +(4) Scan parquet default.date_dim +Output [2]: [d_date_sk#9, d_year#10] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1998), GreaterThanOrEqual(d_date_sk,2450815), LessThanOrEqual(d_date_sk,2451179), IsNotNull(d_date_sk)] +ReadSchema: struct -(5) BroadcastExchange -Input [1]: [d_date_sk#1] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] +(5) ColumnarToRow [codegen id : 1] +Input [2]: [d_date_sk#9, d_year#10] -(6) Scan parquet default.store_sales -Output [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_sold_date_sk), GreaterThanOrEqual(ss_sold_date_sk,2450815), LessThanOrEqual(ss_sold_date_sk,2451179), IsNotNull(ss_cdemo_sk), IsNotNull(ss_item_sk), IsNotNull(ss_promo_sk)] -ReadSchema: struct +(6) Filter [codegen id : 1] +Input [2]: [d_date_sk#9, d_year#10] +Condition : ((((isnotnull(d_year#10) AND (d_year#10 = 1998)) AND (d_date_sk#9 >= 2450815)) AND (d_date_sk#9 <= 2451179)) AND isnotnull(d_date_sk#9)) -(7) ColumnarToRow -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +(7) Project [codegen id : 1] +Output [1]: [d_date_sk#9] +Input [2]: [d_date_sk#9, d_year#10] -(8) Filter -Input [8]: [ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Condition : (((((isnotnull(ss_sold_date_sk#4) AND (ss_sold_date_sk#4 >= 2450815)) AND (ss_sold_date_sk#4 <= 2451179)) AND isnotnull(ss_cdemo_sk#6)) AND isnotnull(ss_item_sk#5)) AND isnotnull(ss_promo_sk#7)) +(8) BroadcastExchange +Input [1]: [d_date_sk#9] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, 
true] as bigint)),false), [id=#11] (9) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [d_date_sk#1] -Right keys [1]: [ss_sold_date_sk#4] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#9] Join condition: None (10) Project [codegen id : 5] -Output [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [9]: [d_date_sk#1, ss_sold_date_sk#4, ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] +Output [7]: [ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, d_date_sk#9] (11) Scan parquet default.promotion Output [3]: [p_promo_sk#12, p_channel_email#13, p_channel_event#14] @@ -103,13 +103,13 @@ Input [1]: [p_promo_sk#12] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#15] (16) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_promo_sk#7] +Left keys [1]: [ss_promo_sk#4] Right keys [1]: [p_promo_sk#12] Join condition: None (17) Project [codegen id : 5] -Output [6]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [8]: [ss_item_sk#5, ss_cdemo_sk#6, ss_promo_sk#7, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, p_promo_sk#12] +Output [6]: [ss_item_sk#2, ss_cdemo_sk#3, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [8]: [ss_item_sk#2, ss_cdemo_sk#3, ss_promo_sk#4, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, p_promo_sk#12] (18) Scan parquet default.customer_demographics Output [4]: [cd_demo_sk#16, cd_gender#17, cd_marital_status#18, cd_education_status#19] @@ -134,13 +134,13 @@ Input [1]: [cd_demo_sk#16] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#20] (23) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_cdemo_sk#6] +Left keys [1]: [ss_cdemo_sk#3] Right keys [1]: [cd_demo_sk#16] Join condition: None (24) Project [codegen id : 5] -Output [5]: [ss_item_sk#5, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11] -Input [7]: [ss_item_sk#5, ss_cdemo_sk#6, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, cd_demo_sk#16] +Output [5]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8] +Input [7]: [ss_item_sk#2, ss_cdemo_sk#3, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, cd_demo_sk#16] (25) Scan parquet default.item Output [2]: [i_item_sk#21, i_item_id#22] @@ -161,18 +161,18 @@ Input [2]: [i_item_sk#21, i_item_id#22] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#23] (29) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_item_sk#5] +Left keys [1]: [ss_item_sk#2] Right keys [1]: [i_item_sk#21] Join condition: None (30) Project [codegen id : 5] -Output [5]: [ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, i_item_id#22] -Input [7]: [ss_item_sk#5, ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, i_item_sk#21, i_item_id#22] +Output [5]: [ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, i_item_id#22] +Input [7]: [ss_item_sk#2, ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, i_item_sk#21, i_item_id#22] (31) HashAggregate 
[codegen id : 5] -Input [5]: [ss_quantity#8, ss_list_price#9, ss_sales_price#10, ss_coupon_amt#11, i_item_id#22] +Input [5]: [ss_quantity#5, ss_list_price#6, ss_sales_price#7, ss_coupon_amt#8, i_item_id#22] Keys [1]: [i_item_id#22] -Functions [4]: [partial_avg(cast(ss_quantity#8 as bigint)), partial_avg(UnscaledValue(ss_list_price#9)), partial_avg(UnscaledValue(ss_coupon_amt#11)), partial_avg(UnscaledValue(ss_sales_price#10))] +Functions [4]: [partial_avg(cast(ss_quantity#5 as bigint)), partial_avg(UnscaledValue(ss_list_price#6)), partial_avg(UnscaledValue(ss_coupon_amt#8)), partial_avg(UnscaledValue(ss_sales_price#7))] Aggregate Attributes [8]: [sum#24, count#25, sum#26, count#27, sum#28, count#29, sum#30, count#31] Results [9]: [i_item_id#22, sum#32, count#33, sum#34, count#35, sum#36, count#37, sum#38, count#39] @@ -183,9 +183,9 @@ Arguments: hashpartitioning(i_item_id#22, 5), true, [id=#40] (33) HashAggregate [codegen id : 6] Input [9]: [i_item_id#22, sum#32, count#33, sum#34, count#35, sum#36, count#37, sum#38, count#39] Keys [1]: [i_item_id#22] -Functions [4]: [avg(cast(ss_quantity#8 as bigint)), avg(UnscaledValue(ss_list_price#9)), avg(UnscaledValue(ss_coupon_amt#11)), avg(UnscaledValue(ss_sales_price#10))] -Aggregate Attributes [4]: [avg(cast(ss_quantity#8 as bigint))#41, avg(UnscaledValue(ss_list_price#9))#42, avg(UnscaledValue(ss_coupon_amt#11))#43, avg(UnscaledValue(ss_sales_price#10))#44] -Results [5]: [i_item_id#22, avg(cast(ss_quantity#8 as bigint))#41 AS agg1#45, cast((avg(UnscaledValue(ss_list_price#9))#42 / 100.0) as decimal(11,6)) AS agg2#46, cast((avg(UnscaledValue(ss_coupon_amt#11))#43 / 100.0) as decimal(11,6)) AS agg3#47, cast((avg(UnscaledValue(ss_sales_price#10))#44 / 100.0) as decimal(11,6)) AS agg4#48] +Functions [4]: [avg(cast(ss_quantity#5 as bigint)), avg(UnscaledValue(ss_list_price#6)), avg(UnscaledValue(ss_coupon_amt#8)), avg(UnscaledValue(ss_sales_price#7))] +Aggregate Attributes [4]: [avg(cast(ss_quantity#5 as bigint))#41, avg(UnscaledValue(ss_list_price#6))#42, avg(UnscaledValue(ss_coupon_amt#8))#43, avg(UnscaledValue(ss_sales_price#7))#44] +Results [5]: [i_item_id#22, avg(cast(ss_quantity#5 as bigint))#41 AS agg1#45, cast((avg(UnscaledValue(ss_list_price#6))#42 / 100.0) as decimal(11,6)) AS agg2#46, cast((avg(UnscaledValue(ss_coupon_amt#8))#43 / 100.0) as decimal(11,6)) AS agg3#47, cast((avg(UnscaledValue(ss_sales_price#7))#44 / 100.0) as decimal(11,6)) AS agg4#48] (34) TakeOrderedAndProject Input [5]: [i_item_id#22, agg1#45, agg2#46, agg3#47, agg4#48] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/simplified.txt index 4576b8cef59ee..61cc7daa76456 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q7.sf100/simplified.txt @@ -12,7 +12,11 @@ TakeOrderedAndProject [i_item_id,agg1,agg2,agg3,agg4] Project [ss_item_sk,ss_cdemo_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] BroadcastHashJoin [ss_promo_sk,p_promo_sk] Project [ss_item_sk,ss_cdemo_sk,ss_promo_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] - BroadcastHashJoin [d_date_sk,ss_sold_date_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_cdemo_sk,ss_item_sk,ss_promo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales 
[ss_sold_date_sk,ss_item_sk,ss_cdemo_sk,ss_promo_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] InputAdapter BroadcastExchange #2 WholeStageCodegen (1) @@ -21,10 +25,6 @@ TakeOrderedAndProject [i_item_id,agg1,agg2,agg3,agg4] ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_year] - Filter [ss_sold_date_sk,ss_cdemo_sk,ss_item_sk,ss_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_cdemo_sk,ss_promo_sk,ss_quantity,ss_list_price,ss_sales_price,ss_coupon_amt] InputAdapter BroadcastExchange #3 WholeStageCodegen (2) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt index 25da173c8ecde..51b480ef64ab2 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/explain.txt @@ -117,7 +117,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -125,7 +125,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.0)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.0)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -153,7 +153,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 5] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -195,7 +195,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#24, c_sa (35) Exchange Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), true, [id=#29] +Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 7] Input 
[6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/simplified.txt index 7496388d3430c..8695f9da17114 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73.sf100/simplified.txt @@ -44,7 +44,7 @@ WholeStageCodegen (7) BroadcastExchange #6 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt index e420b656c3ad0..56ad4f4d926eb 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/explain.txt @@ -117,7 +117,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,Unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -125,7 +125,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.0)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = Unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.0)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -153,7 +153,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -195,7 +195,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#23, c_sa (35) 
Exchange Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), true, [id=#29] +Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 7] Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/simplified.txt index 46b7241565719..5e49f6cb603d5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-modified/q73/simplified.txt @@ -41,7 +41,7 @@ WholeStageCodegen (7) BroadcastExchange #5 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt index 586abbd8f3fef..327e7db702faa 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/explain.txt @@ -4,21 +4,21 @@ +- * HashAggregate (36) +- * Project (35) +- * BroadcastHashJoin Inner BuildRight (34) - :- * Project (28) - : +- * BroadcastHashJoin Inner BuildRight (27) + :- * Project (29) + : +- * BroadcastHashJoin Inner BuildRight (28) : :- * Project (22) : : +- * BroadcastHashJoin Inner BuildRight (21) : : :- * Project (15) : : : +- * BroadcastHashJoin Inner BuildRight (14) : : : :- * Project (9) - : : : : +- * BroadcastHashJoin Inner BuildLeft (8) - : : : : :- BroadcastExchange (4) - : : : : : +- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.customer_demographics (1) - : : : : +- * Filter (7) - : : : : +- * ColumnarToRow (6) - : : : : +- Scan parquet default.store_sales (5) + : : : : +- * BroadcastHashJoin Inner BuildRight (8) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- BroadcastExchange (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.customer_demographics (4) : : : +- BroadcastExchange (13) : : : +- * Filter (12) : : : +- * ColumnarToRow (11) @@ -27,58 +27,58 @@ : : +- * Project (19) : : +- * Filter (18) : : +- * ColumnarToRow (17) - : : +- Scan parquet default.date_dim (16) - : +- BroadcastExchange (26) - : +- * Filter (25) - : +- * ColumnarToRow (24) - : +- Scan parquet default.store (23) + : : +- Scan parquet default.customer_address (16) + : +- BroadcastExchange (27) + : +- * Project (26) + : +- * Filter (25) + : +- * ColumnarToRow (24) + : +- Scan parquet default.date_dim (23) +- BroadcastExchange (33) - +- * Project (32) - +- * Filter (31) - +- * ColumnarToRow (30) - +- Scan parquet default.customer_address (29) + +- * Filter (32) + +- * ColumnarToRow (31) + +- Scan parquet default.store (30) -(1) Scan parquet default.customer_demographics -Output [3]: [cd_demo_sk#1, 
cd_marital_status#2, cd_education_status#3] +(1) Scan parquet default.store_sales +Output [10]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_sales_price#7, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_demographics] -PushedFilters: [IsNotNull(cd_demo_sk), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))] -ReadSchema: struct - -(2) ColumnarToRow [codegen id : 1] -Input [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_hdemo_sk), Or(Or(And(GreaterThanOrEqual(ss_net_profit,100.00),LessThanOrEqual(ss_net_profit,200.00)),And(GreaterThanOrEqual(ss_net_profit,150.00),LessThanOrEqual(ss_net_profit,300.00))),And(GreaterThanOrEqual(ss_net_profit,50.00),LessThanOrEqual(ss_net_profit,250.00))), Or(Or(And(GreaterThanOrEqual(ss_sales_price,100.00),LessThanOrEqual(ss_sales_price,150.00)),And(GreaterThanOrEqual(ss_sales_price,50.00),LessThanOrEqual(ss_sales_price,100.00))),And(GreaterThanOrEqual(ss_sales_price,150.00),LessThanOrEqual(ss_sales_price,200.00)))] +ReadSchema: struct -(3) Filter [codegen id : 1] -Input [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] -Condition : (isnotnull(cd_demo_sk#1) AND ((((cd_marital_status#2 = M) AND (cd_education_status#3 = Advanced Degree)) OR ((cd_marital_status#2 = S) AND (cd_education_status#3 = College))) OR ((cd_marital_status#2 = W) AND (cd_education_status#3 = 2 yr Degree)))) +(2) ColumnarToRow [codegen id : 6] +Input [10]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_sales_price#7, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10] -(4) BroadcastExchange -Input [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#4] +(3) Filter [codegen id : 6] +Input [10]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_sales_price#7, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10] +Condition : ((((((isnotnull(ss_store_sk#5) AND isnotnull(ss_addr_sk#4)) AND isnotnull(ss_sold_date_sk#1)) AND isnotnull(ss_cdemo_sk#2)) AND isnotnull(ss_hdemo_sk#3)) AND ((((ss_net_profit#10 >= 100.00) AND (ss_net_profit#10 <= 200.00)) OR ((ss_net_profit#10 >= 150.00) AND (ss_net_profit#10 <= 300.00))) OR ((ss_net_profit#10 >= 50.00) AND (ss_net_profit#10 <= 250.00)))) AND ((((ss_sales_price#7 >= 100.00) AND (ss_sales_price#7 <= 150.00)) OR ((ss_sales_price#7 >= 50.00) AND (ss_sales_price#7 <= 100.00))) OR ((ss_sales_price#7 >= 150.00) AND (ss_sales_price#7 <= 200.00)))) -(5) Scan parquet default.store_sales -Output [10]: [ss_sold_date_sk#5, ss_cdemo_sk#6, ss_hdemo_sk#7, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_sales_price#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] +(4) Scan parquet default.customer_demographics +Output [3]: [cd_demo_sk#11, cd_marital_status#12, cd_education_status#13] Batched: true -Location [not included in comparison]/{warehouse_dir}/store_sales] 
-PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_addr_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_cdemo_sk), IsNotNull(ss_hdemo_sk), Or(Or(And(GreaterThanOrEqual(ss_net_profit,100.00),LessThanOrEqual(ss_net_profit,200.00)),And(GreaterThanOrEqual(ss_net_profit,150.00),LessThanOrEqual(ss_net_profit,300.00))),And(GreaterThanOrEqual(ss_net_profit,50.00),LessThanOrEqual(ss_net_profit,250.00))), Or(Or(And(GreaterThanOrEqual(ss_sales_price,100.00),LessThanOrEqual(ss_sales_price,150.00)),And(GreaterThanOrEqual(ss_sales_price,50.00),LessThanOrEqual(ss_sales_price,100.00))),And(GreaterThanOrEqual(ss_sales_price,150.00),LessThanOrEqual(ss_sales_price,200.00)))] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer_demographics] +PushedFilters: [IsNotNull(cd_demo_sk), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))] +ReadSchema: struct + +(5) ColumnarToRow [codegen id : 1] +Input [3]: [cd_demo_sk#11, cd_marital_status#12, cd_education_status#13] -(6) ColumnarToRow -Input [10]: [ss_sold_date_sk#5, ss_cdemo_sk#6, ss_hdemo_sk#7, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_sales_price#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] +(6) Filter [codegen id : 1] +Input [3]: [cd_demo_sk#11, cd_marital_status#12, cd_education_status#13] +Condition : (isnotnull(cd_demo_sk#11) AND ((((cd_marital_status#12 = M) AND (cd_education_status#13 = Advanced Degree)) OR ((cd_marital_status#12 = S) AND (cd_education_status#13 = College))) OR ((cd_marital_status#12 = W) AND (cd_education_status#13 = 2 yr Degree)))) -(7) Filter -Input [10]: [ss_sold_date_sk#5, ss_cdemo_sk#6, ss_hdemo_sk#7, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_sales_price#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] -Condition : ((((((isnotnull(ss_store_sk#9) AND isnotnull(ss_addr_sk#8)) AND isnotnull(ss_sold_date_sk#5)) AND isnotnull(ss_cdemo_sk#6)) AND isnotnull(ss_hdemo_sk#7)) AND ((((ss_net_profit#14 >= 100.00) AND (ss_net_profit#14 <= 200.00)) OR ((ss_net_profit#14 >= 150.00) AND (ss_net_profit#14 <= 300.00))) OR ((ss_net_profit#14 >= 50.00) AND (ss_net_profit#14 <= 250.00)))) AND ((((ss_sales_price#11 >= 100.00) AND (ss_sales_price#11 <= 150.00)) OR ((ss_sales_price#11 >= 50.00) AND (ss_sales_price#11 <= 100.00))) OR ((ss_sales_price#11 >= 150.00) AND (ss_sales_price#11 <= 200.00)))) +(7) BroadcastExchange +Input [3]: [cd_demo_sk#11, cd_marital_status#12, cd_education_status#13] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#14] (8) BroadcastHashJoin [codegen id : 6] -Left keys [1]: [cd_demo_sk#1] -Right keys [1]: [ss_cdemo_sk#6] -Join condition: ((((((cd_marital_status#2 = M) AND (cd_education_status#3 = Advanced Degree)) AND (ss_sales_price#11 >= 100.00)) AND (ss_sales_price#11 <= 150.00)) OR ((((cd_marital_status#2 = S) AND (cd_education_status#3 = College)) AND (ss_sales_price#11 >= 50.00)) AND (ss_sales_price#11 <= 100.00))) OR ((((cd_marital_status#2 = W) AND (cd_education_status#3 = 2 yr Degree)) AND (ss_sales_price#11 >= 150.00)) AND (ss_sales_price#11 <= 200.00))) +Left keys [1]: [ss_cdemo_sk#2] +Right keys [1]: [cd_demo_sk#11] +Join condition: ((((((cd_marital_status#12 = M) AND (cd_education_status#13 = Advanced Degree)) AND (ss_sales_price#7 >= 100.00)) AND (ss_sales_price#7 <= 150.00)) OR 
((((cd_marital_status#12 = S) AND (cd_education_status#13 = College)) AND (ss_sales_price#7 >= 50.00)) AND (ss_sales_price#7 <= 100.00))) OR ((((cd_marital_status#12 = W) AND (cd_education_status#13 = 2 yr Degree)) AND (ss_sales_price#7 >= 150.00)) AND (ss_sales_price#7 <= 200.00))) (9) Project [codegen id : 6] -Output [11]: [cd_marital_status#2, cd_education_status#3, ss_sold_date_sk#5, ss_hdemo_sk#7, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_sales_price#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] -Input [13]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3, ss_sold_date_sk#5, ss_cdemo_sk#6, ss_hdemo_sk#7, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_sales_price#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] +Output [11]: [ss_sold_date_sk#1, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_sales_price#7, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10, cd_marital_status#12, cd_education_status#13] +Input [13]: [ss_sold_date_sk#1, ss_cdemo_sk#2, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_sales_price#7, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10, cd_demo_sk#11, cd_marital_status#12, cd_education_status#13] (10) Scan parquet default.household_demographics Output [2]: [hd_demo_sk#15, hd_dep_count#16] @@ -99,118 +99,118 @@ Input [2]: [hd_demo_sk#15, hd_dep_count#16] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#17] (14) BroadcastHashJoin [codegen id : 6] -Left keys [1]: [ss_hdemo_sk#7] +Left keys [1]: [ss_hdemo_sk#3] Right keys [1]: [hd_demo_sk#15] -Join condition: (((((((cd_marital_status#2 = M) AND (cd_education_status#3 = Advanced Degree)) AND (ss_sales_price#11 >= 100.00)) AND (ss_sales_price#11 <= 150.00)) AND (hd_dep_count#16 = 3)) OR (((((cd_marital_status#2 = S) AND (cd_education_status#3 = College)) AND (ss_sales_price#11 >= 50.00)) AND (ss_sales_price#11 <= 100.00)) AND (hd_dep_count#16 = 1))) OR (((((cd_marital_status#2 = W) AND (cd_education_status#3 = 2 yr Degree)) AND (ss_sales_price#11 >= 150.00)) AND (ss_sales_price#11 <= 200.00)) AND (hd_dep_count#16 = 1))) +Join condition: (((((((cd_marital_status#12 = M) AND (cd_education_status#13 = Advanced Degree)) AND (ss_sales_price#7 >= 100.00)) AND (ss_sales_price#7 <= 150.00)) AND (hd_dep_count#16 = 3)) OR (((((cd_marital_status#12 = S) AND (cd_education_status#13 = College)) AND (ss_sales_price#7 >= 50.00)) AND (ss_sales_price#7 <= 100.00)) AND (hd_dep_count#16 = 1))) OR (((((cd_marital_status#12 = W) AND (cd_education_status#13 = 2 yr Degree)) AND (ss_sales_price#7 >= 150.00)) AND (ss_sales_price#7 <= 200.00)) AND (hd_dep_count#16 = 1))) (15) Project [codegen id : 6] -Output [7]: [ss_sold_date_sk#5, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] -Input [13]: [cd_marital_status#2, cd_education_status#3, ss_sold_date_sk#5, ss_hdemo_sk#7, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_sales_price#11, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14, hd_demo_sk#15, hd_dep_count#16] +Output [7]: [ss_sold_date_sk#1, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10] +Input [13]: [ss_sold_date_sk#1, ss_hdemo_sk#3, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_sales_price#7, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10, cd_marital_status#12, cd_education_status#13, hd_demo_sk#15, hd_dep_count#16] 
-(16) Scan parquet default.date_dim -Output [2]: [d_date_sk#18, d_year#19] +(16) Scan parquet default.customer_address +Output [3]: [ca_address_sk#18, ca_state#19, ca_country#20] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [TX,OH]),In(ca_state, [OR,NM,KY])),In(ca_state, [VA,TX,MS]))] +ReadSchema: struct (17) ColumnarToRow [codegen id : 3] -Input [2]: [d_date_sk#18, d_year#19] +Input [3]: [ca_address_sk#18, ca_state#19, ca_country#20] (18) Filter [codegen id : 3] -Input [2]: [d_date_sk#18, d_year#19] -Condition : ((isnotnull(d_year#19) AND (d_year#19 = 2001)) AND isnotnull(d_date_sk#18)) +Input [3]: [ca_address_sk#18, ca_state#19, ca_country#20] +Condition : (((isnotnull(ca_country#20) AND (ca_country#20 = United States)) AND isnotnull(ca_address_sk#18)) AND ((ca_state#19 IN (TX,OH) OR ca_state#19 IN (OR,NM,KY)) OR ca_state#19 IN (VA,TX,MS))) (19) Project [codegen id : 3] -Output [1]: [d_date_sk#18] -Input [2]: [d_date_sk#18, d_year#19] +Output [2]: [ca_address_sk#18, ca_state#19] +Input [3]: [ca_address_sk#18, ca_state#19, ca_country#20] (20) BroadcastExchange -Input [1]: [d_date_sk#18] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#20] +Input [2]: [ca_address_sk#18, ca_state#19] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#21] (21) BroadcastHashJoin [codegen id : 6] -Left keys [1]: [ss_sold_date_sk#5] -Right keys [1]: [d_date_sk#18] -Join condition: None +Left keys [1]: [ss_addr_sk#4] +Right keys [1]: [ca_address_sk#18] +Join condition: ((((ca_state#19 IN (TX,OH) AND (ss_net_profit#10 >= 100.00)) AND (ss_net_profit#10 <= 200.00)) OR ((ca_state#19 IN (OR,NM,KY) AND (ss_net_profit#10 >= 150.00)) AND (ss_net_profit#10 <= 300.00))) OR ((ca_state#19 IN (VA,TX,MS) AND (ss_net_profit#10 >= 50.00)) AND (ss_net_profit#10 <= 250.00))) (22) Project [codegen id : 6] -Output [6]: [ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] -Input [8]: [ss_sold_date_sk#5, ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14, d_date_sk#18] +Output [5]: [ss_sold_date_sk#1, ss_store_sk#5, ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9] +Input [9]: [ss_sold_date_sk#1, ss_addr_sk#4, ss_store_sk#5, ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, ss_net_profit#10, ca_address_sk#18, ca_state#19] -(23) Scan parquet default.store -Output [1]: [s_store_sk#21] +(23) Scan parquet default.date_dim +Output [2]: [d_date_sk#22, d_year#23] Batched: true -Location [not included in comparison]/{warehouse_dir}/store] -PushedFilters: [IsNotNull(s_store_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] +ReadSchema: struct (24) ColumnarToRow [codegen id : 4] -Input [1]: [s_store_sk#21] +Input [2]: [d_date_sk#22, d_year#23] (25) Filter [codegen id : 4] -Input [1]: [s_store_sk#21] -Condition : isnotnull(s_store_sk#21) +Input [2]: [d_date_sk#22, d_year#23] +Condition : ((isnotnull(d_year#23) AND (d_year#23 = 2001)) AND isnotnull(d_date_sk#22)) -(26) 
BroadcastExchange -Input [1]: [s_store_sk#21] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#22] +(26) Project [codegen id : 4] +Output [1]: [d_date_sk#22] +Input [2]: [d_date_sk#22, d_year#23] -(27) BroadcastHashJoin [codegen id : 6] -Left keys [1]: [ss_store_sk#9] -Right keys [1]: [s_store_sk#21] +(27) BroadcastExchange +Input [1]: [d_date_sk#22] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#24] + +(28) BroadcastHashJoin [codegen id : 6] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#22] Join condition: None -(28) Project [codegen id : 6] -Output [5]: [ss_addr_sk#8, ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14] -Input [7]: [ss_addr_sk#8, ss_store_sk#9, ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14, s_store_sk#21] +(29) Project [codegen id : 6] +Output [4]: [ss_store_sk#5, ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9] +Input [6]: [ss_sold_date_sk#1, ss_store_sk#5, ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, d_date_sk#22] -(29) Scan parquet default.customer_address -Output [3]: [ca_address_sk#23, ca_state#24, ca_country#25] +(30) Scan parquet default.store +Output [1]: [s_store_sk#25] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [IsNotNull(ca_country), EqualTo(ca_country,United States), IsNotNull(ca_address_sk), Or(Or(In(ca_state, [TX,OH]),In(ca_state, [OR,NM,KY])),In(ca_state, [VA,TX,MS]))] -ReadSchema: struct - -(30) ColumnarToRow [codegen id : 5] -Input [3]: [ca_address_sk#23, ca_state#24, ca_country#25] +Location [not included in comparison]/{warehouse_dir}/store] +PushedFilters: [IsNotNull(s_store_sk)] +ReadSchema: struct -(31) Filter [codegen id : 5] -Input [3]: [ca_address_sk#23, ca_state#24, ca_country#25] -Condition : (((isnotnull(ca_country#25) AND (ca_country#25 = United States)) AND isnotnull(ca_address_sk#23)) AND ((ca_state#24 IN (TX,OH) OR ca_state#24 IN (OR,NM,KY)) OR ca_state#24 IN (VA,TX,MS))) +(31) ColumnarToRow [codegen id : 5] +Input [1]: [s_store_sk#25] -(32) Project [codegen id : 5] -Output [2]: [ca_address_sk#23, ca_state#24] -Input [3]: [ca_address_sk#23, ca_state#24, ca_country#25] +(32) Filter [codegen id : 5] +Input [1]: [s_store_sk#25] +Condition : isnotnull(s_store_sk#25) (33) BroadcastExchange -Input [2]: [ca_address_sk#23, ca_state#24] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#26] +Input [1]: [s_store_sk#25] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#26] (34) BroadcastHashJoin [codegen id : 6] -Left keys [1]: [ss_addr_sk#8] -Right keys [1]: [ca_address_sk#23] -Join condition: ((((ca_state#24 IN (TX,OH) AND (ss_net_profit#14 >= 100.00)) AND (ss_net_profit#14 <= 200.00)) OR ((ca_state#24 IN (OR,NM,KY) AND (ss_net_profit#14 >= 150.00)) AND (ss_net_profit#14 <= 300.00))) OR ((ca_state#24 IN (VA,TX,MS) AND (ss_net_profit#14 >= 50.00)) AND (ss_net_profit#14 <= 250.00))) +Left keys [1]: [ss_store_sk#5] +Right keys [1]: [s_store_sk#25] +Join condition: None (35) Project [codegen id : 6] -Output [3]: [ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13] -Input [7]: [ss_addr_sk#8, ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13, ss_net_profit#14, ca_address_sk#23, ca_state#24] +Output [3]: [ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9] +Input 
[5]: [ss_store_sk#5, ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9, s_store_sk#25] (36) HashAggregate [codegen id : 6] -Input [3]: [ss_quantity#10, ss_ext_sales_price#12, ss_ext_wholesale_cost#13] +Input [3]: [ss_quantity#6, ss_ext_sales_price#8, ss_ext_wholesale_cost#9] Keys: [] -Functions [4]: [partial_avg(cast(ss_quantity#10 as bigint)), partial_avg(UnscaledValue(ss_ext_sales_price#12)), partial_avg(UnscaledValue(ss_ext_wholesale_cost#13)), partial_sum(UnscaledValue(ss_ext_wholesale_cost#13))] +Functions [4]: [partial_avg(cast(ss_quantity#6 as bigint)), partial_avg(UnscaledValue(ss_ext_sales_price#8)), partial_avg(UnscaledValue(ss_ext_wholesale_cost#9)), partial_sum(UnscaledValue(ss_ext_wholesale_cost#9))] Aggregate Attributes [7]: [sum#27, count#28, sum#29, count#30, sum#31, count#32, sum#33] Results [7]: [sum#34, count#35, sum#36, count#37, sum#38, count#39, sum#40] (37) Exchange Input [7]: [sum#34, count#35, sum#36, count#37, sum#38, count#39, sum#40] -Arguments: SinglePartition, true, [id=#41] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#41] (38) HashAggregate [codegen id : 7] Input [7]: [sum#34, count#35, sum#36, count#37, sum#38, count#39, sum#40] Keys: [] -Functions [4]: [avg(cast(ss_quantity#10 as bigint)), avg(UnscaledValue(ss_ext_sales_price#12)), avg(UnscaledValue(ss_ext_wholesale_cost#13)), sum(UnscaledValue(ss_ext_wholesale_cost#13))] -Aggregate Attributes [4]: [avg(cast(ss_quantity#10 as bigint))#42, avg(UnscaledValue(ss_ext_sales_price#12))#43, avg(UnscaledValue(ss_ext_wholesale_cost#13))#44, sum(UnscaledValue(ss_ext_wholesale_cost#13))#45] -Results [4]: [avg(cast(ss_quantity#10 as bigint))#42 AS avg(ss_quantity)#46, cast((avg(UnscaledValue(ss_ext_sales_price#12))#43 / 100.0) as decimal(11,6)) AS avg(ss_ext_sales_price)#47, cast((avg(UnscaledValue(ss_ext_wholesale_cost#13))#44 / 100.0) as decimal(11,6)) AS avg(ss_ext_wholesale_cost)#48, MakeDecimal(sum(UnscaledValue(ss_ext_wholesale_cost#13))#45,17,2) AS sum(ss_ext_wholesale_cost)#49] +Functions [4]: [avg(cast(ss_quantity#6 as bigint)), avg(UnscaledValue(ss_ext_sales_price#8)), avg(UnscaledValue(ss_ext_wholesale_cost#9)), sum(UnscaledValue(ss_ext_wholesale_cost#9))] +Aggregate Attributes [4]: [avg(cast(ss_quantity#6 as bigint))#42, avg(UnscaledValue(ss_ext_sales_price#8))#43, avg(UnscaledValue(ss_ext_wholesale_cost#9))#44, sum(UnscaledValue(ss_ext_wholesale_cost#9))#45] +Results [4]: [avg(cast(ss_quantity#6 as bigint))#42 AS avg(ss_quantity)#46, cast((avg(UnscaledValue(ss_ext_sales_price#8))#43 / 100.0) as decimal(11,6)) AS avg(ss_ext_sales_price)#47, cast((avg(UnscaledValue(ss_ext_wholesale_cost#9))#44 / 100.0) as decimal(11,6)) AS avg(ss_ext_wholesale_cost)#48, MakeDecimal(sum(UnscaledValue(ss_ext_wholesale_cost#9))#45,17,2) AS sum(ss_ext_wholesale_cost)#49] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/simplified.txt index e410b27e9cf3b..45d6c8f3b0bae 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q13.sf100/simplified.txt @@ -5,15 +5,19 @@ WholeStageCodegen (7) WholeStageCodegen (6) HashAggregate [ss_quantity,ss_ext_sales_price,ss_ext_wholesale_cost] [sum,count,sum,count,sum,count,sum,sum,count,sum,count,sum,count,sum] Project [ss_quantity,ss_ext_sales_price,ss_ext_wholesale_cost] - BroadcastHashJoin 
[ss_addr_sk,ca_address_sk,ca_state,ss_net_profit] - Project [ss_addr_sk,ss_quantity,ss_ext_sales_price,ss_ext_wholesale_cost,ss_net_profit] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_addr_sk,ss_store_sk,ss_quantity,ss_ext_sales_price,ss_ext_wholesale_cost,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_quantity,ss_ext_sales_price,ss_ext_wholesale_cost] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_store_sk,ss_quantity,ss_ext_sales_price,ss_ext_wholesale_cost] + BroadcastHashJoin [ss_addr_sk,ca_address_sk,ca_state,ss_net_profit] Project [ss_sold_date_sk,ss_addr_sk,ss_store_sk,ss_quantity,ss_ext_sales_price,ss_ext_wholesale_cost,ss_net_profit] BroadcastHashJoin [ss_hdemo_sk,hd_demo_sk,cd_marital_status,cd_education_status,ss_sales_price,hd_dep_count] - Project [cd_marital_status,cd_education_status,ss_sold_date_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_quantity,ss_sales_price,ss_ext_sales_price,ss_ext_wholesale_cost,ss_net_profit] - BroadcastHashJoin [cd_demo_sk,ss_cdemo_sk,cd_marital_status,cd_education_status,ss_sales_price] + Project [ss_sold_date_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_quantity,ss_sales_price,ss_ext_sales_price,ss_ext_wholesale_cost,ss_net_profit,cd_marital_status,cd_education_status] + BroadcastHashJoin [ss_cdemo_sk,cd_demo_sk,cd_marital_status,cd_education_status,ss_sales_price] + Filter [ss_store_sk,ss_addr_sk,ss_sold_date_sk,ss_cdemo_sk,ss_hdemo_sk,ss_net_profit,ss_sales_price] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_quantity,ss_sales_price,ss_ext_sales_price,ss_ext_wholesale_cost,ss_net_profit] InputAdapter BroadcastExchange #2 WholeStageCodegen (1) @@ -21,10 +25,6 @@ WholeStageCodegen (7) ColumnarToRow InputAdapter Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status,cd_education_status] - Filter [ss_store_sk,ss_addr_sk,ss_sold_date_sk,ss_cdemo_sk,ss_hdemo_sk,ss_net_profit,ss_sales_price] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_cdemo_sk,ss_hdemo_sk,ss_addr_sk,ss_store_sk,ss_quantity,ss_sales_price,ss_ext_sales_price,ss_ext_wholesale_cost,ss_net_profit] InputAdapter BroadcastExchange #3 WholeStageCodegen (2) @@ -35,23 +35,23 @@ WholeStageCodegen (7) InputAdapter BroadcastExchange #4 WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_year,d_date_sk] + Project [ca_address_sk,ca_state] + Filter [ca_country,ca_address_sk,ca_state] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] + Scan parquet default.customer_address [ca_address_sk,ca_state,ca_country] InputAdapter BroadcastExchange #5 WholeStageCodegen (4) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk] + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] InputAdapter BroadcastExchange #6 WholeStageCodegen (5) - Project [ca_address_sk,ca_state] - Filter [ca_country,ca_address_sk,ca_state] - ColumnarToRow - InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_state,ca_country] + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt index c3e9f9418cef5..b346701fa3148 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/explain.txt @@ -517,15 +517,15 @@ Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#36, isEmpty#37, cou Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40, count(1)#41] -Results [7]: [store AS channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sales#43, count(1)#41 AS number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sales#42, count(1)#41 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] (86) Filter [codegen id : 39] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45 as decimal(32,6)) > cast(Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44 as decimal(32,6)) > cast(Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (87) Project [codegen id : 39] -Output [6]: [sales#43, number_sales#44, channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45] +Output [6]: [sales#42, number_sales#43, store AS channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] (88) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#18, cs_item_sk#19, cs_quantity#48, cs_list_price#49] @@ -601,15 +601,15 @@ Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#54, isEmpty#55, cou Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58, count(1)#59] -Results [7]: [catalog AS channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sales#61, count(1)#59 AS number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sales#60, count(1)#59 AS number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62] (105) Filter [codegen id : 78] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 
as decimal(12,2)))), DecimalType(18,2), true))#63] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (106) Project [codegen id : 78] -Output [6]: [sales#61, number_sales#62, channel#60, i_brand_id#7, i_class_id#8, i_category_id#9] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63] +Output [6]: [sales#60, number_sales#61, catalog AS channel#63, i_brand_id#7, i_class_id#8, i_category_id#9] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62] (107) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#22, ws_item_sk#23, ws_quantity#64, ws_list_price#65] @@ -685,26 +685,26 @@ Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#70, isEmpty#71, cou Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74, count(1)#75] -Results [7]: [web AS channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sales#77, count(1)#75 AS number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as 
decimal(12,2)))), DecimalType(18,2), true))#79] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sales#76, count(1)#75 AS number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78] (124) Filter [codegen id : 117] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (125) Project [codegen id : 117] -Output [6]: [sales#77, number_sales#78, channel#76, i_brand_id#7, i_class_id#8, i_category_id#9] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79] +Output [6]: [sales#76, number_sales#77, web AS channel#79, i_brand_id#7, i_class_id#8, i_category_id#9] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78] (126) Union (127) Expand [codegen id : 118] -Input [6]: [sales#43, number_sales#44, channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Arguments: [List(sales#43, number_sales#44, channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, 0), List(sales#43, number_sales#44, channel#42, i_brand_id#7, i_class_id#8, null, 1), List(sales#43, number_sales#44, channel#42, i_brand_id#7, null, 
null, 3), List(sales#43, number_sales#44, channel#42, null, null, null, 7), List(sales#43, number_sales#44, null, null, null, null, 15)], [sales#43, number_sales#44, channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84] +Input [6]: [sales#42, number_sales#43, channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Arguments: [List(sales#42, number_sales#43, channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, 0), List(sales#42, number_sales#43, channel#47, i_brand_id#7, i_class_id#8, null, 1), List(sales#42, number_sales#43, channel#47, i_brand_id#7, null, null, 3), List(sales#42, number_sales#43, channel#47, null, null, null, 7), List(sales#42, number_sales#43, null, null, null, null, 15)], [sales#42, number_sales#43, channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84] (128) HashAggregate [codegen id : 118] -Input [7]: [sales#43, number_sales#44, channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84] +Input [7]: [sales#42, number_sales#43, channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84] Keys [5]: [channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84] -Functions [2]: [partial_sum(sales#43), partial_sum(number_sales#44)] +Functions [2]: [partial_sum(sales#42), partial_sum(number_sales#43)] Aggregate Attributes [3]: [sum#85, isEmpty#86, sum#87] Results [8]: [channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84, sum#88, isEmpty#89, sum#90] @@ -715,9 +715,9 @@ Arguments: hashpartitioning(channel#80, i_brand_id#81, i_class_id#82, i_category (130) HashAggregate [codegen id : 119] Input [8]: [channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84, sum#88, isEmpty#89, sum#90] Keys [5]: [channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, spark_grouping_id#84] -Functions [2]: [sum(sales#43), sum(number_sales#44)] -Aggregate Attributes [2]: [sum(sales#43)#92, sum(number_sales#44)#93] -Results [6]: [channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, sum(sales#43)#92 AS sum(sales)#94, sum(number_sales#44)#93 AS sum(number_sales)#95] +Functions [2]: [sum(sales#42), sum(number_sales#43)] +Aggregate Attributes [2]: [sum(sales#42)#92, sum(number_sales#43)#93] +Results [6]: [channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, sum(sales#42)#92 AS sum(sales)#94, sum(number_sales#43)#93 AS sum(number_sales)#95] (131) TakeOrderedAndProject Input [6]: [channel#80, i_brand_id#81, i_class_id#82, i_category_id#83, sum(sales)#94, sum(number_sales)#95] @@ -725,7 +725,7 @@ Arguments: 100, [channel#80 ASC NULLS FIRST, i_brand_id#81 ASC NULLS FIRST, i_cl ===== Subqueries ===== -Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#46, [id=#47] +Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#45, [id=#46] * HashAggregate (157) +- Exchange (156) +- * HashAggregate (155) @@ -871,8 +871,8 @@ Functions [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#97 as de Aggregate Attributes [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#97 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#98 as decimal(12,2)))), DecimalType(18,2), true))#108] Results [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#97 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#98 as decimal(12,2)))), DecimalType(18,2), true))#108 AS average_sales#109] -Subquery:2 Hosting operator id = 
105 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:2 Hosting operator id = 105 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:3 Hosting operator id = 124 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:3 Hosting operator id = 124 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/simplified.txt index c6dbfcaa3fe43..5b93392d023db 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a.sf100/simplified.txt @@ -9,7 +9,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su InputAdapter Union WholeStageCodegen (39) - Project [sales,number_sales,channel,i_brand_id,i_class_id,i_category_id] + Project [sales,number_sales,i_brand_id,i_class_id,i_category_id] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] Subquery #1 WholeStageCodegen (8) @@ -53,7 +53,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] InputAdapter ReusedExchange [d_date_sk] #17 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #2 WholeStageCodegen (38) @@ -189,10 +189,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su InputAdapter ReusedExchange [ss_item_sk] #4 WholeStageCodegen (78) - Project [sales,number_sales,channel,i_brand_id,i_class_id,i_category_id] + Project [sales,number_sales,i_brand_id,i_class_id,i_category_id] Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) 
as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #18 WholeStageCodegen (77) @@ -221,10 +221,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su InputAdapter ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #14 WholeStageCodegen (117) - Project [sales,number_sales,channel,i_brand_id,i_class_id,i_category_id] + Project [sales,number_sales,i_brand_id,i_class_id,i_category_id] Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #20 WholeStageCodegen (116) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt index c1b77321f16e6..3f0cc9e7acb1e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/explain.txt @@ -461,15 +461,15 @@ Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#33, isEmpty#34, cou Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37, count(1)#38] -Results [7]: [store AS channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sales#40, count(1)#38 AS number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sales#39, count(1)#38 AS number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41] (76) Filter [codegen id : 26] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 as decimal(32,6)) > cast(Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41 as decimal(32,6)) > cast(Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (77) Project [codegen id : 26] -Output [6]: [sales#40, number_sales#41, channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42] +Output [6]: [sales#39, number_sales#40, store AS channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), 
DecimalType(18,2), true))#41] (78) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#16, cs_item_sk#17, cs_quantity#45, cs_list_price#46] @@ -533,15 +533,15 @@ Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#50, isEmpty#51, cou Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54, count(1)#55] -Results [7]: [catalog AS channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sales#57, count(1)#55 AS number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sales#56, count(1)#55 AS number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58] (92) Filter [codegen id : 52] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as 
decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (93) Project [codegen id : 52] -Output [6]: [sales#57, number_sales#58, channel#56, i_brand_id#6, i_class_id#7, i_category_id#8] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59] +Output [6]: [sales#56, number_sales#57, catalog AS channel#59, i_brand_id#6, i_class_id#7, i_category_id#8] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58] (94) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#20, ws_item_sk#21, ws_quantity#60, ws_list_price#61] @@ -605,26 +605,26 @@ Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#65, isEmpty#66, cou Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69, count(1)#70] -Results [7]: [web AS channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sales#72, count(1)#70 AS number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sales#71, count(1)#70 AS number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73] (108) Filter [codegen id : 78] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as 
decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (109) Project [codegen id : 78] -Output [6]: [sales#72, number_sales#73, channel#71, i_brand_id#6, i_class_id#7, i_category_id#8] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74] +Output [6]: [sales#71, number_sales#72, web AS channel#74, i_brand_id#6, i_class_id#7, i_category_id#8] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73] (110) Union (111) Expand [codegen id : 79] -Input [6]: [sales#40, number_sales#41, channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Arguments: [List(sales#40, number_sales#41, channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, 0), List(sales#40, number_sales#41, channel#39, i_brand_id#6, i_class_id#7, null, 1), List(sales#40, number_sales#41, channel#39, i_brand_id#6, null, null, 3), List(sales#40, number_sales#41, channel#39, null, null, null, 7), List(sales#40, number_sales#41, null, null, null, null, 15)], [sales#40, number_sales#41, channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79] +Input [6]: [sales#39, number_sales#40, channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Arguments: [List(sales#39, number_sales#40, channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, 0), List(sales#39, number_sales#40, channel#44, i_brand_id#6, i_class_id#7, null, 1), List(sales#39, number_sales#40, channel#44, i_brand_id#6, null, null, 3), List(sales#39, number_sales#40, channel#44, null, null, null, 7), List(sales#39, number_sales#40, null, null, null, null, 15)], [sales#39, number_sales#40, channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79] (112) HashAggregate [codegen id : 79] -Input [7]: [sales#40, number_sales#41, channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79] +Input [7]: [sales#39, number_sales#40, channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79] Keys [5]: 
[channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79] -Functions [2]: [partial_sum(sales#40), partial_sum(number_sales#41)] +Functions [2]: [partial_sum(sales#39), partial_sum(number_sales#40)] Aggregate Attributes [3]: [sum#80, isEmpty#81, sum#82] Results [8]: [channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79, sum#83, isEmpty#84, sum#85] @@ -635,9 +635,9 @@ Arguments: hashpartitioning(channel#75, i_brand_id#76, i_class_id#77, i_category (114) HashAggregate [codegen id : 80] Input [8]: [channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79, sum#83, isEmpty#84, sum#85] Keys [5]: [channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, spark_grouping_id#79] -Functions [2]: [sum(sales#40), sum(number_sales#41)] -Aggregate Attributes [2]: [sum(sales#40)#87, sum(number_sales#41)#88] -Results [6]: [channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, sum(sales#40)#87 AS sum(sales)#89, sum(number_sales#41)#88 AS sum(number_sales)#90] +Functions [2]: [sum(sales#39), sum(number_sales#40)] +Aggregate Attributes [2]: [sum(sales#39)#87, sum(number_sales#40)#88] +Results [6]: [channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, sum(sales#39)#87 AS sum(sales)#89, sum(number_sales#40)#88 AS sum(number_sales)#90] (115) TakeOrderedAndProject Input [6]: [channel#75, i_brand_id#76, i_class_id#77, i_category_id#78, sum(sales)#89, sum(number_sales)#90] @@ -645,7 +645,7 @@ Arguments: 100, [channel#75 ASC NULLS FIRST, i_brand_id#76 ASC NULLS FIRST, i_cl ===== Subqueries ===== -Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#43, [id=#44] +Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#42, [id=#43] * HashAggregate (141) +- Exchange (140) +- * HashAggregate (139) @@ -791,8 +791,8 @@ Functions [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#92 as de Aggregate Attributes [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#92 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#93 as decimal(12,2)))), DecimalType(18,2), true))#103] Results [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#92 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#93 as decimal(12,2)))), DecimalType(18,2), true))#103 AS average_sales#104] -Subquery:2 Hosting operator id = 92 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:2 Hosting operator id = 92 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:3 Hosting operator id = 108 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:3 Hosting operator id = 108 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/simplified.txt index 604bd792f5ffd..dfa8c1bcc1579 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14a/simplified.txt @@ -9,7 +9,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su InputAdapter Union WholeStageCodegen (26) - Project [sales,number_sales,channel,i_brand_id,i_class_id,i_category_id] + Project [sales,number_sales,i_brand_id,i_class_id,i_category_id] Filter 
[sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] Subquery #1 WholeStageCodegen (8) @@ -53,7 +53,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] InputAdapter ReusedExchange [d_date_sk] #14 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #2 WholeStageCodegen (25) @@ -165,10 +165,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su InputAdapter Scan parquet default.date_dim [d_date_sk,d_year,d_moy] WholeStageCodegen (52) - Project [sales,number_sales,channel,i_brand_id,i_class_id,i_category_id] + Project [sales,number_sales,i_brand_id,i_class_id,i_category_id] Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #15 WholeStageCodegen (51) @@ -189,10 +189,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum(sales),su InputAdapter ReusedExchange [d_date_sk] #12 WholeStageCodegen (78) - Project [sales,number_sales,channel,i_brand_id,i_class_id,i_category_id] + Project [sales,number_sales,i_brand_id,i_class_id,i_category_id] Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #16 WholeStageCodegen (77) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt index f71ceaaf91f47..2d2b56e32bdb8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/explain.txt @@ -496,15 +496,15 @@ Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#38, isEmpty#39, cou Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42, count(1)#43] -Results [7]: [store AS channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sales#45, count(1)#43 AS number_sales#46, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sales#44, count(1)#43 AS number_sales#45, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as 
decimal(12,2)))), DecimalType(18,2), true))#46] (86) Filter [codegen id : 78] -Input [7]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47 as decimal(32,6)) > cast(Subquery scalar-subquery#48, [id=#49] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46 as decimal(32,6)) > cast(Subquery scalar-subquery#47, [id=#48] as decimal(32,6)))) (87) Project [codegen id : 78] -Output [6]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46] -Input [7]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47] +Output [6]: [store AS channel#49, i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46] (88) ReusedExchange [Reuses operator id: 4] Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] @@ -584,18 +584,18 @@ Input [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sum#60, isEmpty#61, Keys [3]: [i_brand_id#54, i_class_id#55, i_category_id#56] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64, count(1)#65] -Results [7]: [store AS channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sales#67, count(1)#65 AS number_sales#68, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) 
as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69] +Results [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sales#66, count(1)#65 AS number_sales#67, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68] (106) Filter [codegen id : 77] -Input [7]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#48, [id=#49] as decimal(32,6)))) +Input [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#47, [id=#48] as decimal(32,6)))) (107) Project [codegen id : 77] -Output [6]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] -Input [7]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69] +Output [6]: [store AS channel#69, i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67] +Input [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68] (108) BroadcastExchange -Input [6]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] +Input [6]: 
[channel#69, i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67] Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true]),false), [id=#70] (109) BroadcastHashJoin [codegen id : 78] @@ -604,12 +604,12 @@ Right keys [3]: [i_brand_id#54, i_class_id#55, i_category_id#56] Join condition: None (110) TakeOrderedAndProject -Input [12]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] -Arguments: 100, [channel#44 ASC NULLS FIRST, i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST], [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] +Input [12]: [channel#49, i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, channel#69, i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67] +Arguments: 100, [i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST], [channel#49, i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, channel#69, i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67] ===== Subqueries ===== -Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#48, [id=#49] +Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#47, [id=#48] * HashAggregate (136) +- Exchange (135) +- * HashAggregate (134) @@ -780,7 +780,7 @@ Condition : (((((isnotnull(d_year#11) AND isnotnull(d_moy#89)) AND isnotnull(d_d Output [1]: [d_week_seq#29] Input [4]: [d_week_seq#29, d_year#11, d_moy#89, d_dom#90] -Subquery:3 Hosting operator id = 106 Hosting Expression = ReusedSubquery Subquery scalar-subquery#48, [id=#49] +Subquery:3 Hosting operator id = 106 Hosting Expression = ReusedSubquery Subquery scalar-subquery#47, [id=#48] Subquery:4 Hosting operator id = 95 Hosting Expression = Subquery scalar-subquery#50, [id=#51] * Project (144) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/simplified.txt index 37186560cb3b8..d6b8ba4395d2e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b.sf100/simplified.txt @@ -1,7 +1,7 @@ -TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] +TakeOrderedAndProject [i_brand_id,i_class_id,i_category_id,channel,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] WholeStageCodegen (78) BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,i_brand_id,i_class_id,i_category_id] - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] Subquery #2 WholeStageCodegen (8) @@ -45,7 +45,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ Scan parquet default.web_sales 
[ws_sold_date_sk,ws_quantity,ws_list_price] InputAdapter ReusedExchange [d_date_sk] #16 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #1 WholeStageCodegen (38) @@ -190,10 +190,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ InputAdapter BroadcastExchange #17 WholeStageCodegen (77) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #2 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #18 WholeStageCodegen (76) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt index b68ce0e9f2264..1f31ded51f1ef 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/explain.txt @@ -446,15 +446,15 @@ Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#35, isEmpty#36, cou Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: 
[sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39, count(1)#40] -Results [7]: [store AS channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sales#42, count(1)#40 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sales#41, count(1)#40 AS number_sales#42, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43] (76) Filter [codegen id : 52] -Input [7]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44 as decimal(32,6)) > cast(Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43 as decimal(32,6)) > cast(Subquery scalar-subquery#44, [id=#45] as decimal(32,6)))) (77) Project [codegen id : 52] -Output [6]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43] -Input [7]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), 
DecimalType(18,2), true))#44] +Output [6]: [store AS channel#46, i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43] (78) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] @@ -537,18 +537,18 @@ Input [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sum#57, isEmpty#58, Keys [3]: [i_brand_id#48, i_class_id#49, i_category_id#50] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61, count(1)#62] -Results [7]: [store AS channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sales#64, count(1)#62 AS number_sales#65, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66] +Results [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sales#63, count(1)#62 AS number_sales#64, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65] (96) Filter [codegen id : 51] -Input [7]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) +Input [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#44, [id=#45] as decimal(32,6)))) (97) Project [codegen id : 51] -Output [6]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] -Input [7]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66] +Output [6]: [store AS channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] +Input [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65] (98) BroadcastExchange -Input [6]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] +Input [6]: [channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true]),false), [id=#67] (99) BroadcastHashJoin [codegen id : 52] @@ -557,12 +557,12 @@ Right keys [3]: [i_brand_id#48, i_class_id#49, i_category_id#50] Join condition: None (100) TakeOrderedAndProject -Input [12]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] -Arguments: 100, [channel#41 ASC NULLS FIRST, i_brand_id#6 ASC NULLS FIRST, i_class_id#7 ASC NULLS FIRST, i_category_id#8 ASC NULLS FIRST], [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] +Input [12]: [channel#46, i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] +Arguments: 100, [i_brand_id#6 ASC NULLS FIRST, i_class_id#7 ASC NULLS FIRST, i_category_id#8 ASC NULLS FIRST], [channel#46, i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] ===== Subqueries ===== -Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#45, [id=#46] +Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#44, [id=#45] * HashAggregate (126) +- Exchange (125) +- * HashAggregate (124) @@ -733,7 +733,7 @@ Condition : (((((isnotnull(d_year#11) AND isnotnull(d_moy#86)) AND isnotnull(d_d Output [1]: [d_week_seq#28] Input [4]: [d_week_seq#28, d_year#11, d_moy#86, d_dom#87] -Subquery:3 Hosting operator id = 96 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] +Subquery:3 Hosting operator id = 96 Hosting Expression = ReusedSubquery 
Subquery scalar-subquery#44, [id=#45] Subquery:4 Hosting operator id = 88 Hosting Expression = Subquery scalar-subquery#51, [id=#52] * Project (134) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/simplified.txt index 6e6950d4cb33a..7bbf83e3de707 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q14b/simplified.txt @@ -1,7 +1,7 @@ -TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] +TakeOrderedAndProject [i_brand_id,i_class_id,i_category_id,channel,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] WholeStageCodegen (52) BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,i_brand_id,i_class_id,i_category_id] - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] Subquery #2 WholeStageCodegen (8) @@ -45,7 +45,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] InputAdapter ReusedExchange [d_date_sk] #13 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #1 WholeStageCodegen (25) @@ -166,10 +166,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ InputAdapter BroadcastExchange #14 WholeStageCodegen (51) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #2 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as 
decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #15 WholeStageCodegen (50) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt index 509fb0133095b..a446163e3d29d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (44) +* Sort (44) +- * HashAggregate (43) +- Exchange (42) +- * HashAggregate (41) @@ -244,7 +244,7 @@ Functions [3]: [sum(UnscaledValue(cs_ext_ship_cost#6)), sum(UnscaledValue(cs_net Aggregate Attributes [3]: [sum(UnscaledValue(cs_ext_ship_cost#6))#23, sum(UnscaledValue(cs_net_profit#7))#24, count(cs_order_number#5)#27] Results [3]: [count(cs_order_number#5)#27 AS order count #30, MakeDecimal(sum(UnscaledValue(cs_ext_ship_cost#6))#23,17,2) AS total shipping cost #31, MakeDecimal(sum(UnscaledValue(cs_net_profit#7))#24,17,2) AS total net profit #32] -(44) TakeOrderedAndProject +(44) Sort [codegen id : 12] Input [3]: [order count #30, total shipping cost #31, total net profit #32] -Arguments: 100, [order count #30 ASC NULLS FIRST], [order count #30, total shipping cost #31, total net profit #32] +Arguments: [order count #30 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/simplified.txt index ea9a0b27ff700..73a9b58010f58 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16.sf100/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] - WholeStageCodegen (12) +WholeStageCodegen (12) + Sort [order count ] HashAggregate [sum,sum,count] [sum(UnscaledValue(cs_ext_ship_cost)),sum(UnscaledValue(cs_net_profit)),count(cs_order_number),order count ,total shipping cost ,total net profit ,sum,sum,count] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt index 2ae939cfe41f3..ea7e298393e4c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (41) +* Sort (41) +- * HashAggregate (40) +- Exchange (39) +- * HashAggregate (38) @@ -229,7 +229,7 @@ Functions [3]: [sum(UnscaledValue(cs_ext_ship_cost#6)), sum(UnscaledValue(cs_net Aggregate Attributes [3]: 
[sum(UnscaledValue(cs_ext_ship_cost#6))#22, sum(UnscaledValue(cs_net_profit#7))#23, count(cs_order_number#5)#27] Results [3]: [count(cs_order_number#5)#27 AS order count #30, MakeDecimal(sum(UnscaledValue(cs_ext_ship_cost#6))#22,17,2) AS total shipping cost #31, MakeDecimal(sum(UnscaledValue(cs_net_profit#7))#23,17,2) AS total net profit #32] -(41) TakeOrderedAndProject +(41) Sort [codegen id : 8] Input [3]: [order count #30, total shipping cost #31, total net profit #32] -Arguments: 100, [order count #30 ASC NULLS FIRST], [order count #30, total shipping cost #31, total net profit #32] +Arguments: [order count #30 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/simplified.txt index a044b05365f8e..169f07c2d85e5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q16/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] - WholeStageCodegen (8) +WholeStageCodegen (8) + Sort [order count ] HashAggregate [sum,sum,count] [sum(UnscaledValue(cs_ext_ship_cost)),sum(UnscaledValue(cs_net_profit)),count(cs_order_number),order count ,total shipping cost ,total net profit ,sum,sum,count] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt index e24b656e843aa..a9ab8c3690a00 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/explain.txt @@ -5,57 +5,57 @@ TakeOrderedAndProject (57) +- * HashAggregate (54) +- * Project (53) +- * SortMergeJoin Inner (52) - :- * Sort (43) - : +- Exchange (42) - : +- * Project (41) - : +- * SortMergeJoin Inner (40) - : :- * Sort (27) - : : +- Exchange (26) - : : +- * Project (25) - : : +- * SortMergeJoin Inner (24) - : : :- * Sort (18) - : : : +- Exchange (17) - : : : +- * Project (16) - : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : :- * Project (10) - : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : :- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.store_sales (1) - : : : : +- BroadcastExchange (8) - : : : : +- * Project (7) - : : : : +- * Filter (6) - : : : : +- * ColumnarToRow (5) - : : : : +- Scan parquet default.date_dim (4) - : : : +- BroadcastExchange (14) - : : : +- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.store (11) - : : +- * Sort (23) - : : +- Exchange (22) - : : +- * Filter (21) - : : +- * ColumnarToRow (20) - : : +- Scan parquet default.item (19) - : +- * Sort (39) - : +- Exchange (38) - : +- * Project (37) - : +- * BroadcastHashJoin Inner BuildLeft (36) - : :- BroadcastExchange (32) - : : +- * Project (31) - : : +- * Filter (30) - : : +- * ColumnarToRow (29) - : : +- Scan parquet default.date_dim (28) - : +- * Filter (35) - : +- * ColumnarToRow (34) - : +- Scan parquet default.store_returns (33) + :- * Sort (27) + : +- Exchange (26) + : +- * Project (25) + : +- * SortMergeJoin Inner (24) + : :- * Sort (18) + : : +- Exchange (17) + : : +- * Project (16) + : : +- * BroadcastHashJoin Inner BuildRight (15) + : : :- * Project (10) + : : : +- 
* BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.store_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) + : : +- BroadcastExchange (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.store (11) + : +- * Sort (23) + : +- Exchange (22) + : +- * Filter (21) + : +- * ColumnarToRow (20) + : +- Scan parquet default.item (19) +- * Sort (51) +- Exchange (50) +- * Project (49) - +- * BroadcastHashJoin Inner BuildRight (48) - :- * Filter (46) - : +- * ColumnarToRow (45) - : +- Scan parquet default.catalog_sales (44) - +- ReusedExchange (47) + +- * SortMergeJoin Inner (48) + :- * Sort (39) + : +- Exchange (38) + : +- * Project (37) + : +- * BroadcastHashJoin Inner BuildRight (36) + : :- * Filter (30) + : : +- * ColumnarToRow (29) + : : +- Scan parquet default.store_returns (28) + : +- BroadcastExchange (35) + : +- * Project (34) + : +- * Filter (33) + : +- * ColumnarToRow (32) + : +- Scan parquet default.date_dim (31) + +- * Sort (47) + +- Exchange (46) + +- * Project (45) + +- * BroadcastHashJoin Inner BuildRight (44) + :- * Filter (42) + : +- * ColumnarToRow (41) + : +- Scan parquet default.catalog_sales (40) + +- ReusedExchange (43) (1) Scan parquet default.store_sales @@ -132,7 +132,7 @@ Input [7]: [ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, s (17) Exchange Input [5]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_state#11] -Arguments: hashpartitioning(ss_item_sk#2, 5), true, [id=#13] +Arguments: hashpartitioning(ss_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#13] (18) Sort [codegen id : 4] Input [5]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_state#11] @@ -154,7 +154,7 @@ Condition : isnotnull(i_item_sk#14) (22) Exchange Input [3]: [i_item_sk#14, i_item_id#15, i_item_desc#16] -Arguments: hashpartitioning(i_item_sk#14, 5), true, [id=#17] +Arguments: hashpartitioning(i_item_sk#14, 5), ENSURE_REQUIREMENTS, [id=#17] (23) Sort [codegen id : 6] Input [3]: [i_item_sk#14, i_item_id#15, i_item_desc#16] @@ -171,142 +171,142 @@ Input [8]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s (26) Exchange Input [7]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16] -Arguments: hashpartitioning(cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint), 5), true, [id=#18] +Arguments: hashpartitioning(cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint), 5), ENSURE_REQUIREMENTS, [id=#18] (27) Sort [codegen id : 8] Input [7]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16] Arguments: [cast(ss_customer_sk#3 as bigint) ASC NULLS FIRST, cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST], false, 0 -(28) Scan parquet default.date_dim -Output [2]: [d_date_sk#19, d_quarter_name#20] +(28) Scan parquet default.store_returns +Output [5]: [sr_returned_date_sk#19, sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [In(d_quarter_name, [2001Q1,2001Q2,2001Q3]), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in 
comparison]/{warehouse_dir}/store_returns] +PushedFilters: [IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number), IsNotNull(sr_returned_date_sk)] +ReadSchema: struct -(29) ColumnarToRow [codegen id : 9] -Input [2]: [d_date_sk#19, d_quarter_name#20] +(29) ColumnarToRow [codegen id : 10] +Input [5]: [sr_returned_date_sk#19, sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23] -(30) Filter [codegen id : 9] -Input [2]: [d_date_sk#19, d_quarter_name#20] -Condition : (d_quarter_name#20 IN (2001Q1,2001Q2,2001Q3) AND isnotnull(d_date_sk#19)) +(30) Filter [codegen id : 10] +Input [5]: [sr_returned_date_sk#19, sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23] +Condition : (((isnotnull(sr_customer_sk#21) AND isnotnull(sr_item_sk#20)) AND isnotnull(sr_ticket_number#22)) AND isnotnull(sr_returned_date_sk#19)) -(31) Project [codegen id : 9] -Output [1]: [d_date_sk#19] -Input [2]: [d_date_sk#19, d_quarter_name#20] +(31) Scan parquet default.date_dim +Output [2]: [d_date_sk#24, d_quarter_name#25] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [In(d_quarter_name, [2001Q1,2001Q2,2001Q3]), IsNotNull(d_date_sk)] +ReadSchema: struct -(32) BroadcastExchange -Input [1]: [d_date_sk#19] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#21] +(32) ColumnarToRow [codegen id : 9] +Input [2]: [d_date_sk#24, d_quarter_name#25] -(33) Scan parquet default.store_returns -Output [5]: [sr_returned_date_sk#22, sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_returns] -PushedFilters: [IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number), IsNotNull(sr_returned_date_sk)] -ReadSchema: struct +(33) Filter [codegen id : 9] +Input [2]: [d_date_sk#24, d_quarter_name#25] +Condition : (d_quarter_name#25 IN (2001Q1,2001Q2,2001Q3) AND isnotnull(d_date_sk#24)) -(34) ColumnarToRow -Input [5]: [sr_returned_date_sk#22, sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] +(34) Project [codegen id : 9] +Output [1]: [d_date_sk#24] +Input [2]: [d_date_sk#24, d_quarter_name#25] -(35) Filter -Input [5]: [sr_returned_date_sk#22, sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] -Condition : (((isnotnull(sr_customer_sk#24) AND isnotnull(sr_item_sk#23)) AND isnotnull(sr_ticket_number#25)) AND isnotnull(sr_returned_date_sk#22)) +(35) BroadcastExchange +Input [1]: [d_date_sk#24] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#26] (36) BroadcastHashJoin [codegen id : 10] -Left keys [1]: [cast(d_date_sk#19 as bigint)] -Right keys [1]: [sr_returned_date_sk#22] +Left keys [1]: [sr_returned_date_sk#19] +Right keys [1]: [cast(d_date_sk#24 as bigint)] Join condition: None (37) Project [codegen id : 10] -Output [4]: [sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] -Input [6]: [d_date_sk#19, sr_returned_date_sk#22, sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] +Output [4]: [sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23] +Input [6]: [sr_returned_date_sk#19, sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23, d_date_sk#24] (38) Exchange -Input [4]: [sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] -Arguments: 
hashpartitioning(sr_customer_sk#24, sr_item_sk#23, sr_ticket_number#25, 5), true, [id=#27] +Input [4]: [sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23] +Arguments: hashpartitioning(sr_customer_sk#21, sr_item_sk#20, 5), ENSURE_REQUIREMENTS, [id=#27] (39) Sort [codegen id : 11] -Input [4]: [sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] -Arguments: [sr_customer_sk#24 ASC NULLS FIRST, sr_item_sk#23 ASC NULLS FIRST, sr_ticket_number#25 ASC NULLS FIRST], false, 0 - -(40) SortMergeJoin [codegen id : 12] -Left keys [3]: [cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint)] -Right keys [3]: [sr_customer_sk#24, sr_item_sk#23, sr_ticket_number#25] -Join condition: None - -(41) Project [codegen id : 12] -Output [7]: [ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16, sr_item_sk#23, sr_customer_sk#24, sr_return_quantity#26] -Input [11]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16, sr_item_sk#23, sr_customer_sk#24, sr_ticket_number#25, sr_return_quantity#26] - -(42) Exchange -Input [7]: [ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16, sr_item_sk#23, sr_customer_sk#24, sr_return_quantity#26] -Arguments: hashpartitioning(sr_customer_sk#24, sr_item_sk#23, 5), true, [id=#28] - -(43) Sort [codegen id : 13] -Input [7]: [ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16, sr_item_sk#23, sr_customer_sk#24, sr_return_quantity#26] -Arguments: [sr_customer_sk#24 ASC NULLS FIRST, sr_item_sk#23 ASC NULLS FIRST], false, 0 +Input [4]: [sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23] +Arguments: [sr_customer_sk#21 ASC NULLS FIRST, sr_item_sk#20 ASC NULLS FIRST], false, 0 -(44) Scan parquet default.catalog_sales -Output [4]: [cs_sold_date_sk#29, cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32] +(40) Scan parquet default.catalog_sales +Output [4]: [cs_sold_date_sk#28, cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(45) ColumnarToRow [codegen id : 15] -Input [4]: [cs_sold_date_sk#29, cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32] +(41) ColumnarToRow [codegen id : 13] +Input [4]: [cs_sold_date_sk#28, cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31] -(46) Filter [codegen id : 15] -Input [4]: [cs_sold_date_sk#29, cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32] -Condition : ((isnotnull(cs_bill_customer_sk#30) AND isnotnull(cs_item_sk#31)) AND isnotnull(cs_sold_date_sk#29)) +(42) Filter [codegen id : 13] +Input [4]: [cs_sold_date_sk#28, cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31] +Condition : ((isnotnull(cs_bill_customer_sk#29) AND isnotnull(cs_item_sk#30)) AND isnotnull(cs_sold_date_sk#28)) -(47) ReusedExchange [Reuses operator id: 32] -Output [1]: [d_date_sk#33] +(43) ReusedExchange [Reuses operator id: 35] +Output [1]: [d_date_sk#32] -(48) BroadcastHashJoin [codegen id : 15] -Left keys [1]: [cs_sold_date_sk#29] -Right keys [1]: [d_date_sk#33] +(44) BroadcastHashJoin [codegen id : 13] +Left keys [1]: [cs_sold_date_sk#28] +Right keys [1]: [d_date_sk#32] +Join condition: None + +(45) Project [codegen id : 13] +Output [3]: [cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31] +Input [5]: [cs_sold_date_sk#28, cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31, 
d_date_sk#32] + +(46) Exchange +Input [3]: [cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31] +Arguments: hashpartitioning(cast(cs_bill_customer_sk#29 as bigint), cast(cs_item_sk#30 as bigint), 5), ENSURE_REQUIREMENTS, [id=#33] + +(47) Sort [codegen id : 14] +Input [3]: [cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31] +Arguments: [cast(cs_bill_customer_sk#29 as bigint) ASC NULLS FIRST, cast(cs_item_sk#30 as bigint) ASC NULLS FIRST], false, 0 + +(48) SortMergeJoin [codegen id : 15] +Left keys [2]: [sr_customer_sk#21, sr_item_sk#20] +Right keys [2]: [cast(cs_bill_customer_sk#29 as bigint), cast(cs_item_sk#30 as bigint)] Join condition: None (49) Project [codegen id : 15] -Output [3]: [cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32] -Input [5]: [cs_sold_date_sk#29, cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32, d_date_sk#33] +Output [5]: [sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23, cs_quantity#31] +Input [7]: [sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23, cs_bill_customer_sk#29, cs_item_sk#30, cs_quantity#31] (50) Exchange -Input [3]: [cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32] -Arguments: hashpartitioning(cast(cs_bill_customer_sk#30 as bigint), cast(cs_item_sk#31 as bigint), 5), true, [id=#34] +Input [5]: [sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23, cs_quantity#31] +Arguments: hashpartitioning(sr_customer_sk#21, sr_item_sk#20, sr_ticket_number#22, 5), ENSURE_REQUIREMENTS, [id=#34] (51) Sort [codegen id : 16] -Input [3]: [cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32] -Arguments: [cast(cs_bill_customer_sk#30 as bigint) ASC NULLS FIRST, cast(cs_item_sk#31 as bigint) ASC NULLS FIRST], false, 0 +Input [5]: [sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23, cs_quantity#31] +Arguments: [sr_customer_sk#21 ASC NULLS FIRST, sr_item_sk#20 ASC NULLS FIRST, sr_ticket_number#22 ASC NULLS FIRST], false, 0 (52) SortMergeJoin [codegen id : 17] -Left keys [2]: [sr_customer_sk#24, sr_item_sk#23] -Right keys [2]: [cast(cs_bill_customer_sk#30 as bigint), cast(cs_item_sk#31 as bigint)] +Left keys [3]: [cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint)] +Right keys [3]: [sr_customer_sk#21, sr_item_sk#20, sr_ticket_number#22] Join condition: None (53) Project [codegen id : 17] -Output [6]: [ss_quantity#6, sr_return_quantity#26, cs_quantity#32, s_state#11, i_item_id#15, i_item_desc#16] -Input [10]: [ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16, sr_item_sk#23, sr_customer_sk#24, sr_return_quantity#26, cs_bill_customer_sk#30, cs_item_sk#31, cs_quantity#32] +Output [6]: [ss_quantity#6, sr_return_quantity#23, cs_quantity#31, s_state#11, i_item_id#15, i_item_desc#16] +Input [12]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_state#11, i_item_id#15, i_item_desc#16, sr_item_sk#20, sr_customer_sk#21, sr_ticket_number#22, sr_return_quantity#23, cs_quantity#31] (54) HashAggregate [codegen id : 17] -Input [6]: [ss_quantity#6, sr_return_quantity#26, cs_quantity#32, s_state#11, i_item_id#15, i_item_desc#16] +Input [6]: [ss_quantity#6, sr_return_quantity#23, cs_quantity#31, s_state#11, i_item_id#15, i_item_desc#16] Keys [3]: [i_item_id#15, i_item_desc#16, s_state#11] -Functions [9]: [partial_count(ss_quantity#6), partial_avg(cast(ss_quantity#6 as bigint)), partial_stddev_samp(cast(ss_quantity#6 as double)), partial_count(sr_return_quantity#26), 
partial_avg(cast(sr_return_quantity#26 as bigint)), partial_stddev_samp(cast(sr_return_quantity#26 as double)), partial_count(cs_quantity#32), partial_avg(cast(cs_quantity#32 as bigint)), partial_stddev_samp(cast(cs_quantity#32 as double))] +Functions [9]: [partial_count(ss_quantity#6), partial_avg(cast(ss_quantity#6 as bigint)), partial_stddev_samp(cast(ss_quantity#6 as double)), partial_count(sr_return_quantity#23), partial_avg(cast(sr_return_quantity#23 as bigint)), partial_stddev_samp(cast(sr_return_quantity#23 as double)), partial_count(cs_quantity#31), partial_avg(cast(cs_quantity#31 as bigint)), partial_stddev_samp(cast(cs_quantity#31 as double))] Aggregate Attributes [18]: [count#35, sum#36, count#37, n#38, avg#39, m2#40, count#41, sum#42, count#43, n#44, avg#45, m2#46, count#47, sum#48, count#49, n#50, avg#51, m2#52] Results [21]: [i_item_id#15, i_item_desc#16, s_state#11, count#53, sum#54, count#55, n#56, avg#57, m2#58, count#59, sum#60, count#61, n#62, avg#63, m2#64, count#65, sum#66, count#67, n#68, avg#69, m2#70] (55) Exchange Input [21]: [i_item_id#15, i_item_desc#16, s_state#11, count#53, sum#54, count#55, n#56, avg#57, m2#58, count#59, sum#60, count#61, n#62, avg#63, m2#64, count#65, sum#66, count#67, n#68, avg#69, m2#70] -Arguments: hashpartitioning(i_item_id#15, i_item_desc#16, s_state#11, 5), true, [id=#71] +Arguments: hashpartitioning(i_item_id#15, i_item_desc#16, s_state#11, 5), ENSURE_REQUIREMENTS, [id=#71] (56) HashAggregate [codegen id : 18] Input [21]: [i_item_id#15, i_item_desc#16, s_state#11, count#53, sum#54, count#55, n#56, avg#57, m2#58, count#59, sum#60, count#61, n#62, avg#63, m2#64, count#65, sum#66, count#67, n#68, avg#69, m2#70] Keys [3]: [i_item_id#15, i_item_desc#16, s_state#11] -Functions [9]: [count(ss_quantity#6), avg(cast(ss_quantity#6 as bigint)), stddev_samp(cast(ss_quantity#6 as double)), count(sr_return_quantity#26), avg(cast(sr_return_quantity#26 as bigint)), stddev_samp(cast(sr_return_quantity#26 as double)), count(cs_quantity#32), avg(cast(cs_quantity#32 as bigint)), stddev_samp(cast(cs_quantity#32 as double))] -Aggregate Attributes [9]: [count(ss_quantity#6)#72, avg(cast(ss_quantity#6 as bigint))#73, stddev_samp(cast(ss_quantity#6 as double))#74, count(sr_return_quantity#26)#75, avg(cast(sr_return_quantity#26 as bigint))#76, stddev_samp(cast(sr_return_quantity#26 as double))#77, count(cs_quantity#32)#78, avg(cast(cs_quantity#32 as bigint))#79, stddev_samp(cast(cs_quantity#32 as double))#80] -Results [15]: [i_item_id#15, i_item_desc#16, s_state#11, count(ss_quantity#6)#72 AS store_sales_quantitycount#81, avg(cast(ss_quantity#6 as bigint))#73 AS store_sales_quantityave#82, stddev_samp(cast(ss_quantity#6 as double))#74 AS store_sales_quantitystdev#83, (stddev_samp(cast(ss_quantity#6 as double))#74 / avg(cast(ss_quantity#6 as bigint))#73) AS store_sales_quantitycov#84, count(sr_return_quantity#26)#75 AS as_store_returns_quantitycount#85, avg(cast(sr_return_quantity#26 as bigint))#76 AS as_store_returns_quantityave#86, stddev_samp(cast(sr_return_quantity#26 as double))#77 AS as_store_returns_quantitystdev#87, (stddev_samp(cast(sr_return_quantity#26 as double))#77 / avg(cast(sr_return_quantity#26 as bigint))#76) AS store_returns_quantitycov#88, count(cs_quantity#32)#78 AS catalog_sales_quantitycount#89, avg(cast(cs_quantity#32 as bigint))#79 AS catalog_sales_quantityave#90, (stddev_samp(cast(cs_quantity#32 as double))#80 / avg(cast(cs_quantity#32 as bigint))#79) AS catalog_sales_quantitystdev#91, (stddev_samp(cast(cs_quantity#32 as double))#80 / 
avg(cast(cs_quantity#32 as bigint))#79) AS catalog_sales_quantitycov#92] +Functions [9]: [count(ss_quantity#6), avg(cast(ss_quantity#6 as bigint)), stddev_samp(cast(ss_quantity#6 as double)), count(sr_return_quantity#23), avg(cast(sr_return_quantity#23 as bigint)), stddev_samp(cast(sr_return_quantity#23 as double)), count(cs_quantity#31), avg(cast(cs_quantity#31 as bigint)), stddev_samp(cast(cs_quantity#31 as double))] +Aggregate Attributes [9]: [count(ss_quantity#6)#72, avg(cast(ss_quantity#6 as bigint))#73, stddev_samp(cast(ss_quantity#6 as double))#74, count(sr_return_quantity#23)#75, avg(cast(sr_return_quantity#23 as bigint))#76, stddev_samp(cast(sr_return_quantity#23 as double))#77, count(cs_quantity#31)#78, avg(cast(cs_quantity#31 as bigint))#79, stddev_samp(cast(cs_quantity#31 as double))#80] +Results [15]: [i_item_id#15, i_item_desc#16, s_state#11, count(ss_quantity#6)#72 AS store_sales_quantitycount#81, avg(cast(ss_quantity#6 as bigint))#73 AS store_sales_quantityave#82, stddev_samp(cast(ss_quantity#6 as double))#74 AS store_sales_quantitystdev#83, (stddev_samp(cast(ss_quantity#6 as double))#74 / avg(cast(ss_quantity#6 as bigint))#73) AS store_sales_quantitycov#84, count(sr_return_quantity#23)#75 AS as_store_returns_quantitycount#85, avg(cast(sr_return_quantity#23 as bigint))#76 AS as_store_returns_quantityave#86, stddev_samp(cast(sr_return_quantity#23 as double))#77 AS as_store_returns_quantitystdev#87, (stddev_samp(cast(sr_return_quantity#23 as double))#77 / avg(cast(sr_return_quantity#23 as bigint))#76) AS store_returns_quantitycov#88, count(cs_quantity#31)#78 AS catalog_sales_quantitycount#89, avg(cast(cs_quantity#31 as bigint))#79 AS catalog_sales_quantityave#90, (stddev_samp(cast(cs_quantity#31 as double))#80 / avg(cast(cs_quantity#31 as bigint))#79) AS catalog_sales_quantitystdev#91, (stddev_samp(cast(cs_quantity#31 as double))#80 / avg(cast(cs_quantity#31 as bigint))#79) AS catalog_sales_quantitycov#92] (57) TakeOrderedAndProject Input [15]: [i_item_id#15, i_item_desc#16, s_state#11, store_sales_quantitycount#81, store_sales_quantityave#82, store_sales_quantitystdev#83, store_sales_quantitycov#84, as_store_returns_quantitycount#85, as_store_returns_quantityave#86, as_store_returns_quantitystdev#87, store_returns_quantitycov#88, catalog_sales_quantitycount#89, catalog_sales_quantityave#90, catalog_sales_quantitystdev#91, catalog_sales_quantitycov#92] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/simplified.txt index 216adf3588eca..79226a34e6768 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q17.sf100/simplified.txt @@ -6,70 +6,74 @@ TakeOrderedAndProject [i_item_id,i_item_desc,s_state,store_sales_quantitycount,s WholeStageCodegen (17) HashAggregate [i_item_id,i_item_desc,s_state,ss_quantity,sr_return_quantity,cs_quantity] [count,sum,count,n,avg,m2,count,sum,count,n,avg,m2,count,sum,count,n,avg,m2,count,sum,count,n,avg,m2,count,sum,count,n,avg,m2,count,sum,count,n,avg,m2] Project [ss_quantity,sr_return_quantity,cs_quantity,s_state,i_item_id,i_item_desc] - SortMergeJoin [sr_customer_sk,sr_item_sk,cs_bill_customer_sk,cs_item_sk] + SortMergeJoin [ss_customer_sk,ss_item_sk,ss_ticket_number,sr_customer_sk,sr_item_sk,sr_ticket_number] InputAdapter - WholeStageCodegen (13) - Sort [sr_customer_sk,sr_item_sk] + 
WholeStageCodegen (8) + Sort [ss_customer_sk,ss_item_sk,ss_ticket_number] InputAdapter - Exchange [sr_customer_sk,sr_item_sk] #2 - WholeStageCodegen (12) - Project [ss_quantity,s_state,i_item_id,i_item_desc,sr_item_sk,sr_customer_sk,sr_return_quantity] - SortMergeJoin [ss_customer_sk,ss_item_sk,ss_ticket_number,sr_customer_sk,sr_item_sk,sr_ticket_number] + Exchange [ss_customer_sk,ss_item_sk,ss_ticket_number] #2 + WholeStageCodegen (7) + Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_quantity,s_state,i_item_id,i_item_desc] + SortMergeJoin [ss_item_sk,i_item_sk] InputAdapter - WholeStageCodegen (8) - Sort [ss_customer_sk,ss_item_sk,ss_ticket_number] + WholeStageCodegen (4) + Sort [ss_item_sk] InputAdapter - Exchange [ss_customer_sk,ss_item_sk,ss_ticket_number] #3 - WholeStageCodegen (7) - Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_quantity,s_state,i_item_id,i_item_desc] - SortMergeJoin [ss_item_sk,i_item_sk] - InputAdapter - WholeStageCodegen (4) - Sort [ss_item_sk] - InputAdapter - Exchange [ss_item_sk] #4 - WholeStageCodegen (3) - Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_quantity,s_state] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_quantity] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_customer_sk,ss_item_sk,ss_ticket_number,ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_quantity] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_quarter_name,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_quarter_name] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_state] - InputAdapter - WholeStageCodegen (6) - Sort [i_item_sk] - InputAdapter - Exchange [i_item_sk] #7 - WholeStageCodegen (5) - Filter [i_item_sk] + Exchange [ss_item_sk] #3 + WholeStageCodegen (3) + Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_quantity,s_state] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_quantity] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_customer_sk,ss_item_sk,ss_ticket_number,ss_sold_date_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_quantity] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_quarter_name,d_date_sk] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_item_id,i_item_desc] + Scan parquet default.date_dim [d_date_sk,d_quarter_name] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_state] + InputAdapter + WholeStageCodegen (6) + Sort [i_item_sk] + InputAdapter + Exchange [i_item_sk] #6 + WholeStageCodegen (5) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_item_id,i_item_desc] + InputAdapter + WholeStageCodegen (16) + Sort [sr_customer_sk,sr_item_sk,sr_ticket_number] + InputAdapter + Exchange [sr_customer_sk,sr_item_sk,sr_ticket_number] #7 + WholeStageCodegen (15) + Project [sr_item_sk,sr_customer_sk,sr_ticket_number,sr_return_quantity,cs_quantity] + 
SortMergeJoin [sr_customer_sk,sr_item_sk,cs_bill_customer_sk,cs_item_sk] InputAdapter WholeStageCodegen (11) - Sort [sr_customer_sk,sr_item_sk,sr_ticket_number] + Sort [sr_customer_sk,sr_item_sk] InputAdapter - Exchange [sr_customer_sk,sr_item_sk,sr_ticket_number] #8 + Exchange [sr_customer_sk,sr_item_sk] #8 WholeStageCodegen (10) Project [sr_item_sk,sr_customer_sk,sr_ticket_number,sr_return_quantity] - BroadcastHashJoin [d_date_sk,sr_returned_date_sk] + BroadcastHashJoin [sr_returned_date_sk,d_date_sk] + Filter [sr_customer_sk,sr_item_sk,sr_ticket_number,sr_returned_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number,sr_return_quantity] InputAdapter BroadcastExchange #9 WholeStageCodegen (9) @@ -78,21 +82,17 @@ TakeOrderedAndProject [i_item_id,i_item_desc,s_state,store_sales_quantitycount,s ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_quarter_name] - Filter [sr_customer_sk,sr_item_sk,sr_ticket_number,sr_returned_date_sk] + InputAdapter + WholeStageCodegen (14) + Sort [cs_bill_customer_sk,cs_item_sk] + InputAdapter + Exchange [cs_bill_customer_sk,cs_item_sk] #10 + WholeStageCodegen (13) + Project [cs_bill_customer_sk,cs_item_sk,cs_quantity] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_bill_customer_sk,cs_item_sk,cs_sold_date_sk] ColumnarToRow InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number,sr_return_quantity] - InputAdapter - WholeStageCodegen (16) - Sort [cs_bill_customer_sk,cs_item_sk] - InputAdapter - Exchange [cs_bill_customer_sk,cs_item_sk] #10 - WholeStageCodegen (15) - Project [cs_bill_customer_sk,cs_item_sk,cs_quantity] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_bill_customer_sk,cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_quantity] - InputAdapter - ReusedExchange [d_date_sk] #9 + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_quantity] + InputAdapter + ReusedExchange [d_date_sk] #9 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt index 516f782057631..12e95ba50cd0d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/explain.txt @@ -34,24 +34,24 @@ TakeOrderedAndProject (53) +- * Sort (46) +- Exchange (45) +- * Project (44) - +- * SortMergeJoin Inner (43) - :- * Sort (37) - : +- Exchange (36) - : +- * Project (35) - : +- * BroadcastHashJoin Inner BuildRight (34) - : :- * Project (29) - : : +- * Filter (28) - : : +- * ColumnarToRow (27) - : : +- Scan parquet default.customer (26) - : +- BroadcastExchange (33) - : +- * Filter (32) - : +- * ColumnarToRow (31) - : +- Scan parquet default.customer_address (30) - +- * Sort (42) - +- Exchange (41) - +- * Filter (40) - +- * ColumnarToRow (39) - +- Scan parquet default.customer_demographics (38) + +- * BroadcastHashJoin Inner BuildRight (43) + :- * Project (38) + : +- * SortMergeJoin Inner (37) + : :- * Sort (31) + : : +- Exchange (30) + : : +- * Project (29) + : : +- * Filter (28) + : : +- * ColumnarToRow (27) + : : +- Scan parquet default.customer (26) + : +- * Sort (36) + : +- Exchange (35) + : +- * Filter (34) 
+ : +- * ColumnarToRow (33) + : +- Scan parquet default.customer_demographics (32) + +- BroadcastExchange (42) + +- * Filter (41) + +- * ColumnarToRow (40) + +- Scan parquet default.customer_address (39) (1) Scan parquet default.catalog_sales @@ -159,7 +159,7 @@ Input [10]: [cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity#5, cs_list_price#6 (24) Exchange Input [8]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19] -Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), true, [id=#21] +Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#21] (25) Sort [codegen id : 5] Input [8]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19] @@ -172,89 +172,89 @@ Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [In(c_birth_month, [1,6,8,9,12,2]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct -(27) ColumnarToRow [codegen id : 7] +(27) ColumnarToRow [codegen id : 6] Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] -(28) Filter [codegen id : 7] +(28) Filter [codegen id : 6] Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] Condition : (((c_birth_month#25 IN (1,6,8,9,12,2) AND isnotnull(c_customer_sk#22)) AND isnotnull(c_current_cdemo_sk#23)) AND isnotnull(c_current_addr_sk#24)) -(29) Project [codegen id : 7] +(29) Project [codegen id : 6] Output [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26] Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] -(30) Scan parquet default.customer_address -Output [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] -Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [In(ca_state, [MS,IN,ND,OK,NM,VA]), IsNotNull(ca_address_sk)] -ReadSchema: struct - -(31) ColumnarToRow [codegen id : 6] -Input [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] - -(32) Filter [codegen id : 6] -Input [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] -Condition : (ca_state#29 IN (MS,IN,ND,OK,NM,VA) AND isnotnull(ca_address_sk#27)) - -(33) BroadcastExchange -Input [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#31] - -(34) BroadcastHashJoin [codegen id : 7] -Left keys [1]: [c_current_addr_sk#24] -Right keys [1]: [ca_address_sk#27] -Join condition: None - -(35) Project [codegen id : 7] -Output [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Input [8]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] - -(36) Exchange -Input [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), true, [id=#32] +(30) Exchange +Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26] +Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), ENSURE_REQUIREMENTS, [id=#27] -(37) Sort [codegen id : 8] -Input [6]: 
[c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] +(31) Sort [codegen id : 7] +Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26] Arguments: [c_current_cdemo_sk#23 ASC NULLS FIRST], false, 0 -(38) Scan parquet default.customer_demographics -Output [1]: [cd_demo_sk#33] +(32) Scan parquet default.customer_demographics +Output [1]: [cd_demo_sk#28] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct -(39) ColumnarToRow [codegen id : 9] -Input [1]: [cd_demo_sk#33] +(33) ColumnarToRow [codegen id : 8] +Input [1]: [cd_demo_sk#28] -(40) Filter [codegen id : 9] -Input [1]: [cd_demo_sk#33] -Condition : isnotnull(cd_demo_sk#33) +(34) Filter [codegen id : 8] +Input [1]: [cd_demo_sk#28] +Condition : isnotnull(cd_demo_sk#28) -(41) Exchange -Input [1]: [cd_demo_sk#33] -Arguments: hashpartitioning(cd_demo_sk#33, 5), true, [id=#34] +(35) Exchange +Input [1]: [cd_demo_sk#28] +Arguments: hashpartitioning(cd_demo_sk#28, 5), ENSURE_REQUIREMENTS, [id=#29] -(42) Sort [codegen id : 10] -Input [1]: [cd_demo_sk#33] -Arguments: [cd_demo_sk#33 ASC NULLS FIRST], false, 0 +(36) Sort [codegen id : 9] +Input [1]: [cd_demo_sk#28] +Arguments: [cd_demo_sk#28 ASC NULLS FIRST], false, 0 -(43) SortMergeJoin [codegen id : 11] +(37) SortMergeJoin [codegen id : 11] Left keys [1]: [c_current_cdemo_sk#23] -Right keys [1]: [cd_demo_sk#33] +Right keys [1]: [cd_demo_sk#28] +Join condition: None + +(38) Project [codegen id : 11] +Output [3]: [c_customer_sk#22, c_current_addr_sk#24, c_birth_year#26] +Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, cd_demo_sk#28] + +(39) Scan parquet default.customer_address +Output [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [In(ca_state, [MS,IN,ND,OK,NM,VA]), IsNotNull(ca_address_sk)] +ReadSchema: struct + +(40) ColumnarToRow [codegen id : 10] +Input [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] + +(41) Filter [codegen id : 10] +Input [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] +Condition : (ca_state#32 IN (MS,IN,ND,OK,NM,VA) AND isnotnull(ca_address_sk#30)) + +(42) BroadcastExchange +Input [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#34] + +(43) BroadcastHashJoin [codegen id : 11] +Left keys [1]: [c_current_addr_sk#24] +Right keys [1]: [ca_address_sk#30] Join condition: None (44) Project [codegen id : 11] -Output [5]: [c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Input [7]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30, cd_demo_sk#33] +Output [5]: [c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] +Input [7]: [c_customer_sk#22, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] (45) Exchange -Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Arguments: hashpartitioning(c_customer_sk#22, 5), true, [id=#35] +Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] +Arguments: hashpartitioning(c_customer_sk#22, 5), ENSURE_REQUIREMENTS, [id=#35] (46) Sort [codegen id 
: 12] -Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] +Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] Arguments: [c_customer_sk#22 ASC NULLS FIRST], false, 0 (47) SortMergeJoin [codegen id : 13] @@ -263,12 +263,12 @@ Right keys [1]: [c_customer_sk#22] Join condition: None (48) Project [codegen id : 13] -Output [11]: [cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#30, ca_state#29, ca_county#28] -Input [13]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] +Output [11]: [cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#33, ca_state#32, ca_county#31] +Input [13]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] (49) Expand [codegen id : 13] -Input [11]: [cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#30, ca_state#29, ca_county#28] -Arguments: [List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#30, ca_state#29, ca_county#28, 0), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#30, ca_state#29, null, 1), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#30, null, null, 3), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, null, null, null, 7), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, null, null, null, null, 15)], [cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_grouping_id#40] +Input [11]: [cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#33, ca_state#32, ca_county#31] +Arguments: [List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#33, ca_state#32, ca_county#31, 0), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#33, ca_state#32, null, 1), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, ca_country#33, null, null, 3), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#19, null, null, null, 7), List(cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, null, null, null, null, 15)], [cs_quantity#5, 
cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_grouping_id#40] (50) HashAggregate [codegen id : 13] Input [12]: [cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, c_birth_year#26, i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_grouping_id#40] @@ -279,7 +279,7 @@ Results [19]: [i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_gro (51) Exchange Input [19]: [i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_grouping_id#40, sum#55, count#56, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68] -Arguments: hashpartitioning(i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_grouping_id#40, 5), true, [id=#69] +Arguments: hashpartitioning(i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_grouping_id#40, 5), ENSURE_REQUIREMENTS, [id=#69] (52) HashAggregate [codegen id : 14] Input [19]: [i_item_id#36, ca_country#37, ca_state#38, ca_county#39, spark_grouping_id#40, sum#55, count#56, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/simplified.txt index 8c76e7cab3310..8069d43c3451a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q18.sf100/simplified.txt @@ -54,34 +54,34 @@ TakeOrderedAndProject [ca_country,ca_state,ca_county,i_item_id,agg1,agg2,agg3,ag Exchange [c_customer_sk] #6 WholeStageCodegen (11) Project [c_customer_sk,c_birth_year,ca_county,ca_state,ca_country] - SortMergeJoin [c_current_cdemo_sk,cd_demo_sk] - InputAdapter - WholeStageCodegen (8) - Sort [c_current_cdemo_sk] - InputAdapter - Exchange [c_current_cdemo_sk] #7 - WholeStageCodegen (7) - Project [c_customer_sk,c_current_cdemo_sk,c_birth_year,ca_county,ca_state,ca_country] - BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + Project [c_customer_sk,c_current_addr_sk,c_birth_year] + SortMergeJoin [c_current_cdemo_sk,cd_demo_sk] + InputAdapter + WholeStageCodegen (7) + Sort [c_current_cdemo_sk] + InputAdapter + Exchange [c_current_cdemo_sk] #7 + WholeStageCodegen (6) Project [c_customer_sk,c_current_cdemo_sk,c_current_addr_sk,c_birth_year] Filter [c_birth_month,c_customer_sk,c_current_cdemo_sk,c_current_addr_sk] ColumnarToRow InputAdapter Scan parquet default.customer [c_customer_sk,c_current_cdemo_sk,c_current_addr_sk,c_birth_month,c_birth_year] - InputAdapter - BroadcastExchange #8 - WholeStageCodegen (6) - Filter [ca_state,ca_address_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_county,ca_state,ca_country] + InputAdapter + WholeStageCodegen (9) + Sort [cd_demo_sk] + InputAdapter + Exchange [cd_demo_sk] #8 + WholeStageCodegen (8) + Filter [cd_demo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_demographics [cd_demo_sk] InputAdapter - WholeStageCodegen (10) - Sort [cd_demo_sk] - InputAdapter - Exchange [cd_demo_sk] #9 - WholeStageCodegen (9) - Filter [cd_demo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer_demographics [cd_demo_sk] + 
BroadcastExchange #9 + WholeStageCodegen (10) + Filter [ca_state,ca_address_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_address [ca_address_sk,ca_county,ca_state,ca_country] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt index 0fbe0ccef6d13..4627bc19f25f0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/explain.txt @@ -4,248 +4,248 @@ TakeOrderedAndProject (45) +- Exchange (43) +- * HashAggregate (42) +- * Project (41) - +- * SortMergeJoin Inner (40) - :- * Sort (25) - : +- Exchange (24) - : +- * Project (23) - : +- * BroadcastHashJoin Inner BuildRight (22) - : :- * Project (17) - : : +- * BroadcastHashJoin Inner BuildRight (16) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : :- BroadcastExchange (5) - : : : : +- * Project (4) - : : : : +- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.item (1) - : : : +- * Filter (8) - : : : +- * ColumnarToRow (7) - : : : +- Scan parquet default.store_sales (6) - : : +- BroadcastExchange (15) - : : +- * Project (14) - : : +- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.date_dim (11) - : +- BroadcastExchange (21) - : +- * Filter (20) - : +- * ColumnarToRow (19) - : +- Scan parquet default.store (18) - +- * Sort (39) - +- Exchange (38) - +- * Project (37) - +- * SortMergeJoin Inner (36) - :- * Sort (30) - : +- Exchange (29) - : +- * Filter (28) - : +- * ColumnarToRow (27) - : +- Scan parquet default.customer_address (26) - +- * Sort (35) - +- Exchange (34) - +- * Filter (33) - +- * ColumnarToRow (32) - +- Scan parquet default.customer (31) - - -(1) Scan parquet default.item -Output [6]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, i_manager_id#6] + +- * BroadcastHashJoin Inner BuildRight (40) + :- * Project (34) + : +- * SortMergeJoin Inner (33) + : :- * Sort (18) + : : +- Exchange (17) + : : +- * Project (16) + : : +- * BroadcastHashJoin Inner BuildRight (15) + : : :- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildLeft (9) + : : : :- BroadcastExchange (5) + : : : : +- * Project (4) + : : : : +- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.date_dim (1) + : : : +- * Filter (8) + : : : +- * ColumnarToRow (7) + : : : +- Scan parquet default.store_sales (6) + : : +- BroadcastExchange (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.store (11) + : +- * Sort (32) + : +- Exchange (31) + : +- * Project (30) + : +- * SortMergeJoin Inner (29) + : :- * Sort (23) + : : +- Exchange (22) + : : +- * Filter (21) + : : +- * ColumnarToRow (20) + : : +- Scan parquet default.customer (19) + : +- * Sort (28) + : +- Exchange (27) + : +- * Filter (26) + : +- * ColumnarToRow (25) + : +- Scan parquet default.customer_address (24) + +- BroadcastExchange (39) + +- * Project (38) + +- * Filter (37) + +- * ColumnarToRow (36) + +- Scan parquet default.item (35) + + +(1) Scan parquet default.date_dim +Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location [not included in comparison]/{warehouse_dir}/item] -PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,8), IsNotNull(i_item_sk)] -ReadSchema: struct +Location [not included in 
comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1998), IsNotNull(d_date_sk)] +ReadSchema: struct (2) ColumnarToRow [codegen id : 1] -Input [6]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, i_manager_id#6] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] (3) Filter [codegen id : 1] -Input [6]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, i_manager_id#6] -Condition : ((isnotnull(i_manager_id#6) AND (i_manager_id#6 = 8)) AND isnotnull(i_item_sk#1)) +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] +Condition : ((((isnotnull(d_moy#3) AND isnotnull(d_year#2)) AND (d_moy#3 = 11)) AND (d_year#2 = 1998)) AND isnotnull(d_date_sk#1)) (4) Project [codegen id : 1] -Output [5]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5] -Input [6]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, i_manager_id#6] +Output [1]: [d_date_sk#1] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] (5) BroadcastExchange -Input [5]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#7] +Input [1]: [d_date_sk#1] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#4] (6) Scan parquet default.store_sales -Output [5]: [ss_sold_date_sk#8, ss_item_sk#9, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12] +Output [5]: [ss_sold_date_sk#5, ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_store_sk)] ReadSchema: struct (7) ColumnarToRow -Input [5]: [ss_sold_date_sk#8, ss_item_sk#9, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12] +Input [5]: [ss_sold_date_sk#5, ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9] (8) Filter -Input [5]: [ss_sold_date_sk#8, ss_item_sk#9, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12] -Condition : (((isnotnull(ss_sold_date_sk#8) AND isnotnull(ss_item_sk#9)) AND isnotnull(ss_customer_sk#10)) AND isnotnull(ss_store_sk#11)) +Input [5]: [ss_sold_date_sk#5, ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9] +Condition : (((isnotnull(ss_sold_date_sk#5) AND isnotnull(ss_item_sk#6)) AND isnotnull(ss_customer_sk#7)) AND isnotnull(ss_store_sk#8)) -(9) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [i_item_sk#1] -Right keys [1]: [ss_item_sk#9] +(9) BroadcastHashJoin [codegen id : 3] +Left keys [1]: [d_date_sk#1] +Right keys [1]: [ss_sold_date_sk#5] Join condition: None -(10) Project [codegen id : 4] -Output [8]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_sold_date_sk#8, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12] -Input [10]: [i_item_sk#1, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_sold_date_sk#8, ss_item_sk#9, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12] +(10) Project [codegen id : 3] +Output [4]: [ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9] +Input [6]: [d_date_sk#1, ss_sold_date_sk#5, ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9] -(11) Scan parquet default.date_dim -Output [3]: [d_date_sk#13, d_year#14, d_moy#15] +(11) Scan parquet default.store +Output [2]: [s_store_sk#10, s_zip#11] Batched: true -Location [not included in 
comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1998), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/store] +PushedFilters: [IsNotNull(s_zip), IsNotNull(s_store_sk)] +ReadSchema: struct (12) ColumnarToRow [codegen id : 2] -Input [3]: [d_date_sk#13, d_year#14, d_moy#15] +Input [2]: [s_store_sk#10, s_zip#11] (13) Filter [codegen id : 2] -Input [3]: [d_date_sk#13, d_year#14, d_moy#15] -Condition : ((((isnotnull(d_moy#15) AND isnotnull(d_year#14)) AND (d_moy#15 = 11)) AND (d_year#14 = 1998)) AND isnotnull(d_date_sk#13)) +Input [2]: [s_store_sk#10, s_zip#11] +Condition : (isnotnull(s_zip#11) AND isnotnull(s_store_sk#10)) -(14) Project [codegen id : 2] -Output [1]: [d_date_sk#13] -Input [3]: [d_date_sk#13, d_year#14, d_moy#15] +(14) BroadcastExchange +Input [2]: [s_store_sk#10, s_zip#11] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#12] -(15) BroadcastExchange -Input [1]: [d_date_sk#13] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] - -(16) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [ss_sold_date_sk#8] -Right keys [1]: [d_date_sk#13] +(15) BroadcastHashJoin [codegen id : 3] +Left keys [1]: [ss_store_sk#8] +Right keys [1]: [s_store_sk#10] Join condition: None -(17) Project [codegen id : 4] -Output [7]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12] -Input [9]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_sold_date_sk#8, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12, d_date_sk#13] - -(18) Scan parquet default.store -Output [2]: [s_store_sk#17, s_zip#18] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store] -PushedFilters: [IsNotNull(s_zip), IsNotNull(s_store_sk)] -ReadSchema: struct +(16) Project [codegen id : 3] +Output [4]: [ss_item_sk#6, ss_customer_sk#7, ss_ext_sales_price#9, s_zip#11] +Input [6]: [ss_item_sk#6, ss_customer_sk#7, ss_store_sk#8, ss_ext_sales_price#9, s_store_sk#10, s_zip#11] -(19) ColumnarToRow [codegen id : 3] -Input [2]: [s_store_sk#17, s_zip#18] +(17) Exchange +Input [4]: [ss_item_sk#6, ss_customer_sk#7, ss_ext_sales_price#9, s_zip#11] +Arguments: hashpartitioning(ss_customer_sk#7, 5), ENSURE_REQUIREMENTS, [id=#13] -(20) Filter [codegen id : 3] -Input [2]: [s_store_sk#17, s_zip#18] -Condition : (isnotnull(s_zip#18) AND isnotnull(s_store_sk#17)) +(18) Sort [codegen id : 4] +Input [4]: [ss_item_sk#6, ss_customer_sk#7, ss_ext_sales_price#9, s_zip#11] +Arguments: [ss_customer_sk#7 ASC NULLS FIRST], false, 0 -(21) BroadcastExchange -Input [2]: [s_store_sk#17, s_zip#18] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#19] +(19) Scan parquet default.customer +Output [2]: [c_customer_sk#14, c_current_addr_sk#15] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer] +PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] +ReadSchema: struct -(22) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [ss_store_sk#11] -Right keys [1]: [s_store_sk#17] -Join condition: None +(20) ColumnarToRow [codegen id : 5] +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] -(23) Project [codegen id : 4] -Output [7]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_customer_sk#10, ss_ext_sales_price#12, s_zip#18] -Input [9]: [i_brand_id#2, i_brand#3, 
i_manufact_id#4, i_manufact#5, ss_customer_sk#10, ss_store_sk#11, ss_ext_sales_price#12, s_store_sk#17, s_zip#18] +(21) Filter [codegen id : 5] +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] +Condition : (isnotnull(c_customer_sk#14) AND isnotnull(c_current_addr_sk#15)) -(24) Exchange -Input [7]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_customer_sk#10, ss_ext_sales_price#12, s_zip#18] -Arguments: hashpartitioning(ss_customer_sk#10, 5), true, [id=#20] +(22) Exchange +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] +Arguments: hashpartitioning(c_current_addr_sk#15, 5), ENSURE_REQUIREMENTS, [id=#16] -(25) Sort [codegen id : 5] -Input [7]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_customer_sk#10, ss_ext_sales_price#12, s_zip#18] -Arguments: [ss_customer_sk#10 ASC NULLS FIRST], false, 0 +(23) Sort [codegen id : 6] +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] +Arguments: [c_current_addr_sk#15 ASC NULLS FIRST], false, 0 -(26) Scan parquet default.customer_address -Output [2]: [ca_address_sk#21, ca_zip#22] +(24) Scan parquet default.customer_address +Output [2]: [ca_address_sk#17, ca_zip#18] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_zip)] ReadSchema: struct -(27) ColumnarToRow [codegen id : 6] -Input [2]: [ca_address_sk#21, ca_zip#22] +(25) ColumnarToRow [codegen id : 7] +Input [2]: [ca_address_sk#17, ca_zip#18] -(28) Filter [codegen id : 6] -Input [2]: [ca_address_sk#21, ca_zip#22] -Condition : (isnotnull(ca_address_sk#21) AND isnotnull(ca_zip#22)) +(26) Filter [codegen id : 7] +Input [2]: [ca_address_sk#17, ca_zip#18] +Condition : (isnotnull(ca_address_sk#17) AND isnotnull(ca_zip#18)) -(29) Exchange -Input [2]: [ca_address_sk#21, ca_zip#22] -Arguments: hashpartitioning(ca_address_sk#21, 5), true, [id=#23] +(27) Exchange +Input [2]: [ca_address_sk#17, ca_zip#18] +Arguments: hashpartitioning(ca_address_sk#17, 5), ENSURE_REQUIREMENTS, [id=#19] -(30) Sort [codegen id : 7] -Input [2]: [ca_address_sk#21, ca_zip#22] -Arguments: [ca_address_sk#21 ASC NULLS FIRST], false, 0 +(28) Sort [codegen id : 8] +Input [2]: [ca_address_sk#17, ca_zip#18] +Arguments: [ca_address_sk#17 ASC NULLS FIRST], false, 0 -(31) Scan parquet default.customer -Output [2]: [c_customer_sk#24, c_current_addr_sk#25] -Batched: true -Location [not included in comparison]/{warehouse_dir}/customer] -PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] -ReadSchema: struct +(29) SortMergeJoin [codegen id : 9] +Left keys [1]: [c_current_addr_sk#15] +Right keys [1]: [ca_address_sk#17] +Join condition: None -(32) ColumnarToRow [codegen id : 8] -Input [2]: [c_customer_sk#24, c_current_addr_sk#25] +(30) Project [codegen id : 9] +Output [2]: [c_customer_sk#14, ca_zip#18] +Input [4]: [c_customer_sk#14, c_current_addr_sk#15, ca_address_sk#17, ca_zip#18] -(33) Filter [codegen id : 8] -Input [2]: [c_customer_sk#24, c_current_addr_sk#25] -Condition : (isnotnull(c_customer_sk#24) AND isnotnull(c_current_addr_sk#25)) +(31) Exchange +Input [2]: [c_customer_sk#14, ca_zip#18] +Arguments: hashpartitioning(c_customer_sk#14, 5), ENSURE_REQUIREMENTS, [id=#20] -(34) Exchange -Input [2]: [c_customer_sk#24, c_current_addr_sk#25] -Arguments: hashpartitioning(c_current_addr_sk#25, 5), true, [id=#26] +(32) Sort [codegen id : 10] +Input [2]: [c_customer_sk#14, ca_zip#18] +Arguments: [c_customer_sk#14 ASC NULLS FIRST], false, 0 -(35) Sort [codegen id : 9] -Input [2]: [c_customer_sk#24, 
c_current_addr_sk#25] -Arguments: [c_current_addr_sk#25 ASC NULLS FIRST], false, 0 +(33) SortMergeJoin [codegen id : 12] +Left keys [1]: [ss_customer_sk#7] +Right keys [1]: [c_customer_sk#14] +Join condition: NOT (substr(ca_zip#18, 1, 5) = substr(s_zip#11, 1, 5)) -(36) SortMergeJoin [codegen id : 10] -Left keys [1]: [ca_address_sk#21] -Right keys [1]: [c_current_addr_sk#25] -Join condition: None +(34) Project [codegen id : 12] +Output [2]: [ss_item_sk#6, ss_ext_sales_price#9] +Input [6]: [ss_item_sk#6, ss_customer_sk#7, ss_ext_sales_price#9, s_zip#11, c_customer_sk#14, ca_zip#18] -(37) Project [codegen id : 10] -Output [2]: [ca_zip#22, c_customer_sk#24] -Input [4]: [ca_address_sk#21, ca_zip#22, c_customer_sk#24, c_current_addr_sk#25] +(35) Scan parquet default.item +Output [6]: [i_item_sk#21, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25, i_manager_id#26] +Batched: true +Location [not included in comparison]/{warehouse_dir}/item] +PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,8), IsNotNull(i_item_sk)] +ReadSchema: struct + +(36) ColumnarToRow [codegen id : 11] +Input [6]: [i_item_sk#21, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25, i_manager_id#26] -(38) Exchange -Input [2]: [ca_zip#22, c_customer_sk#24] -Arguments: hashpartitioning(c_customer_sk#24, 5), true, [id=#27] +(37) Filter [codegen id : 11] +Input [6]: [i_item_sk#21, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25, i_manager_id#26] +Condition : ((isnotnull(i_manager_id#26) AND (i_manager_id#26 = 8)) AND isnotnull(i_item_sk#21)) -(39) Sort [codegen id : 11] -Input [2]: [ca_zip#22, c_customer_sk#24] -Arguments: [c_customer_sk#24 ASC NULLS FIRST], false, 0 +(38) Project [codegen id : 11] +Output [5]: [i_item_sk#21, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25] +Input [6]: [i_item_sk#21, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25, i_manager_id#26] -(40) SortMergeJoin [codegen id : 12] -Left keys [1]: [ss_customer_sk#10] -Right keys [1]: [c_customer_sk#24] -Join condition: NOT (substr(ca_zip#22, 1, 5) = substr(s_zip#18, 1, 5)) +(39) BroadcastExchange +Input [5]: [i_item_sk#21, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#27] + +(40) BroadcastHashJoin [codegen id : 12] +Left keys [1]: [ss_item_sk#6] +Right keys [1]: [i_item_sk#21] +Join condition: None (41) Project [codegen id : 12] -Output [5]: [ss_ext_sales_price#12, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5] -Input [9]: [i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5, ss_customer_sk#10, ss_ext_sales_price#12, s_zip#18, ca_zip#22, c_customer_sk#24] +Output [5]: [ss_ext_sales_price#9, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25] +Input [7]: [ss_item_sk#6, ss_ext_sales_price#9, i_item_sk#21, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25] (42) HashAggregate [codegen id : 12] -Input [5]: [ss_ext_sales_price#12, i_brand_id#2, i_brand#3, i_manufact_id#4, i_manufact#5] -Keys [4]: [i_brand#3, i_brand_id#2, i_manufact_id#4, i_manufact#5] -Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#12))] +Input [5]: [ss_ext_sales_price#9, i_brand_id#22, i_brand#23, i_manufact_id#24, i_manufact#25] +Keys [4]: [i_brand#23, i_brand_id#22, i_manufact_id#24, i_manufact#25] +Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#9))] Aggregate Attributes [1]: [sum#28] -Results [5]: [i_brand#3, i_brand_id#2, i_manufact_id#4, i_manufact#5, sum#29] +Results 
[5]: [i_brand#23, i_brand_id#22, i_manufact_id#24, i_manufact#25, sum#29] (43) Exchange -Input [5]: [i_brand#3, i_brand_id#2, i_manufact_id#4, i_manufact#5, sum#29] -Arguments: hashpartitioning(i_brand#3, i_brand_id#2, i_manufact_id#4, i_manufact#5, 5), true, [id=#30] +Input [5]: [i_brand#23, i_brand_id#22, i_manufact_id#24, i_manufact#25, sum#29] +Arguments: hashpartitioning(i_brand#23, i_brand_id#22, i_manufact_id#24, i_manufact#25, 5), ENSURE_REQUIREMENTS, [id=#30] (44) HashAggregate [codegen id : 13] -Input [5]: [i_brand#3, i_brand_id#2, i_manufact_id#4, i_manufact#5, sum#29] -Keys [4]: [i_brand#3, i_brand_id#2, i_manufact_id#4, i_manufact#5] -Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#12))] -Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#12))#31] -Results [5]: [i_brand_id#2 AS brand_id#32, i_brand#3 AS brand#33, i_manufact_id#4, i_manufact#5, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#12))#31,17,2) AS ext_price#34] +Input [5]: [i_brand#23, i_brand_id#22, i_manufact_id#24, i_manufact#25, sum#29] +Keys [4]: [i_brand#23, i_brand_id#22, i_manufact_id#24, i_manufact#25] +Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#9))] +Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#9))#31] +Results [5]: [i_brand_id#22 AS brand_id#32, i_brand#23 AS brand#33, i_manufact_id#24, i_manufact#25, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#9))#31,17,2) AS ext_price#34] (45) TakeOrderedAndProject -Input [5]: [brand_id#32, brand#33, i_manufact_id#4, i_manufact#5, ext_price#34] -Arguments: 100, [ext_price#34 DESC NULLS LAST, brand#33 ASC NULLS FIRST, brand_id#32 ASC NULLS FIRST, i_manufact_id#4 ASC NULLS FIRST, i_manufact#5 ASC NULLS FIRST], [brand_id#32, brand#33, i_manufact_id#4, i_manufact#5, ext_price#34] +Input [5]: [brand_id#32, brand#33, i_manufact_id#24, i_manufact#25, ext_price#34] +Arguments: 100, [ext_price#34 DESC NULLS LAST, brand#33 ASC NULLS FIRST, brand_id#32 ASC NULLS FIRST, i_manufact_id#24 ASC NULLS FIRST, i_manufact#25 ASC NULLS FIRST], [brand_id#32, brand#33, i_manufact_id#24, i_manufact#25, ext_price#34] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/simplified.txt index c8737d8a70782..b6441c5fe72c1 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q19.sf100/simplified.txt @@ -6,27 +6,27 @@ TakeOrderedAndProject [ext_price,brand,brand_id,i_manufact_id,i_manufact] WholeStageCodegen (12) HashAggregate [i_brand,i_brand_id,i_manufact_id,i_manufact,ss_ext_sales_price] [sum,sum] Project [ss_ext_sales_price,i_brand_id,i_brand,i_manufact_id,i_manufact] - SortMergeJoin [ss_customer_sk,c_customer_sk,ca_zip,s_zip] - InputAdapter - WholeStageCodegen (5) - Sort [ss_customer_sk] - InputAdapter - Exchange [ss_customer_sk] #2 - WholeStageCodegen (4) - Project [i_brand_id,i_brand,i_manufact_id,i_manufact,ss_customer_sk,ss_ext_sales_price,s_zip] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [i_brand_id,i_brand,i_manufact_id,i_manufact,ss_customer_sk,ss_store_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [i_brand_id,i_brand,i_manufact_id,i_manufact,ss_sold_date_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] - BroadcastHashJoin [i_item_sk,ss_item_sk] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ext_sales_price] + 
SortMergeJoin [ss_customer_sk,c_customer_sk,ca_zip,s_zip] + InputAdapter + WholeStageCodegen (4) + Sort [ss_customer_sk] + InputAdapter + Exchange [ss_customer_sk] #2 + WholeStageCodegen (3) + Project [ss_item_sk,ss_customer_sk,ss_ext_sales_price,s_zip] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] + BroadcastHashJoin [d_date_sk,ss_sold_date_sk] InputAdapter BroadcastExchange #3 WholeStageCodegen (1) - Project [i_item_sk,i_brand_id,i_brand,i_manufact_id,i_manufact] - Filter [i_manager_id,i_item_sk] + Project [d_date_sk] + Filter [d_moy,d_year,d_date_sk] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_brand,i_manufact_id,i_manufact,i_manager_id] + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] Filter [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk] ColumnarToRow InputAdapter @@ -34,43 +34,43 @@ TakeOrderedAndProject [ext_price,brand,brand_id,i_manufact_id,i_manufact] InputAdapter BroadcastExchange #4 WholeStageCodegen (2) - Project [d_date_sk] - Filter [d_moy,d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (3) - Filter [s_zip,s_store_sk] - ColumnarToRow + Filter [s_zip,s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_zip] + InputAdapter + WholeStageCodegen (10) + Sort [c_customer_sk] + InputAdapter + Exchange [c_customer_sk] #5 + WholeStageCodegen (9) + Project [c_customer_sk,ca_zip] + SortMergeJoin [c_current_addr_sk,ca_address_sk] + InputAdapter + WholeStageCodegen (6) + Sort [c_current_addr_sk] + InputAdapter + Exchange [c_current_addr_sk] #6 + WholeStageCodegen (5) + Filter [c_customer_sk,c_current_addr_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer [c_customer_sk,c_current_addr_sk] + InputAdapter + WholeStageCodegen (8) + Sort [ca_address_sk] InputAdapter - Scan parquet default.store [s_store_sk,s_zip] + Exchange [ca_address_sk] #7 + WholeStageCodegen (7) + Filter [ca_address_sk,ca_zip] + ColumnarToRow + InputAdapter + Scan parquet default.customer_address [ca_address_sk,ca_zip] InputAdapter - WholeStageCodegen (11) - Sort [c_customer_sk] - InputAdapter - Exchange [c_customer_sk] #6 - WholeStageCodegen (10) - Project [ca_zip,c_customer_sk] - SortMergeJoin [ca_address_sk,c_current_addr_sk] - InputAdapter - WholeStageCodegen (7) - Sort [ca_address_sk] - InputAdapter - Exchange [ca_address_sk] #7 - WholeStageCodegen (6) - Filter [ca_address_sk,ca_zip] - ColumnarToRow - InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_zip] - InputAdapter - WholeStageCodegen (9) - Sort [c_current_addr_sk] - InputAdapter - Exchange [c_current_addr_sk] #8 - WholeStageCodegen (8) - Filter [c_customer_sk,c_current_addr_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_current_addr_sk] + BroadcastExchange #8 + WholeStageCodegen (11) + Project [i_item_sk,i_brand_id,i_brand,i_manufact_id,i_manufact] + Filter [i_manager_id,i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_brand_id,i_brand,i_manufact_id,i_manufact,i_manager_id] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt index fe5966bb4dfb3..52dfff442bf3a 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/explain.txt @@ -1,46 +1,43 @@ == Physical Plan == -* Sort (42) -+- Exchange (41) - +- * Project (40) - +- * SortMergeJoin Inner (39) - :- * Sort (27) - : +- Exchange (26) - : +- * Project (25) - : +- * BroadcastHashJoin Inner BuildRight (24) - : :- * HashAggregate (18) - : : +- Exchange (17) - : : +- * HashAggregate (16) - : : +- * Project (15) - : : +- * BroadcastHashJoin Inner BuildRight (14) - : : :- Union (9) - : : : :- * Project (4) - : : : : +- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.web_sales (1) - : : : +- * Project (8) - : : : +- * Filter (7) - : : : +- * ColumnarToRow (6) - : : : +- Scan parquet default.catalog_sales (5) - : : +- BroadcastExchange (13) - : : +- * Filter (12) - : : +- * ColumnarToRow (11) - : : +- Scan parquet default.date_dim (10) - : +- BroadcastExchange (23) - : +- * Project (22) - : +- * Filter (21) - : +- * ColumnarToRow (20) - : +- Scan parquet default.date_dim (19) - +- * Sort (38) - +- Exchange (37) - +- * Project (36) - +- * BroadcastHashJoin Inner BuildRight (35) - :- * HashAggregate (29) - : +- ReusedExchange (28) - +- BroadcastExchange (34) - +- * Project (33) - +- * Filter (32) - +- * ColumnarToRow (31) - +- Scan parquet default.date_dim (30) +* Sort (39) ++- Exchange (38) + +- * Project (37) + +- * BroadcastHashJoin Inner BuildRight (36) + :- * Project (25) + : +- * BroadcastHashJoin Inner BuildRight (24) + : :- * HashAggregate (18) + : : +- Exchange (17) + : : +- * HashAggregate (16) + : : +- * Project (15) + : : +- * BroadcastHashJoin Inner BuildRight (14) + : : :- Union (9) + : : : :- * Project (4) + : : : : +- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.web_sales (1) + : : : +- * Project (8) + : : : +- * Filter (7) + : : : +- * ColumnarToRow (6) + : : : +- Scan parquet default.catalog_sales (5) + : : +- BroadcastExchange (13) + : : +- * Filter (12) + : : +- * ColumnarToRow (11) + : : +- Scan parquet default.date_dim (10) + : +- BroadcastExchange (23) + : +- * Project (22) + : +- * Filter (21) + : +- * ColumnarToRow (20) + : +- Scan parquet default.date_dim (19) + +- BroadcastExchange (35) + +- * Project (34) + +- * BroadcastHashJoin Inner BuildRight (33) + :- * HashAggregate (27) + : +- ReusedExchange (26) + +- BroadcastExchange (32) + +- * Project (31) + +- * Filter (30) + +- * ColumnarToRow (29) + +- Scan parquet default.date_dim (28) (1) Scan parquet default.web_sales @@ -117,9 +114,9 @@ Results [8]: [d_week_seq#10, sum#20, sum#21, sum#22, sum#23, sum#24, sum#25, sum (17) Exchange Input [8]: [d_week_seq#10, sum#20, sum#21, sum#22, sum#23, sum#24, sum#25, sum#26] -Arguments: hashpartitioning(d_week_seq#10, 5), true, [id=#27] +Arguments: hashpartitioning(d_week_seq#10, 5), ENSURE_REQUIREMENTS, [id=#27] -(18) HashAggregate [codegen id : 6] +(18) HashAggregate [codegen id : 12] Input [8]: [d_week_seq#10, sum#20, sum#21, sum#22, sum#23, sum#24, sum#25, sum#26] Keys [1]: [d_week_seq#10] Functions [7]: [sum(UnscaledValue(CASE WHEN (d_day_name#11 = Sunday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Monday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Tuesday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Wednesday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = 
Thursday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Friday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Saturday) THEN sales_price#4 ELSE null END))] @@ -148,86 +145,74 @@ Input [2]: [d_week_seq#42, d_year#43] Input [1]: [d_week_seq#42] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#44] -(24) BroadcastHashJoin [codegen id : 6] +(24) BroadcastHashJoin [codegen id : 12] Left keys [1]: [d_week_seq#10] Right keys [1]: [d_week_seq#42] Join condition: None -(25) Project [codegen id : 6] +(25) Project [codegen id : 12] Output [8]: [d_week_seq#10 AS d_week_seq1#45, sun_sales#35 AS sun_sales1#46, mon_sales#36 AS mon_sales1#47, tue_sales#37 AS tue_sales1#48, wed_sales#38 AS wed_sales1#49, thu_sales#39 AS thu_sales1#50, fri_sales#40 AS fri_sales1#51, sat_sales#41 AS sat_sales1#52] Input [9]: [d_week_seq#10, sun_sales#35, mon_sales#36, tue_sales#37, wed_sales#38, thu_sales#39, fri_sales#40, sat_sales#41, d_week_seq#42] -(26) Exchange -Input [8]: [d_week_seq1#45, sun_sales1#46, mon_sales1#47, tue_sales1#48, wed_sales1#49, thu_sales1#50, fri_sales1#51, sat_sales1#52] -Arguments: hashpartitioning(d_week_seq1#45, 5), true, [id=#53] +(26) ReusedExchange [Reuses operator id: 17] +Output [8]: [d_week_seq#10, sum#53, sum#54, sum#55, sum#56, sum#57, sum#58, sum#59] -(27) Sort [codegen id : 7] -Input [8]: [d_week_seq1#45, sun_sales1#46, mon_sales1#47, tue_sales1#48, wed_sales1#49, thu_sales1#50, fri_sales1#51, sat_sales1#52] -Arguments: [d_week_seq1#45 ASC NULLS FIRST], false, 0 - -(28) ReusedExchange [Reuses operator id: 17] -Output [8]: [d_week_seq#10, sum#54, sum#55, sum#56, sum#57, sum#58, sum#59, sum#60] - -(29) HashAggregate [codegen id : 13] -Input [8]: [d_week_seq#10, sum#54, sum#55, sum#56, sum#57, sum#58, sum#59, sum#60] +(27) HashAggregate [codegen id : 11] +Input [8]: [d_week_seq#10, sum#53, sum#54, sum#55, sum#56, sum#57, sum#58, sum#59] Keys [1]: [d_week_seq#10] Functions [7]: [sum(UnscaledValue(CASE WHEN (d_day_name#11 = Sunday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Monday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Tuesday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Wednesday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Thursday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Friday) THEN sales_price#4 ELSE null END)), sum(UnscaledValue(CASE WHEN (d_day_name#11 = Saturday) THEN sales_price#4 ELSE null END))] -Aggregate Attributes [7]: [sum(UnscaledValue(CASE WHEN (d_day_name#11 = Sunday) THEN sales_price#4 ELSE null END))#61, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Monday) THEN sales_price#4 ELSE null END))#62, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Tuesday) THEN sales_price#4 ELSE null END))#63, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Wednesday) THEN sales_price#4 ELSE null END))#64, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Thursday) THEN sales_price#4 ELSE null END))#65, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Friday) THEN sales_price#4 ELSE null END))#66, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Saturday) THEN sales_price#4 ELSE null END))#67] -Results [8]: [d_week_seq#10, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Sunday) THEN sales_price#4 ELSE null END))#61,17,2) AS sun_sales#35, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = 
Monday) THEN sales_price#4 ELSE null END))#62,17,2) AS mon_sales#36, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Tuesday) THEN sales_price#4 ELSE null END))#63,17,2) AS tue_sales#37, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Wednesday) THEN sales_price#4 ELSE null END))#64,17,2) AS wed_sales#38, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Thursday) THEN sales_price#4 ELSE null END))#65,17,2) AS thu_sales#39, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Friday) THEN sales_price#4 ELSE null END))#66,17,2) AS fri_sales#40, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Saturday) THEN sales_price#4 ELSE null END))#67,17,2) AS sat_sales#41] +Aggregate Attributes [7]: [sum(UnscaledValue(CASE WHEN (d_day_name#11 = Sunday) THEN sales_price#4 ELSE null END))#60, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Monday) THEN sales_price#4 ELSE null END))#61, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Tuesday) THEN sales_price#4 ELSE null END))#62, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Wednesday) THEN sales_price#4 ELSE null END))#63, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Thursday) THEN sales_price#4 ELSE null END))#64, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Friday) THEN sales_price#4 ELSE null END))#65, sum(UnscaledValue(CASE WHEN (d_day_name#11 = Saturday) THEN sales_price#4 ELSE null END))#66] +Results [8]: [d_week_seq#10, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Sunday) THEN sales_price#4 ELSE null END))#60,17,2) AS sun_sales#35, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Monday) THEN sales_price#4 ELSE null END))#61,17,2) AS mon_sales#36, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Tuesday) THEN sales_price#4 ELSE null END))#62,17,2) AS tue_sales#37, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Wednesday) THEN sales_price#4 ELSE null END))#63,17,2) AS wed_sales#38, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Thursday) THEN sales_price#4 ELSE null END))#64,17,2) AS thu_sales#39, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Friday) THEN sales_price#4 ELSE null END))#65,17,2) AS fri_sales#40, MakeDecimal(sum(UnscaledValue(CASE WHEN (d_day_name#11 = Saturday) THEN sales_price#4 ELSE null END))#66,17,2) AS sat_sales#41] -(30) Scan parquet default.date_dim -Output [2]: [d_week_seq#68, d_year#69] +(28) Scan parquet default.date_dim +Output [2]: [d_week_seq#67, d_year#68] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2002), IsNotNull(d_week_seq)] ReadSchema: struct -(31) ColumnarToRow [codegen id : 12] -Input [2]: [d_week_seq#68, d_year#69] +(29) ColumnarToRow [codegen id : 10] +Input [2]: [d_week_seq#67, d_year#68] -(32) Filter [codegen id : 12] -Input [2]: [d_week_seq#68, d_year#69] -Condition : ((isnotnull(d_year#69) AND (d_year#69 = 2002)) AND isnotnull(d_week_seq#68)) +(30) Filter [codegen id : 10] +Input [2]: [d_week_seq#67, d_year#68] +Condition : ((isnotnull(d_year#68) AND (d_year#68 = 2002)) AND isnotnull(d_week_seq#67)) -(33) Project [codegen id : 12] -Output [1]: [d_week_seq#68] -Input [2]: [d_week_seq#68, d_year#69] +(31) Project [codegen id : 10] +Output [1]: [d_week_seq#67] +Input [2]: [d_week_seq#67, d_year#68] -(34) BroadcastExchange -Input [1]: [d_week_seq#68] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#70] +(32) BroadcastExchange +Input [1]: [d_week_seq#67] +Arguments: 
HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#69] -(35) BroadcastHashJoin [codegen id : 13] +(33) BroadcastHashJoin [codegen id : 11] Left keys [1]: [d_week_seq#10] -Right keys [1]: [d_week_seq#68] +Right keys [1]: [d_week_seq#67] Join condition: None -(36) Project [codegen id : 13] -Output [8]: [d_week_seq#10 AS d_week_seq2#71, sun_sales#35 AS sun_sales2#72, mon_sales#36 AS mon_sales2#73, tue_sales#37 AS tue_sales2#74, wed_sales#38 AS wed_sales2#75, thu_sales#39 AS thu_sales2#76, fri_sales#40 AS fri_sales2#77, sat_sales#41 AS sat_sales2#78] -Input [9]: [d_week_seq#10, sun_sales#35, mon_sales#36, tue_sales#37, wed_sales#38, thu_sales#39, fri_sales#40, sat_sales#41, d_week_seq#68] - -(37) Exchange -Input [8]: [d_week_seq2#71, sun_sales2#72, mon_sales2#73, tue_sales2#74, wed_sales2#75, thu_sales2#76, fri_sales2#77, sat_sales2#78] -Arguments: hashpartitioning((d_week_seq2#71 - 53), 5), true, [id=#79] +(34) Project [codegen id : 11] +Output [8]: [d_week_seq#10 AS d_week_seq2#70, sun_sales#35 AS sun_sales2#71, mon_sales#36 AS mon_sales2#72, tue_sales#37 AS tue_sales2#73, wed_sales#38 AS wed_sales2#74, thu_sales#39 AS thu_sales2#75, fri_sales#40 AS fri_sales2#76, sat_sales#41 AS sat_sales2#77] +Input [9]: [d_week_seq#10, sun_sales#35, mon_sales#36, tue_sales#37, wed_sales#38, thu_sales#39, fri_sales#40, sat_sales#41, d_week_seq#67] -(38) Sort [codegen id : 14] -Input [8]: [d_week_seq2#71, sun_sales2#72, mon_sales2#73, tue_sales2#74, wed_sales2#75, thu_sales2#76, fri_sales2#77, sat_sales2#78] -Arguments: [(d_week_seq2#71 - 53) ASC NULLS FIRST], false, 0 +(35) BroadcastExchange +Input [8]: [d_week_seq2#70, sun_sales2#71, mon_sales2#72, tue_sales2#73, wed_sales2#74, thu_sales2#75, fri_sales2#76, sat_sales2#77] +Arguments: HashedRelationBroadcastMode(List(cast((input[0, int, true] - 53) as bigint)),false), [id=#78] -(39) SortMergeJoin [codegen id : 15] +(36) BroadcastHashJoin [codegen id : 12] Left keys [1]: [d_week_seq1#45] -Right keys [1]: [(d_week_seq2#71 - 53)] +Right keys [1]: [(d_week_seq2#70 - 53)] Join condition: None -(40) Project [codegen id : 15] -Output [8]: [d_week_seq1#45, round(CheckOverflow((promote_precision(sun_sales1#46) / promote_precision(sun_sales2#72)), DecimalType(37,20), true), 2) AS round((sun_sales1 / sun_sales2), 2)#80, round(CheckOverflow((promote_precision(mon_sales1#47) / promote_precision(mon_sales2#73)), DecimalType(37,20), true), 2) AS round((mon_sales1 / mon_sales2), 2)#81, round(CheckOverflow((promote_precision(tue_sales1#48) / promote_precision(tue_sales2#74)), DecimalType(37,20), true), 2) AS round((tue_sales1 / tue_sales2), 2)#82, round(CheckOverflow((promote_precision(wed_sales1#49) / promote_precision(wed_sales2#75)), DecimalType(37,20), true), 2) AS round((wed_sales1 / wed_sales2), 2)#83, round(CheckOverflow((promote_precision(thu_sales1#50) / promote_precision(thu_sales2#76)), DecimalType(37,20), true), 2) AS round((thu_sales1 / thu_sales2), 2)#84, round(CheckOverflow((promote_precision(fri_sales1#51) / promote_precision(fri_sales2#77)), DecimalType(37,20), true), 2) AS round((fri_sales1 / fri_sales2), 2)#85, round(CheckOverflow((promote_precision(sat_sales1#52) / promote_precision(sat_sales2#78)), DecimalType(37,20), true), 2) AS round((sat_sales1 / sat_sales2), 2)#86] -Input [16]: [d_week_seq1#45, sun_sales1#46, mon_sales1#47, tue_sales1#48, wed_sales1#49, thu_sales1#50, fri_sales1#51, sat_sales1#52, d_week_seq2#71, sun_sales2#72, mon_sales2#73, tue_sales2#74, wed_sales2#75, thu_sales2#76, fri_sales2#77, sat_sales2#78] 
+(37) Project [codegen id : 12] +Output [8]: [d_week_seq1#45, round(CheckOverflow((promote_precision(sun_sales1#46) / promote_precision(sun_sales2#71)), DecimalType(37,20), true), 2) AS round((sun_sales1 / sun_sales2), 2)#79, round(CheckOverflow((promote_precision(mon_sales1#47) / promote_precision(mon_sales2#72)), DecimalType(37,20), true), 2) AS round((mon_sales1 / mon_sales2), 2)#80, round(CheckOverflow((promote_precision(tue_sales1#48) / promote_precision(tue_sales2#73)), DecimalType(37,20), true), 2) AS round((tue_sales1 / tue_sales2), 2)#81, round(CheckOverflow((promote_precision(wed_sales1#49) / promote_precision(wed_sales2#74)), DecimalType(37,20), true), 2) AS round((wed_sales1 / wed_sales2), 2)#82, round(CheckOverflow((promote_precision(thu_sales1#50) / promote_precision(thu_sales2#75)), DecimalType(37,20), true), 2) AS round((thu_sales1 / thu_sales2), 2)#83, round(CheckOverflow((promote_precision(fri_sales1#51) / promote_precision(fri_sales2#76)), DecimalType(37,20), true), 2) AS round((fri_sales1 / fri_sales2), 2)#84, round(CheckOverflow((promote_precision(sat_sales1#52) / promote_precision(sat_sales2#77)), DecimalType(37,20), true), 2) AS round((sat_sales1 / sat_sales2), 2)#85] +Input [16]: [d_week_seq1#45, sun_sales1#46, mon_sales1#47, tue_sales1#48, wed_sales1#49, thu_sales1#50, fri_sales1#51, sat_sales1#52, d_week_seq2#70, sun_sales2#71, mon_sales2#72, tue_sales2#73, wed_sales2#74, thu_sales2#75, fri_sales2#76, sat_sales2#77] -(41) Exchange -Input [8]: [d_week_seq1#45, round((sun_sales1 / sun_sales2), 2)#80, round((mon_sales1 / mon_sales2), 2)#81, round((tue_sales1 / tue_sales2), 2)#82, round((wed_sales1 / wed_sales2), 2)#83, round((thu_sales1 / thu_sales2), 2)#84, round((fri_sales1 / fri_sales2), 2)#85, round((sat_sales1 / sat_sales2), 2)#86] -Arguments: rangepartitioning(d_week_seq1#45 ASC NULLS FIRST, 5), true, [id=#87] +(38) Exchange +Input [8]: [d_week_seq1#45, round((sun_sales1 / sun_sales2), 2)#79, round((mon_sales1 / mon_sales2), 2)#80, round((tue_sales1 / tue_sales2), 2)#81, round((wed_sales1 / wed_sales2), 2)#82, round((thu_sales1 / thu_sales2), 2)#83, round((fri_sales1 / fri_sales2), 2)#84, round((sat_sales1 / sat_sales2), 2)#85] +Arguments: rangepartitioning(d_week_seq1#45 ASC NULLS FIRST, 5), ENSURE_REQUIREMENTS, [id=#86] -(42) Sort [codegen id : 16] -Input [8]: [d_week_seq1#45, round((sun_sales1 / sun_sales2), 2)#80, round((mon_sales1 / mon_sales2), 2)#81, round((tue_sales1 / tue_sales2), 2)#82, round((wed_sales1 / wed_sales2), 2)#83, round((thu_sales1 / thu_sales2), 2)#84, round((fri_sales1 / fri_sales2), 2)#85, round((sat_sales1 / sat_sales2), 2)#86] +(39) Sort [codegen id : 13] +Input [8]: [d_week_seq1#45, round((sun_sales1 / sun_sales2), 2)#79, round((mon_sales1 / mon_sales2), 2)#80, round((tue_sales1 / tue_sales2), 2)#81, round((wed_sales1 / wed_sales2), 2)#82, round((thu_sales1 / thu_sales2), 2)#83, round((fri_sales1 / fri_sales2), 2)#84, round((sat_sales1 / sat_sales2), 2)#85] Arguments: [d_week_seq1#45 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/simplified.txt index 3df7e4c8e6f3f..424a535e14847 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q2.sf100/simplified.txt @@ -1,70 +1,61 @@ -WholeStageCodegen (16) +WholeStageCodegen (13) Sort [d_week_seq1] 
InputAdapter Exchange [d_week_seq1] #1 - WholeStageCodegen (15) + WholeStageCodegen (12) Project [d_week_seq1,sun_sales1,sun_sales2,mon_sales1,mon_sales2,tue_sales1,tue_sales2,wed_sales1,wed_sales2,thu_sales1,thu_sales2,fri_sales1,fri_sales2,sat_sales1,sat_sales2] - SortMergeJoin [d_week_seq1,d_week_seq2] - InputAdapter - WholeStageCodegen (7) - Sort [d_week_seq1] + BroadcastHashJoin [d_week_seq1,d_week_seq2] + Project [d_week_seq,sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales] + BroadcastHashJoin [d_week_seq,d_week_seq] + HashAggregate [d_week_seq,sum,sum,sum,sum,sum,sum,sum] [sum(UnscaledValue(CASE WHEN (d_day_name = Sunday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Monday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Tuesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Wednesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Thursday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Friday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Saturday) THEN sales_price ELSE null END)),sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales,sum,sum,sum,sum,sum,sum,sum] InputAdapter - Exchange [d_week_seq1] #2 - WholeStageCodegen (6) - Project [d_week_seq,sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales] - BroadcastHashJoin [d_week_seq,d_week_seq] - HashAggregate [d_week_seq,sum,sum,sum,sum,sum,sum,sum] [sum(UnscaledValue(CASE WHEN (d_day_name = Sunday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Monday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Tuesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Wednesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Thursday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Friday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Saturday) THEN sales_price ELSE null END)),sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales,sum,sum,sum,sum,sum,sum,sum] + Exchange [d_week_seq] #2 + WholeStageCodegen (4) + HashAggregate [d_week_seq,d_day_name,sales_price] [sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum] + Project [sales_price,d_week_seq,d_day_name] + BroadcastHashJoin [sold_date_sk,d_date_sk] InputAdapter - Exchange [d_week_seq] #3 - WholeStageCodegen (4) - HashAggregate [d_week_seq,d_day_name,sales_price] [sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum,sum] - Project [sales_price,d_week_seq,d_day_name] - BroadcastHashJoin [sold_date_sk,d_date_sk] + Union + WholeStageCodegen (1) + Project [ws_sold_date_sk,ws_ext_sales_price] + Filter [ws_sold_date_sk] + ColumnarToRow InputAdapter - Union - WholeStageCodegen (1) - Project [ws_sold_date_sk,ws_ext_sales_price] - Filter [ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_ext_sales_price] - WholeStageCodegen (2) - Project [cs_sold_date_sk,cs_ext_sales_price] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ext_sales_price] + Scan parquet default.web_sales [ws_sold_date_sk,ws_ext_sales_price] + WholeStageCodegen (2) + Project [cs_sold_date_sk,cs_ext_sales_price] + Filter [cs_sold_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #4 - WholeStageCodegen (3) - 
Filter [d_date_sk,d_week_seq] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_week_seq,d_day_name] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (5) - Project [d_week_seq] - Filter [d_year,d_week_seq] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_week_seq,d_year] - InputAdapter - WholeStageCodegen (14) - Sort [d_week_seq2] - InputAdapter - Exchange [d_week_seq2] #6 - WholeStageCodegen (13) - Project [d_week_seq,sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales] - BroadcastHashJoin [d_week_seq,d_week_seq] - HashAggregate [d_week_seq,sum,sum,sum,sum,sum,sum,sum] [sum(UnscaledValue(CASE WHEN (d_day_name = Sunday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Monday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Tuesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Wednesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Thursday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Friday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Saturday) THEN sales_price ELSE null END)),sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales,sum,sum,sum,sum,sum,sum,sum] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ext_sales_price] InputAdapter - ReusedExchange [d_week_seq,sum,sum,sum,sum,sum,sum,sum] #3 - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (12) - Project [d_week_seq] - Filter [d_year,d_week_seq] + BroadcastExchange #3 + WholeStageCodegen (3) + Filter [d_date_sk,d_week_seq] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_week_seq,d_year] + Scan parquet default.date_dim [d_date_sk,d_week_seq,d_day_name] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (5) + Project [d_week_seq] + Filter [d_year,d_week_seq] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_week_seq,d_year] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (11) + Project [d_week_seq,sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales] + BroadcastHashJoin [d_week_seq,d_week_seq] + HashAggregate [d_week_seq,sum,sum,sum,sum,sum,sum,sum] [sum(UnscaledValue(CASE WHEN (d_day_name = Sunday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Monday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Tuesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Wednesday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Thursday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Friday) THEN sales_price ELSE null END)),sum(UnscaledValue(CASE WHEN (d_day_name = Saturday) THEN sales_price ELSE null END)),sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales,sum,sum,sum,sum,sum,sum,sum] + InputAdapter + ReusedExchange [d_week_seq,sum,sum,sum,sum,sum,sum,sum] #2 + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (10) + Project [d_week_seq] + Filter [d_year,d_week_seq] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_week_seq,d_year] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt index 9de369f611d0e..094e7aac5cbbd 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/explain.txt @@ -130,24 +130,24 @@ Input [6]: [inv_warehouse_sk#3, inv_quantity_on_hand#4, i_item_id#6, d_date#10, (23) HashAggregate [codegen id : 4] Input [4]: [inv_quantity_on_hand#4, w_warehouse_name#13, i_item_id#6, d_date#10] Keys [2]: [w_warehouse_name#13, i_item_id#6] -Functions [2]: [partial_sum(cast(CASE WHEN (d_date#10 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (d_date#10 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))] +Functions [2]: [partial_sum(CASE WHEN (d_date#10 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END), partial_sum(CASE WHEN (d_date#10 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)] Aggregate Attributes [2]: [sum#15, sum#16] Results [4]: [w_warehouse_name#13, i_item_id#6, sum#17, sum#18] (24) Exchange Input [4]: [w_warehouse_name#13, i_item_id#6, sum#17, sum#18] -Arguments: hashpartitioning(w_warehouse_name#13, i_item_id#6, 5), true, [id=#19] +Arguments: hashpartitioning(w_warehouse_name#13, i_item_id#6, 5), ENSURE_REQUIREMENTS, [id=#19] (25) HashAggregate [codegen id : 5] Input [4]: [w_warehouse_name#13, i_item_id#6, sum#17, sum#18] Keys [2]: [w_warehouse_name#13, i_item_id#6] -Functions [2]: [sum(cast(CASE WHEN (d_date#10 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint)), sum(cast(CASE WHEN (d_date#10 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))] -Aggregate Attributes [2]: [sum(cast(CASE WHEN (d_date#10 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#20, sum(cast(CASE WHEN (d_date#10 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#21] -Results [4]: [w_warehouse_name#13, i_item_id#6, sum(cast(CASE WHEN (d_date#10 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#20 AS inv_before#22, sum(cast(CASE WHEN (d_date#10 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#21 AS inv_after#23] +Functions [2]: [sum(CASE WHEN (d_date#10 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END), sum(CASE WHEN (d_date#10 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)] +Aggregate Attributes [2]: [sum(CASE WHEN (d_date#10 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#20, sum(CASE WHEN (d_date#10 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#21] +Results [4]: [w_warehouse_name#13, i_item_id#6, sum(CASE WHEN (d_date#10 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#20 AS inv_before#22, sum(CASE WHEN (d_date#10 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#21 AS inv_after#23] (26) Filter [codegen id : 5] Input [4]: [w_warehouse_name#13, i_item_id#6, inv_before#22, inv_after#23] -Condition : ((CASE WHEN (inv_before#22 > 0) THEN (cast(inv_after#23 as double) / cast(inv_before#22 as double)) ELSE null END >= 0.666667) AND (CASE WHEN (inv_before#22 > 0) THEN (cast(inv_after#23 as double) / cast(inv_before#22 as double)) ELSE null END <= 1.5)) +Condition : (CASE WHEN (inv_before#22 > 0) THEN ((cast(inv_after#23 as double) / cast(inv_before#22 as double)) >= 0.666667) ELSE false END AND CASE WHEN (inv_before#22 > 0) THEN ((cast(inv_after#23 as double) / cast(inv_before#22 as double)) <= 1.5) ELSE false END) (27) TakeOrderedAndProject Input [4]: [w_warehouse_name#13, i_item_id#6, inv_before#22, inv_after#23] diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/simplified.txt index 0ee47d05af65b..3da4f967ccbd3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21.sf100/simplified.txt @@ -1,7 +1,7 @@ TakeOrderedAndProject [w_warehouse_name,i_item_id,inv_before,inv_after] WholeStageCodegen (5) Filter [inv_before,inv_after] - HashAggregate [w_warehouse_name,i_item_id,sum,sum] [sum(cast(CASE WHEN (d_date < 11027) THEN inv_quantity_on_hand ELSE 0 END as bigint)),sum(cast(CASE WHEN (d_date >= 11027) THEN inv_quantity_on_hand ELSE 0 END as bigint)),inv_before,inv_after,sum,sum] + HashAggregate [w_warehouse_name,i_item_id,sum,sum] [sum(CASE WHEN (d_date < 11027) THEN cast(inv_quantity_on_hand as bigint) ELSE 0 END),sum(CASE WHEN (d_date >= 11027) THEN cast(inv_quantity_on_hand as bigint) ELSE 0 END),inv_before,inv_after,sum,sum] InputAdapter Exchange [w_warehouse_name,i_item_id] #1 WholeStageCodegen (4) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt index 788d1affde1b8..8edf52683fe7d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/explain.txt @@ -130,24 +130,24 @@ Input [6]: [inv_date_sk#1, inv_quantity_on_hand#4, w_warehouse_name#6, i_item_id (23) HashAggregate [codegen id : 4] Input [4]: [inv_quantity_on_hand#4, w_warehouse_name#6, i_item_id#9, d_date#13] Keys [2]: [w_warehouse_name#6, i_item_id#9] -Functions [2]: [partial_sum(cast(CASE WHEN (d_date#13 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (d_date#13 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))] +Functions [2]: [partial_sum(CASE WHEN (d_date#13 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END), partial_sum(CASE WHEN (d_date#13 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)] Aggregate Attributes [2]: [sum#15, sum#16] Results [4]: [w_warehouse_name#6, i_item_id#9, sum#17, sum#18] (24) Exchange Input [4]: [w_warehouse_name#6, i_item_id#9, sum#17, sum#18] -Arguments: hashpartitioning(w_warehouse_name#6, i_item_id#9, 5), true, [id=#19] +Arguments: hashpartitioning(w_warehouse_name#6, i_item_id#9, 5), ENSURE_REQUIREMENTS, [id=#19] (25) HashAggregate [codegen id : 5] Input [4]: [w_warehouse_name#6, i_item_id#9, sum#17, sum#18] Keys [2]: [w_warehouse_name#6, i_item_id#9] -Functions [2]: [sum(cast(CASE WHEN (d_date#13 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint)), sum(cast(CASE WHEN (d_date#13 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))] -Aggregate Attributes [2]: [sum(cast(CASE WHEN (d_date#13 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#20, sum(cast(CASE WHEN (d_date#13 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#21] -Results [4]: [w_warehouse_name#6, i_item_id#9, sum(cast(CASE WHEN (d_date#13 < 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#20 AS inv_before#22, sum(cast(CASE WHEN (d_date#13 >= 11027) THEN inv_quantity_on_hand#4 ELSE 0 END as bigint))#21 AS inv_after#23] +Functions [2]: [sum(CASE WHEN (d_date#13 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 
END), sum(CASE WHEN (d_date#13 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)] +Aggregate Attributes [2]: [sum(CASE WHEN (d_date#13 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#20, sum(CASE WHEN (d_date#13 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#21] +Results [4]: [w_warehouse_name#6, i_item_id#9, sum(CASE WHEN (d_date#13 < 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#20 AS inv_before#22, sum(CASE WHEN (d_date#13 >= 11027) THEN cast(inv_quantity_on_hand#4 as bigint) ELSE 0 END)#21 AS inv_after#23] (26) Filter [codegen id : 5] Input [4]: [w_warehouse_name#6, i_item_id#9, inv_before#22, inv_after#23] -Condition : ((CASE WHEN (inv_before#22 > 0) THEN (cast(inv_after#23 as double) / cast(inv_before#22 as double)) ELSE null END >= 0.666667) AND (CASE WHEN (inv_before#22 > 0) THEN (cast(inv_after#23 as double) / cast(inv_before#22 as double)) ELSE null END <= 1.5)) +Condition : (CASE WHEN (inv_before#22 > 0) THEN ((cast(inv_after#23 as double) / cast(inv_before#22 as double)) >= 0.666667) ELSE false END AND CASE WHEN (inv_before#22 > 0) THEN ((cast(inv_after#23 as double) / cast(inv_before#22 as double)) <= 1.5) ELSE false END) (27) TakeOrderedAndProject Input [4]: [w_warehouse_name#6, i_item_id#9, inv_before#22, inv_after#23] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/simplified.txt index 9b5483bd7191b..b9729a8c80968 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q21/simplified.txt @@ -1,7 +1,7 @@ TakeOrderedAndProject [w_warehouse_name,i_item_id,inv_before,inv_after] WholeStageCodegen (5) Filter [inv_before,inv_after] - HashAggregate [w_warehouse_name,i_item_id,sum,sum] [sum(cast(CASE WHEN (d_date < 11027) THEN inv_quantity_on_hand ELSE 0 END as bigint)),sum(cast(CASE WHEN (d_date >= 11027) THEN inv_quantity_on_hand ELSE 0 END as bigint)),inv_before,inv_after,sum,sum] + HashAggregate [w_warehouse_name,i_item_id,sum,sum] [sum(CASE WHEN (d_date < 11027) THEN cast(inv_quantity_on_hand as bigint) ELSE 0 END),sum(CASE WHEN (d_date >= 11027) THEN cast(inv_quantity_on_hand as bigint) ELSE 0 END),inv_before,inv_after,sum,sum] InputAdapter Exchange [w_warehouse_name,i_item_id] #1 WholeStageCodegen (4) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt index c5988072f758d..85f71b6cd9388 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/explain.txt @@ -1,96 +1,103 @@ == Physical Plan == -CollectLimit (92) -+- * HashAggregate (91) - +- Exchange (90) - +- * HashAggregate (89) - +- Union (88) - :- * Project (60) - : +- * BroadcastHashJoin Inner BuildRight (59) - : :- * Project (53) - : : +- SortMergeJoin LeftSemi (52) - : : :- * Sort (34) - : : : +- Exchange (33) - : : : +- * Project (32) - : : : +- SortMergeJoin LeftSemi (31) - : : : :- * Sort (5) - : : : : +- Exchange (4) - : : : : +- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.catalog_sales (1) - : : : +- * Sort (30) - : : : +- Exchange (29) - : : : +- * Project (28) - : : : +- * Filter (27) - : : 
: +- * HashAggregate (26) - : : : +- * HashAggregate (25) - : : : +- * Project (24) - : : : +- * SortMergeJoin Inner (23) - : : : :- * Sort (17) - : : : : +- Exchange (16) - : : : : +- * Project (15) - : : : : +- * BroadcastHashJoin Inner BuildRight (14) - : : : : :- * Filter (8) - : : : : : +- * ColumnarToRow (7) - : : : : : +- Scan parquet default.store_sales (6) - : : : : +- BroadcastExchange (13) - : : : : +- * Project (12) - : : : : +- * Filter (11) - : : : : +- * ColumnarToRow (10) - : : : : +- Scan parquet default.date_dim (9) - : : : +- * Sort (22) - : : : +- Exchange (21) - : : : +- * Filter (20) - : : : +- * ColumnarToRow (19) - : : : +- Scan parquet default.item (18) - : : +- * Sort (51) - : : +- * Project (50) - : : +- * Filter (49) - : : +- * HashAggregate (48) - : : +- * HashAggregate (47) - : : +- * Project (46) - : : +- * SortMergeJoin Inner (45) - : : :- * Sort (39) - : : : +- Exchange (38) - : : : +- * Filter (37) - : : : +- * ColumnarToRow (36) - : : : +- Scan parquet default.store_sales (35) - : : +- * Sort (44) - : : +- Exchange (43) - : : +- * Filter (42) - : : +- * ColumnarToRow (41) - : : +- Scan parquet default.customer (40) - : +- BroadcastExchange (58) - : +- * Project (57) - : +- * Filter (56) - : +- * ColumnarToRow (55) - : +- Scan parquet default.date_dim (54) - +- * Project (87) - +- * BroadcastHashJoin Inner BuildRight (86) - :- * Project (84) - : +- SortMergeJoin LeftSemi (83) - : :- * Sort (71) - : : +- Exchange (70) - : : +- * Project (69) - : : +- SortMergeJoin LeftSemi (68) - : : :- * Sort (65) - : : : +- Exchange (64) - : : : +- * Filter (63) - : : : +- * ColumnarToRow (62) - : : : +- Scan parquet default.web_sales (61) - : : +- * Sort (67) - : : +- ReusedExchange (66) - : +- * Sort (82) - : +- * Project (81) - : +- * Filter (80) - : +- * HashAggregate (79) - : +- * HashAggregate (78) - : +- * Project (77) - : +- * SortMergeJoin Inner (76) - : :- * Sort (73) - : : +- ReusedExchange (72) - : +- * Sort (75) - : +- ReusedExchange (74) - +- ReusedExchange (85) +* HashAggregate (99) ++- Exchange (98) + +- * HashAggregate (97) + +- Union (96) + :- * Project (59) + : +- * BroadcastHashJoin Inner BuildRight (58) + : :- * Project (52) + : : +- SortMergeJoin LeftSemi (51) + : : :- * Sort (33) + : : : +- Exchange (32) + : : : +- * Project (31) + : : : +- SortMergeJoin LeftSemi (30) + : : : :- * Sort (5) + : : : : +- Exchange (4) + : : : : +- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.catalog_sales (1) + : : : +- * Sort (29) + : : : +- * Project (28) + : : : +- * Filter (27) + : : : +- * HashAggregate (26) + : : : +- * HashAggregate (25) + : : : +- * Project (24) + : : : +- * SortMergeJoin Inner (23) + : : : :- * Sort (17) + : : : : +- Exchange (16) + : : : : +- * Project (15) + : : : : +- * BroadcastHashJoin Inner BuildRight (14) + : : : : :- * Filter (8) + : : : : : +- * ColumnarToRow (7) + : : : : : +- Scan parquet default.store_sales (6) + : : : : +- BroadcastExchange (13) + : : : : +- * Project (12) + : : : : +- * Filter (11) + : : : : +- * ColumnarToRow (10) + : : : : +- Scan parquet default.date_dim (9) + : : : +- * Sort (22) + : : : +- Exchange (21) + : : : +- * Filter (20) + : : : +- * ColumnarToRow (19) + : : : +- Scan parquet default.item (18) + : : +- * Sort (50) + : : +- * Project (49) + : : +- * Filter (48) + : : +- * HashAggregate (47) + : : +- * HashAggregate (46) + : : +- * Project (45) + : : +- * SortMergeJoin Inner (44) + : : :- * Sort (38) + : : : +- Exchange (37) + : : : +- * Filter (36) + : : : +- * 
ColumnarToRow (35) + : : : +- Scan parquet default.store_sales (34) + : : +- * Sort (43) + : : +- Exchange (42) + : : +- * Filter (41) + : : +- * ColumnarToRow (40) + : : +- Scan parquet default.customer (39) + : +- BroadcastExchange (57) + : +- * Project (56) + : +- * Filter (55) + : +- * ColumnarToRow (54) + : +- Scan parquet default.date_dim (53) + +- * Project (95) + +- * BroadcastHashJoin Inner BuildRight (94) + :- * Project (92) + : +- SortMergeJoin LeftSemi (91) + : :- * Sort (79) + : : +- Exchange (78) + : : +- * Project (77) + : : +- SortMergeJoin LeftSemi (76) + : : :- * Sort (64) + : : : +- Exchange (63) + : : : +- * Filter (62) + : : : +- * ColumnarToRow (61) + : : : +- Scan parquet default.web_sales (60) + : : +- * Sort (75) + : : +- * Project (74) + : : +- * Filter (73) + : : +- * HashAggregate (72) + : : +- * HashAggregate (71) + : : +- * Project (70) + : : +- * SortMergeJoin Inner (69) + : : :- * Sort (66) + : : : +- ReusedExchange (65) + : : +- * Sort (68) + : : +- ReusedExchange (67) + : +- * Sort (90) + : +- * Project (89) + : +- * Filter (88) + : +- * HashAggregate (87) + : +- * HashAggregate (86) + : +- * Project (85) + : +- * SortMergeJoin Inner (84) + : :- * Sort (81) + : : +- ReusedExchange (80) + : +- * Sort (83) + : +- ReusedExchange (82) + +- ReusedExchange (93) (1) Scan parquet default.catalog_sales @@ -221,435 +228,469 @@ Condition : (count(1)#22 > 4) Output [1]: [item_sk#21] Input [2]: [item_sk#21, count(1)#22] -(29) Exchange -Input [1]: [item_sk#21] -Arguments: hashpartitioning(item_sk#21, 5), true, [id=#23] - -(30) Sort [codegen id : 9] +(29) Sort [codegen id : 8] Input [1]: [item_sk#21] Arguments: [item_sk#21 ASC NULLS FIRST], false, 0 -(31) SortMergeJoin +(30) SortMergeJoin Left keys [1]: [cs_item_sk#3] Right keys [1]: [item_sk#21] Join condition: None -(32) Project [codegen id : 10] +(31) Project [codegen id : 9] Output [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] Input [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity#4, cs_list_price#5] -(33) Exchange +(32) Exchange Input [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] -Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), true, [id=#24] +Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), true, [id=#23] -(34) Sort [codegen id : 11] +(33) Sort [codegen id : 10] Input [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0 -(35) Scan parquet default.store_sales -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(34) Scan parquet default.store_sales +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk)] ReadSchema: struct -(36) ColumnarToRow [codegen id : 12] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(35) ColumnarToRow [codegen id : 11] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(37) Filter [codegen id : 12] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Condition : isnotnull(ss_customer_sk#25) +(36) Filter [codegen id : 11] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Condition : isnotnull(ss_customer_sk#24) -(38) Exchange -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: hashpartitioning(ss_customer_sk#25, 5), true, [id=#28] +(37) Exchange +Input [3]: 
[ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: hashpartitioning(ss_customer_sk#24, 5), true, [id=#27] -(39) Sort [codegen id : 13] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(38) Sort [codegen id : 12] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], false, 0 -(40) Scan parquet default.customer -Output [1]: [c_customer_sk#29] +(39) Scan parquet default.customer +Output [1]: [c_customer_sk#28] Batched: true Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct -(41) ColumnarToRow [codegen id : 14] -Input [1]: [c_customer_sk#29] +(40) ColumnarToRow [codegen id : 13] +Input [1]: [c_customer_sk#28] -(42) Filter [codegen id : 14] -Input [1]: [c_customer_sk#29] -Condition : isnotnull(c_customer_sk#29) +(41) Filter [codegen id : 13] +Input [1]: [c_customer_sk#28] +Condition : isnotnull(c_customer_sk#28) -(43) Exchange -Input [1]: [c_customer_sk#29] -Arguments: hashpartitioning(c_customer_sk#29, 5), true, [id=#30] +(42) Exchange +Input [1]: [c_customer_sk#28] +Arguments: hashpartitioning(c_customer_sk#28, 5), true, [id=#29] -(44) Sort [codegen id : 15] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(43) Sort [codegen id : 14] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(45) SortMergeJoin [codegen id : 16] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(44) SortMergeJoin [codegen id : 15] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: [c_customer_sk#28] Join condition: None -(46) Project [codegen id : 16] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(47) HashAggregate [codegen id : 16] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#31, isEmpty#32] -Results [3]: [c_customer_sk#29, sum#33, isEmpty#34] - -(48) HashAggregate [codegen id : 16] -Input [3]: [c_customer_sk#29, sum#33, isEmpty#34] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#35] -Results [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#35 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] - -(49) Filter [codegen id : 16] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(Subquery scalar-subquery#37, [id=#38] as decimal(32,6)))), DecimalType(38,8), true))) - -(50) Project [codegen id : 16] -Output [1]: [c_customer_sk#29] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] - -(51) Sort [codegen id : 16] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 - -(52) SortMergeJoin +(45) Project [codegen id : 15] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(46) HashAggregate [codegen id : 15] +Input [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#30, isEmpty#31] +Results [3]: [c_customer_sk#28, sum#32, isEmpty#33] + +(47) HashAggregate [codegen id : 15] +Input [3]: [c_customer_sk#28, sum#32, isEmpty#33] +Keys [1]: [c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#34] +Results [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#34 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] + +(48) Filter [codegen id : 15] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(Subquery scalar-subquery#36, [id=#37] as decimal(32,6)))), DecimalType(38,8), true))) + +(49) Project 
[codegen id : 15] +Output [1]: [c_customer_sk#28] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] + +(50) Sort [codegen id : 15] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 + +(51) SortMergeJoin Left keys [1]: [cs_bill_customer_sk#2] -Right keys [1]: [c_customer_sk#29] +Right keys [1]: [c_customer_sk#28] Join condition: None -(53) Project [codegen id : 18] +(52) Project [codegen id : 17] Output [3]: [cs_sold_date_sk#1, cs_quantity#4, cs_list_price#5] Input [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] -(54) Scan parquet default.date_dim -Output [3]: [d_date_sk#9, d_year#11, d_moy#39] +(53) Scan parquet default.date_dim +Output [3]: [d_date_sk#9, d_year#11, d_moy#38] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct -(55) ColumnarToRow [codegen id : 17] -Input [3]: [d_date_sk#9, d_year#11, d_moy#39] +(54) ColumnarToRow [codegen id : 16] +Input [3]: [d_date_sk#9, d_year#11, d_moy#38] -(56) Filter [codegen id : 17] -Input [3]: [d_date_sk#9, d_year#11, d_moy#39] -Condition : ((((isnotnull(d_year#11) AND isnotnull(d_moy#39)) AND (d_year#11 = 2000)) AND (d_moy#39 = 2)) AND isnotnull(d_date_sk#9)) +(55) Filter [codegen id : 16] +Input [3]: [d_date_sk#9, d_year#11, d_moy#38] +Condition : ((((isnotnull(d_year#11) AND isnotnull(d_moy#38)) AND (d_year#11 = 2000)) AND (d_moy#38 = 2)) AND isnotnull(d_date_sk#9)) -(57) Project [codegen id : 17] +(56) Project [codegen id : 16] Output [1]: [d_date_sk#9] -Input [3]: [d_date_sk#9, d_year#11, d_moy#39] +Input [3]: [d_date_sk#9, d_year#11, d_moy#38] -(58) BroadcastExchange +(57) BroadcastExchange Input [1]: [d_date_sk#9] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#40] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#39] -(59) BroadcastHashJoin [codegen id : 18] +(58) BroadcastHashJoin [codegen id : 17] Left keys [1]: [cs_sold_date_sk#1] Right keys [1]: [d_date_sk#9] Join condition: None -(60) Project [codegen id : 18] -Output [1]: [CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true) AS sales#41] +(59) Project [codegen id : 17] +Output [1]: [CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true) AS sales#40] Input [4]: [cs_sold_date_sk#1, cs_quantity#4, cs_list_price#5, d_date_sk#9] -(61) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#42, ws_item_sk#43, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] +(60) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#41, ws_item_sk#42, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(62) ColumnarToRow [codegen id : 19] -Input [5]: [ws_sold_date_sk#42, ws_item_sk#43, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] +(61) ColumnarToRow [codegen id : 18] +Input [5]: 
[ws_sold_date_sk#41, ws_item_sk#42, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] + +(62) Filter [codegen id : 18] +Input [5]: [ws_sold_date_sk#41, ws_item_sk#42, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] +Condition : isnotnull(ws_sold_date_sk#41) + +(63) Exchange +Input [5]: [ws_sold_date_sk#41, ws_item_sk#42, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] +Arguments: hashpartitioning(ws_item_sk#42, 5), true, [id=#46] + +(64) Sort [codegen id : 19] +Input [5]: [ws_sold_date_sk#41, ws_item_sk#42, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] +Arguments: [ws_item_sk#42 ASC NULLS FIRST], false, 0 + +(65) ReusedExchange [Reuses operator id: 16] +Output [2]: [ss_item_sk#8, d_date#10] + +(66) Sort [codegen id : 22] +Input [2]: [ss_item_sk#8, d_date#10] +Arguments: [ss_item_sk#8 ASC NULLS FIRST], false, 0 + +(67) ReusedExchange [Reuses operator id: 21] +Output [2]: [i_item_sk#14, i_item_desc#15] + +(68) Sort [codegen id : 24] +Input [2]: [i_item_sk#14, i_item_desc#15] +Arguments: [i_item_sk#14 ASC NULLS FIRST], false, 0 + +(69) SortMergeJoin [codegen id : 25] +Left keys [1]: [ss_item_sk#8] +Right keys [1]: [i_item_sk#14] +Join condition: None + +(70) Project [codegen id : 25] +Output [3]: [d_date#10, i_item_sk#14, i_item_desc#15] +Input [4]: [ss_item_sk#8, d_date#10, i_item_sk#14, i_item_desc#15] -(63) Filter [codegen id : 19] -Input [5]: [ws_sold_date_sk#42, ws_item_sk#43, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] -Condition : isnotnull(ws_sold_date_sk#42) +(71) HashAggregate [codegen id : 25] +Input [3]: [d_date#10, i_item_sk#14, i_item_desc#15] +Keys [3]: [substr(i_item_desc#15, 1, 30) AS substr(i_item_desc#15, 1, 30)#47, i_item_sk#14, d_date#10] +Functions [1]: [partial_count(1)] +Aggregate Attributes [1]: [count#48] +Results [4]: [substr(i_item_desc#15, 1, 30)#47, i_item_sk#14, d_date#10, count#49] -(64) Exchange -Input [5]: [ws_sold_date_sk#42, ws_item_sk#43, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] -Arguments: hashpartitioning(ws_item_sk#43, 5), true, [id=#47] +(72) HashAggregate [codegen id : 25] +Input [4]: [substr(i_item_desc#15, 1, 30)#47, i_item_sk#14, d_date#10, count#49] +Keys [3]: [substr(i_item_desc#15, 1, 30)#47, i_item_sk#14, d_date#10] +Functions [1]: [count(1)] +Aggregate Attributes [1]: [count(1)#50] +Results [2]: [i_item_sk#14 AS item_sk#21, count(1)#50 AS count(1)#51] -(65) Sort [codegen id : 20] -Input [5]: [ws_sold_date_sk#42, ws_item_sk#43, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] -Arguments: [ws_item_sk#43 ASC NULLS FIRST], false, 0 +(73) Filter [codegen id : 25] +Input [2]: [item_sk#21, count(1)#51] +Condition : (count(1)#51 > 4) -(66) ReusedExchange [Reuses operator id: 29] +(74) Project [codegen id : 25] Output [1]: [item_sk#21] +Input [2]: [item_sk#21, count(1)#51] -(67) Sort [codegen id : 27] +(75) Sort [codegen id : 25] Input [1]: [item_sk#21] Arguments: [item_sk#21 ASC NULLS FIRST], false, 0 -(68) SortMergeJoin -Left keys [1]: [ws_item_sk#43] +(76) SortMergeJoin +Left keys [1]: [ws_item_sk#42] Right keys [1]: [item_sk#21] Join condition: None -(69) Project [codegen id : 28] -Output [4]: [ws_sold_date_sk#42, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] -Input [5]: [ws_sold_date_sk#42, ws_item_sk#43, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] +(77) Project [codegen id : 26] +Output [4]: [ws_sold_date_sk#41, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] +Input [5]: [ws_sold_date_sk#41, ws_item_sk#42, ws_bill_customer_sk#43, 
ws_quantity#44, ws_list_price#45] -(70) Exchange -Input [4]: [ws_sold_date_sk#42, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] -Arguments: hashpartitioning(ws_bill_customer_sk#44, 5), true, [id=#48] +(78) Exchange +Input [4]: [ws_sold_date_sk#41, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] +Arguments: hashpartitioning(ws_bill_customer_sk#43, 5), true, [id=#52] -(71) Sort [codegen id : 29] -Input [4]: [ws_sold_date_sk#42, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] -Arguments: [ws_bill_customer_sk#44 ASC NULLS FIRST], false, 0 +(79) Sort [codegen id : 27] +Input [4]: [ws_sold_date_sk#41, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] +Arguments: [ws_bill_customer_sk#43 ASC NULLS FIRST], false, 0 -(72) ReusedExchange [Reuses operator id: 38] -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(80) ReusedExchange [Reuses operator id: 37] +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(73) Sort [codegen id : 31] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(81) Sort [codegen id : 29] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], false, 0 -(74) ReusedExchange [Reuses operator id: 43] -Output [1]: [c_customer_sk#29] +(82) ReusedExchange [Reuses operator id: 42] +Output [1]: [c_customer_sk#28] -(75) Sort [codegen id : 33] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(83) Sort [codegen id : 31] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(76) SortMergeJoin [codegen id : 34] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(84) SortMergeJoin [codegen id : 32] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: [c_customer_sk#28] Join condition: None -(77) Project [codegen id : 34] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(78) HashAggregate [codegen id : 34] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#49, isEmpty#50] -Results [3]: [c_customer_sk#29, sum#51, isEmpty#52] - -(79) HashAggregate [codegen id : 34] -Input [3]: [c_customer_sk#29, sum#51, isEmpty#52] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#53] -Results [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#53 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#54] - -(80) Filter [codegen 
id : 34] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#54] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#54) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#54 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#37, [id=#38] as decimal(32,6)))), DecimalType(38,8), true))) - -(81) Project [codegen id : 34] -Output [1]: [c_customer_sk#29] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#54] - -(82) Sort [codegen id : 34] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 - -(83) SortMergeJoin -Left keys [1]: [ws_bill_customer_sk#44] -Right keys [1]: [c_customer_sk#29] +(85) Project [codegen id : 32] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(86) HashAggregate [codegen id : 32] +Input [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#53, isEmpty#54] +Results [3]: [c_customer_sk#28, sum#55, isEmpty#56] + +(87) HashAggregate [codegen id : 32] +Input [3]: [c_customer_sk#28, sum#55, isEmpty#56] +Keys [1]: [c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#57] +Results [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#57 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#58] + +(88) Filter [codegen id : 32] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#58] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#58) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#58 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#36, [id=#37] as decimal(32,6)))), DecimalType(38,8), true))) + +(89) Project [codegen id : 32] +Output [1]: [c_customer_sk#28] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#58] + +(90) Sort [codegen id : 32] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 + +(91) SortMergeJoin +Left keys [1]: [ws_bill_customer_sk#43] +Right keys [1]: [c_customer_sk#28] Join condition: None -(84) Project [codegen id : 36] -Output [3]: [ws_sold_date_sk#42, ws_quantity#45, ws_list_price#46] -Input [4]: [ws_sold_date_sk#42, ws_bill_customer_sk#44, ws_quantity#45, ws_list_price#46] +(92) Project [codegen id : 34] +Output [3]: [ws_sold_date_sk#41, ws_quantity#44, ws_list_price#45] +Input [4]: [ws_sold_date_sk#41, ws_bill_customer_sk#43, ws_quantity#44, ws_list_price#45] -(85) ReusedExchange [Reuses operator id: 58] +(93) ReusedExchange [Reuses operator id: 57] Output [1]: [d_date_sk#9] -(86) BroadcastHashJoin [codegen id : 36] -Left keys [1]: [ws_sold_date_sk#42] +(94) BroadcastHashJoin [codegen id : 34] +Left keys [1]: [ws_sold_date_sk#41] Right keys [1]: [d_date_sk#9] Join condition: None -(87) Project [codegen id : 36] -Output [1]: [CheckOverflow((promote_precision(cast(cast(ws_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#46 as decimal(12,2)))), DecimalType(18,2), true) AS sales#55] -Input [4]: [ws_sold_date_sk#42, ws_quantity#45, ws_list_price#46, d_date_sk#9] +(95) Project [codegen id : 34] +Output [1]: [CheckOverflow((promote_precision(cast(cast(ws_quantity#44 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#45 as decimal(12,2)))), DecimalType(18,2), true) AS sales#59] +Input [4]: [ws_sold_date_sk#41, ws_quantity#44, ws_list_price#45, d_date_sk#9] -(88) Union +(96) Union -(89) HashAggregate [codegen id : 37] -Input [1]: [sales#41] +(97) HashAggregate [codegen id : 35] +Input [1]: [sales#40] Keys: [] -Functions [1]: [partial_sum(sales#41)] -Aggregate Attributes [2]: [sum#56, isEmpty#57] -Results [2]: [sum#58, isEmpty#59] +Functions [1]: [partial_sum(sales#40)] +Aggregate Attributes [2]: [sum#60, isEmpty#61] +Results [2]: [sum#62, isEmpty#63] -(90) Exchange -Input [2]: [sum#58, isEmpty#59] -Arguments: SinglePartition, true, [id=#60] +(98) Exchange +Input [2]: [sum#62, isEmpty#63] +Arguments: SinglePartition, true, [id=#64] -(91) HashAggregate [codegen id : 38] -Input [2]: [sum#58, isEmpty#59] +(99) HashAggregate [codegen id : 36] +Input [2]: [sum#62, isEmpty#63] Keys: [] -Functions [1]: [sum(sales#41)] -Aggregate Attributes [1]: [sum(sales#41)#61] -Results [1]: [sum(sales#41)#61 AS sum(sales)#62] - -(92) CollectLimit -Input [1]: [sum(sales)#62] -Arguments: 100 +Functions [1]: [sum(sales#40)] +Aggregate Attributes [1]: [sum(sales#40)#65] +Results [1]: [sum(sales#40)#65 AS sum(sales)#66] ===== Subqueries ===== -Subquery:1 Hosting operator id = 49 Hosting Expression = Subquery scalar-subquery#37, [id=#38] -* HashAggregate (116) -+- Exchange (115) - +- * HashAggregate (114) - +- * HashAggregate (113) - +- * HashAggregate (112) - +- * Project (111) - +- * SortMergeJoin Inner (110) - :- * Sort (104) - : +- Exchange (103) - : +- * Project 
(102) - : +- * BroadcastHashJoin Inner BuildRight (101) - : :- * Filter (95) - : : +- * ColumnarToRow (94) - : : +- Scan parquet default.store_sales (93) - : +- BroadcastExchange (100) - : +- * Project (99) - : +- * Filter (98) - : +- * ColumnarToRow (97) - : +- Scan parquet default.date_dim (96) - +- * Sort (109) - +- Exchange (108) - +- * Filter (107) - +- * ColumnarToRow (106) - +- Scan parquet default.customer (105) - - -(93) Scan parquet default.store_sales -Output [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +Subquery:1 Hosting operator id = 48 Hosting Expression = Subquery scalar-subquery#36, [id=#37] +* HashAggregate (123) ++- Exchange (122) + +- * HashAggregate (121) + +- * HashAggregate (120) + +- * HashAggregate (119) + +- * Project (118) + +- * SortMergeJoin Inner (117) + :- * Sort (111) + : +- Exchange (110) + : +- * Project (109) + : +- * BroadcastHashJoin Inner BuildRight (108) + : :- * Filter (102) + : : +- * ColumnarToRow (101) + : : +- Scan parquet default.store_sales (100) + : +- BroadcastExchange (107) + : +- * Project (106) + : +- * Filter (105) + : +- * ColumnarToRow (104) + : +- Scan parquet default.date_dim (103) + +- * Sort (116) + +- Exchange (115) + +- * Filter (114) + +- * ColumnarToRow (113) + +- Scan parquet default.customer (112) + + +(100) Scan parquet default.store_sales +Output [4]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(94) ColumnarToRow [codegen id : 2] -Input [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(101) ColumnarToRow [codegen id : 2] +Input [4]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(95) Filter [codegen id : 2] -Input [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Condition : (isnotnull(ss_customer_sk#25) AND isnotnull(ss_sold_date_sk#7)) +(102) Filter [codegen id : 2] +Input [4]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Condition : (isnotnull(ss_customer_sk#24) AND isnotnull(ss_sold_date_sk#7)) -(96) Scan parquet default.date_dim +(103) Scan parquet default.date_dim Output [2]: [d_date_sk#9, d_year#11] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct -(97) ColumnarToRow [codegen id : 1] +(104) ColumnarToRow [codegen id : 1] Input [2]: [d_date_sk#9, d_year#11] -(98) Filter [codegen id : 1] +(105) Filter [codegen id : 1] Input [2]: [d_date_sk#9, d_year#11] Condition : (d_year#11 IN (2000,2001,2002,2003) AND isnotnull(d_date_sk#9)) -(99) Project [codegen id : 1] +(106) Project [codegen id : 1] Output [1]: [d_date_sk#9] Input [2]: [d_date_sk#9, d_year#11] -(100) BroadcastExchange +(107) BroadcastExchange Input [1]: [d_date_sk#9] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#63] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#67] -(101) BroadcastHashJoin [codegen id : 2] +(108) BroadcastHashJoin [codegen id : 2] Left keys [1]: [ss_sold_date_sk#7] Right keys [1]: [d_date_sk#9] Join condition: None -(102) Project [codegen id : 2] -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Input [5]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, 
ss_sales_price#27, d_date_sk#9] +(109) Project [codegen id : 2] +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Input [5]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, d_date_sk#9] -(103) Exchange -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: hashpartitioning(ss_customer_sk#25, 5), true, [id=#64] +(110) Exchange +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: hashpartitioning(ss_customer_sk#24, 5), true, [id=#68] -(104) Sort [codegen id : 3] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(111) Sort [codegen id : 3] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], false, 0 -(105) Scan parquet default.customer -Output [1]: [c_customer_sk#29] +(112) Scan parquet default.customer +Output [1]: [c_customer_sk#28] Batched: true Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct -(106) ColumnarToRow [codegen id : 4] -Input [1]: [c_customer_sk#29] +(113) ColumnarToRow [codegen id : 4] +Input [1]: [c_customer_sk#28] -(107) Filter [codegen id : 4] -Input [1]: [c_customer_sk#29] -Condition : isnotnull(c_customer_sk#29) +(114) Filter [codegen id : 4] +Input [1]: [c_customer_sk#28] +Condition : isnotnull(c_customer_sk#28) -(108) Exchange -Input [1]: [c_customer_sk#29] -Arguments: hashpartitioning(c_customer_sk#29, 5), true, [id=#65] +(115) Exchange +Input [1]: [c_customer_sk#28] +Arguments: hashpartitioning(c_customer_sk#28, 5), true, [id=#69] -(109) Sort [codegen id : 5] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(116) Sort [codegen id : 5] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(110) SortMergeJoin [codegen id : 6] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(117) SortMergeJoin [codegen id : 6] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: [c_customer_sk#28] Join condition: None -(111) Project [codegen id : 6] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(112) HashAggregate [codegen id : 6] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#66, isEmpty#67] -Results [3]: [c_customer_sk#29, sum#68, isEmpty#69] - -(113) HashAggregate [codegen id : 6] -Input [3]: [c_customer_sk#29, sum#68, isEmpty#69] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#70] -Results [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#70 
AS csales#71] - -(114) HashAggregate [codegen id : 6] -Input [1]: [csales#71] +(118) Project [codegen id : 6] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(119) HashAggregate [codegen id : 6] +Input [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#70, isEmpty#71] +Results [3]: [c_customer_sk#28, sum#72, isEmpty#73] + +(120) HashAggregate [codegen id : 6] +Input [3]: [c_customer_sk#28, sum#72, isEmpty#73] +Keys [1]: [c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#74] +Results [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#74 AS csales#75] + +(121) HashAggregate [codegen id : 6] +Input [1]: [csales#75] Keys: [] -Functions [1]: [partial_max(csales#71)] -Aggregate Attributes [1]: [max#72] -Results [1]: [max#73] +Functions [1]: [partial_max(csales#75)] +Aggregate Attributes [1]: [max#76] +Results [1]: [max#77] -(115) Exchange -Input [1]: [max#73] -Arguments: SinglePartition, true, [id=#74] +(122) Exchange +Input [1]: [max#77] +Arguments: SinglePartition, true, [id=#78] -(116) HashAggregate [codegen id : 7] -Input [1]: [max#73] +(123) HashAggregate [codegen id : 7] +Input [1]: [max#77] Keys: [] -Functions [1]: [max(csales#71)] -Aggregate Attributes [1]: [max(csales#71)#75] -Results [1]: [max(csales#71)#75 AS tpcds_cmax#76] +Functions [1]: [max(csales#75)] +Aggregate Attributes [1]: [max(csales#75)#79] +Results [1]: [max(csales#75)#79 AS tpcds_cmax#80] -Subquery:2 Hosting operator id = 80 Hosting Expression = ReusedSubquery Subquery scalar-subquery#37, [id=#38] +Subquery:2 Hosting operator id = 88 Hosting Expression = ReusedSubquery Subquery scalar-subquery#36, [id=#37] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/simplified.txt index 9ee444cdd988c..5bb8bc5b99d0c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a.sf100/simplified.txt @@ -1,198 +1,208 @@ -CollectLimit - WholeStageCodegen (38) - HashAggregate [sum,isEmpty] [sum(sales),sum(sales),sum,isEmpty] - InputAdapter - Exchange #1 - WholeStageCodegen (37) - HashAggregate [sales] [sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (18) - Project [cs_quantity,cs_list_price] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_quantity,cs_list_price] - InputAdapter - SortMergeJoin [cs_bill_customer_sk,c_customer_sk] - WholeStageCodegen (11) - Sort [cs_bill_customer_sk] - InputAdapter - Exchange 
[cs_bill_customer_sk] #2 - WholeStageCodegen (10) - Project [cs_sold_date_sk,cs_bill_customer_sk,cs_quantity,cs_list_price] - InputAdapter - SortMergeJoin [cs_item_sk,item_sk] - WholeStageCodegen (2) - Sort [cs_item_sk] - InputAdapter - Exchange [cs_item_sk] #3 - WholeStageCodegen (1) - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_quantity,cs_list_price] - WholeStageCodegen (9) - Sort [item_sk] - InputAdapter - Exchange [item_sk] #4 - WholeStageCodegen (8) - Project [item_sk] - Filter [count(1)] - HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] - HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] - Project [d_date,i_item_sk,i_item_desc] - SortMergeJoin [ss_item_sk,i_item_sk] - InputAdapter - WholeStageCodegen (5) - Sort [ss_item_sk] - InputAdapter - Exchange [ss_item_sk] #5 - WholeStageCodegen (4) - Project [ss_item_sk,d_date] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (3) - Project [d_date_sk,d_date] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date,d_year] - InputAdapter - WholeStageCodegen (7) - Sort [i_item_sk] - InputAdapter - Exchange [i_item_sk] #7 - WholeStageCodegen (6) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_item_desc] - WholeStageCodegen (16) - Sort [c_customer_sk] - Project [c_customer_sk] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] - Subquery #1 - WholeStageCodegen (7) - HashAggregate [max] [max(csales),tpcds_cmax,max] - InputAdapter - Exchange #10 - WholeStageCodegen (6) - HashAggregate [csales] [max,max] - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),csales,sum,isEmpty] - HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] - Project [ss_quantity,ss_sales_price,c_customer_sk] - SortMergeJoin [ss_customer_sk,c_customer_sk] +WholeStageCodegen (36) + HashAggregate [sum,isEmpty] [sum(sales),sum(sales),sum,isEmpty] + InputAdapter + Exchange #1 + WholeStageCodegen (35) + HashAggregate [sales] [sum,isEmpty,sum,isEmpty] + InputAdapter + Union + WholeStageCodegen (17) + Project [cs_quantity,cs_list_price] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_quantity,cs_list_price] + InputAdapter + SortMergeJoin [cs_bill_customer_sk,c_customer_sk] + WholeStageCodegen (10) + Sort [cs_bill_customer_sk] + InputAdapter + Exchange [cs_bill_customer_sk] #2 + WholeStageCodegen (9) + Project [cs_sold_date_sk,cs_bill_customer_sk,cs_quantity,cs_list_price] + InputAdapter + SortMergeJoin [cs_item_sk,item_sk] + WholeStageCodegen (2) + Sort [cs_item_sk] + InputAdapter + Exchange [cs_item_sk] #3 + WholeStageCodegen (1) + Filter [cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_quantity,cs_list_price] + WholeStageCodegen (8) + Sort [item_sk] + Project 
[item_sk] + Filter [count(1)] + HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] + HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] + Project [d_date,i_item_sk,i_item_desc] + SortMergeJoin [ss_item_sk,i_item_sk] InputAdapter - WholeStageCodegen (3) - Sort [ss_customer_sk] + WholeStageCodegen (5) + Sort [ss_item_sk] InputAdapter - Exchange [ss_customer_sk] #11 - WholeStageCodegen (2) - Project [ss_customer_sk,ss_quantity,ss_sales_price] + Exchange [ss_item_sk] #4 + WholeStageCodegen (4) + Project [ss_item_sk,d_date] BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_customer_sk,ss_sold_date_sk] + Filter [ss_sold_date_sk,ss_item_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk,ss_quantity,ss_sales_price] + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] InputAdapter - BroadcastExchange #12 - WholeStageCodegen (1) - Project [d_date_sk] + BroadcastExchange #5 + WholeStageCodegen (3) + Project [d_date_sk,d_date] Filter [d_year,d_date_sk] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] + Scan parquet default.date_dim [d_date_sk,d_date,d_year] InputAdapter - WholeStageCodegen (5) - Sort [c_customer_sk] + WholeStageCodegen (7) + Sort [i_item_sk] InputAdapter - Exchange [c_customer_sk] #13 - WholeStageCodegen (4) - Filter [c_customer_sk] + Exchange [i_item_sk] #6 + WholeStageCodegen (6) + Filter [i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.customer [c_customer_sk] - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] - HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] - Project [ss_quantity,ss_sales_price,c_customer_sk] - SortMergeJoin [ss_customer_sk,c_customer_sk] - InputAdapter - WholeStageCodegen (13) - Sort [ss_customer_sk] - InputAdapter - Exchange [ss_customer_sk] #8 - WholeStageCodegen (12) - Filter [ss_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_customer_sk,ss_quantity,ss_sales_price] - InputAdapter - WholeStageCodegen (15) - Sort [c_customer_sk] - InputAdapter - Exchange [c_customer_sk] #9 - WholeStageCodegen (14) - Filter [c_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk] - InputAdapter - BroadcastExchange #14 - WholeStageCodegen (17) - Project [d_date_sk] - Filter [d_year,d_moy,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - WholeStageCodegen (36) - Project [ws_quantity,ws_list_price] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Project [ws_sold_date_sk,ws_quantity,ws_list_price] - InputAdapter - SortMergeJoin [ws_bill_customer_sk,c_customer_sk] - WholeStageCodegen (29) - Sort [ws_bill_customer_sk] - InputAdapter - Exchange [ws_bill_customer_sk] #15 - WholeStageCodegen (28) - Project [ws_sold_date_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] + Scan parquet default.item [i_item_sk,i_item_desc] + WholeStageCodegen (15) + Sort [c_customer_sk] + Project [c_customer_sk] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as 
decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] + Subquery #1 + WholeStageCodegen (7) + HashAggregate [max] [max(csales),tpcds_cmax,max] InputAdapter - SortMergeJoin [ws_item_sk,item_sk] - WholeStageCodegen (20) - Sort [ws_item_sk] + Exchange #9 + WholeStageCodegen (6) + HashAggregate [csales] [max,max] + HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),csales,sum,isEmpty] + HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] + Project [ss_quantity,ss_sales_price,c_customer_sk] + SortMergeJoin [ss_customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (3) + Sort [ss_customer_sk] + InputAdapter + Exchange [ss_customer_sk] #10 + WholeStageCodegen (2) + Project [ss_customer_sk,ss_quantity,ss_sales_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_customer_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk,ss_quantity,ss_sales_price] + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + InputAdapter + WholeStageCodegen (5) + Sort [c_customer_sk] + InputAdapter + Exchange [c_customer_sk] #12 + WholeStageCodegen (4) + Filter [c_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer [c_customer_sk] + HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] + HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] + Project [ss_quantity,ss_sales_price,c_customer_sk] + SortMergeJoin [ss_customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (12) + Sort [ss_customer_sk] InputAdapter - Exchange [ws_item_sk] #16 - WholeStageCodegen (19) - Filter [ws_sold_date_sk] + Exchange [ss_customer_sk] #7 + WholeStageCodegen (11) + Filter [ss_customer_sk] ColumnarToRow InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] - WholeStageCodegen (27) - Sort [item_sk] + Scan parquet default.store_sales [ss_customer_sk,ss_quantity,ss_sales_price] + InputAdapter + WholeStageCodegen (14) + Sort [c_customer_sk] InputAdapter - ReusedExchange [item_sk] #4 - WholeStageCodegen (34) - Sort [c_customer_sk] - Project [c_customer_sk] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [tpcds_cmax] #1 - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), 
DecimalType(18,2), true)),sum,isEmpty] - HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] - Project [ss_quantity,ss_sales_price,c_customer_sk] - SortMergeJoin [ss_customer_sk,c_customer_sk] - InputAdapter - WholeStageCodegen (31) - Sort [ss_customer_sk] - InputAdapter - ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #8 - InputAdapter - WholeStageCodegen (33) - Sort [c_customer_sk] - InputAdapter - ReusedExchange [c_customer_sk] #9 + Exchange [c_customer_sk] #8 + WholeStageCodegen (13) + Filter [c_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer [c_customer_sk] + InputAdapter + BroadcastExchange #13 + WholeStageCodegen (16) + Project [d_date_sk] + Filter [d_year,d_moy,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + WholeStageCodegen (34) + Project [ws_quantity,ws_list_price] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Project [ws_sold_date_sk,ws_quantity,ws_list_price] InputAdapter - ReusedExchange [d_date_sk] #14 + SortMergeJoin [ws_bill_customer_sk,c_customer_sk] + WholeStageCodegen (27) + Sort [ws_bill_customer_sk] + InputAdapter + Exchange [ws_bill_customer_sk] #14 + WholeStageCodegen (26) + Project [ws_sold_date_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] + InputAdapter + SortMergeJoin [ws_item_sk,item_sk] + WholeStageCodegen (19) + Sort [ws_item_sk] + InputAdapter + Exchange [ws_item_sk] #15 + WholeStageCodegen (18) + Filter [ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] + WholeStageCodegen (25) + Sort [item_sk] + Project [item_sk] + Filter [count(1)] + HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] + HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] + Project [d_date,i_item_sk,i_item_desc] + SortMergeJoin [ss_item_sk,i_item_sk] + InputAdapter + WholeStageCodegen (22) + Sort [ss_item_sk] + InputAdapter + ReusedExchange [ss_item_sk,d_date] #4 + InputAdapter + WholeStageCodegen (24) + Sort [i_item_sk] + InputAdapter + ReusedExchange [i_item_sk,i_item_desc] #6 + WholeStageCodegen (32) + Sort [c_customer_sk] + Project [c_customer_sk] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [tpcds_cmax] #1 + HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] + HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] + Project [ss_quantity,ss_sales_price,c_customer_sk] + SortMergeJoin [ss_customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (29) + Sort [ss_customer_sk] + InputAdapter + ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #7 + InputAdapter + WholeStageCodegen (31) + Sort [c_customer_sk] + InputAdapter + ReusedExchange [c_customer_sk] #8 + InputAdapter + ReusedExchange [d_date_sk] #13 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt index 6d2b5b0013d8f..15ae5bfe24303 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/explain.txt @@ -1,76 +1,75 @@ == Physical Plan == -CollectLimit (72) -+- * HashAggregate (71) - +- Exchange (70) - +- * HashAggregate (69) - +- Union (68) - :- * Project (51) - : +- * BroadcastHashJoin Inner BuildRight (50) - : :- * Project (44) - : : +- * BroadcastHashJoin LeftSemi BuildRight (43) - : : :- * Project (27) - : : : +- * BroadcastHashJoin LeftSemi BuildRight (26) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.catalog_sales (1) - : : : +- BroadcastExchange (25) - : : : +- * Project (24) - : : : +- * Filter (23) - : : : +- * HashAggregate (22) - : : : +- Exchange (21) - : : : +- * HashAggregate (20) - : : : +- * Project (19) - : : : +- * BroadcastHashJoin Inner BuildRight (18) - : : : :- * Project (13) - : : : : +- * BroadcastHashJoin Inner BuildRight (12) - : : : : :- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.store_sales (4) - : : : : +- BroadcastExchange (11) - : : : : +- * Project (10) - : : : : +- * Filter (9) - : : : : +- * ColumnarToRow (8) - : : : : +- Scan parquet default.date_dim (7) - : : : +- BroadcastExchange (17) - : : : +- * Filter (16) - : : : +- * ColumnarToRow (15) - : : : +- Scan parquet default.item (14) - : : +- BroadcastExchange (42) - : : +- * Project (41) - : : +- * Filter (40) - : : +- * HashAggregate (39) - : : +- Exchange (38) - : : +- * HashAggregate (37) - : : +- * Project (36) - : : +- * BroadcastHashJoin Inner BuildRight (35) - : : :- * Filter (30) - : : : +- * ColumnarToRow (29) - : : : +- Scan parquet default.store_sales (28) - : : +- BroadcastExchange (34) - : : +- * Filter (33) - : : +- * ColumnarToRow (32) - : : +- Scan parquet default.customer (31) - : +- BroadcastExchange (49) - : +- * Project (48) - : +- * Filter (47) - : +- * ColumnarToRow (46) - : +- Scan parquet default.date_dim (45) - +- * Project (67) - +- * BroadcastHashJoin Inner BuildRight (66) - :- * Project (64) - : +- * BroadcastHashJoin LeftSemi BuildRight (63) - : :- * Project (57) - : : +- * BroadcastHashJoin LeftSemi BuildRight (56) - : : :- * Filter (54) - : : : +- * ColumnarToRow (53) - : : : +- Scan parquet default.web_sales (52) - : : +- ReusedExchange (55) - : +- BroadcastExchange (62) - : +- * Project (61) - : +- * Filter (60) - : +- * HashAggregate (59) - : +- ReusedExchange (58) - +- ReusedExchange (65) +* HashAggregate (71) ++- Exchange (70) + +- * HashAggregate (69) + +- Union (68) + :- * Project (51) + : +- * BroadcastHashJoin Inner BuildRight (50) + : :- * Project (44) + : : +- * BroadcastHashJoin LeftSemi BuildRight (43) + : : :- * Project (27) + : : : +- * BroadcastHashJoin LeftSemi BuildRight (26) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.catalog_sales (1) + : : : +- BroadcastExchange (25) + : : : +- * Project (24) + : : : +- * Filter (23) + : : : +- * HashAggregate (22) + : : : +- Exchange (21) + : : : +- * HashAggregate (20) + : : : +- * Project (19) + : : : +- * BroadcastHashJoin Inner BuildRight (18) + : : : :- * Project (13) + : : : : +- * BroadcastHashJoin Inner BuildRight (12) + : : : : :- * Filter (6) + : : : : : +- * ColumnarToRow (5) + : : : : : +- Scan parquet default.store_sales (4) + : : : : +- BroadcastExchange (11) + 
: : : : +- * Project (10) + : : : : +- * Filter (9) + : : : : +- * ColumnarToRow (8) + : : : : +- Scan parquet default.date_dim (7) + : : : +- BroadcastExchange (17) + : : : +- * Filter (16) + : : : +- * ColumnarToRow (15) + : : : +- Scan parquet default.item (14) + : : +- BroadcastExchange (42) + : : +- * Project (41) + : : +- * Filter (40) + : : +- * HashAggregate (39) + : : +- Exchange (38) + : : +- * HashAggregate (37) + : : +- * Project (36) + : : +- * BroadcastHashJoin Inner BuildRight (35) + : : :- * Filter (30) + : : : +- * ColumnarToRow (29) + : : : +- Scan parquet default.store_sales (28) + : : +- BroadcastExchange (34) + : : +- * Filter (33) + : : +- * ColumnarToRow (32) + : : +- Scan parquet default.customer (31) + : +- BroadcastExchange (49) + : +- * Project (48) + : +- * Filter (47) + : +- * ColumnarToRow (46) + : +- Scan parquet default.date_dim (45) + +- * Project (67) + +- * BroadcastHashJoin Inner BuildRight (66) + :- * Project (64) + : +- * BroadcastHashJoin LeftSemi BuildRight (63) + : :- * Project (57) + : : +- * BroadcastHashJoin LeftSemi BuildRight (56) + : : :- * Filter (54) + : : : +- * ColumnarToRow (53) + : : : +- Scan parquet default.web_sales (52) + : : +- ReusedExchange (55) + : +- BroadcastExchange (62) + : +- * Project (61) + : +- * Filter (60) + : +- * HashAggregate (59) + : +- ReusedExchange (58) + +- ReusedExchange (65) (1) Scan parquet default.catalog_sales @@ -398,139 +397,135 @@ Functions [1]: [sum(sales#40)] Aggregate Attributes [1]: [sum(sales#40)#57] Results [1]: [sum(sales#40)#57 AS sum(sales)#58] -(72) CollectLimit -Input [1]: [sum(sales)#58] -Arguments: 100 - ===== Subqueries ===== Subquery:1 Hosting operator id = 40 Hosting Expression = Subquery scalar-subquery#35, [id=#36] -* HashAggregate (94) -+- Exchange (93) - +- * HashAggregate (92) - +- * HashAggregate (91) - +- Exchange (90) - +- * HashAggregate (89) - +- * Project (88) - +- * BroadcastHashJoin Inner BuildRight (87) - :- * Project (81) - : +- * BroadcastHashJoin Inner BuildRight (80) - : :- * Filter (75) - : : +- * ColumnarToRow (74) - : : +- Scan parquet default.store_sales (73) - : +- BroadcastExchange (79) - : +- * Filter (78) - : +- * ColumnarToRow (77) - : +- Scan parquet default.customer (76) - +- BroadcastExchange (86) - +- * Project (85) - +- * Filter (84) - +- * ColumnarToRow (83) - +- Scan parquet default.date_dim (82) - - -(73) Scan parquet default.store_sales +* HashAggregate (93) ++- Exchange (92) + +- * HashAggregate (91) + +- * HashAggregate (90) + +- Exchange (89) + +- * HashAggregate (88) + +- * Project (87) + +- * BroadcastHashJoin Inner BuildRight (86) + :- * Project (80) + : +- * BroadcastHashJoin Inner BuildRight (79) + : :- * Filter (74) + : : +- * ColumnarToRow (73) + : : +- Scan parquet default.store_sales (72) + : +- BroadcastExchange (78) + : +- * Filter (77) + : +- * ColumnarToRow (76) + : +- Scan parquet default.customer (75) + +- BroadcastExchange (85) + +- * Project (84) + +- * Filter (83) + +- * ColumnarToRow (82) + +- Scan parquet default.date_dim (81) + + +(72) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price#25] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(74) ColumnarToRow [codegen id : 3] +(73) ColumnarToRow [codegen id : 3] Input [4]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price#25] -(75) Filter [codegen id : 3] +(74) Filter [codegen id 
: 3] Input [4]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price#25] Condition : (isnotnull(ss_customer_sk#23) AND isnotnull(ss_sold_date_sk#6)) -(76) Scan parquet default.customer +(75) Scan parquet default.customer Output [1]: [c_customer_sk#26] Batched: true Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct -(77) ColumnarToRow [codegen id : 1] +(76) ColumnarToRow [codegen id : 1] Input [1]: [c_customer_sk#26] -(78) Filter [codegen id : 1] +(77) Filter [codegen id : 1] Input [1]: [c_customer_sk#26] Condition : isnotnull(c_customer_sk#26) -(79) BroadcastExchange +(78) BroadcastExchange Input [1]: [c_customer_sk#26] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#59] -(80) BroadcastHashJoin [codegen id : 3] +(79) BroadcastHashJoin [codegen id : 3] Left keys [1]: [ss_customer_sk#23] Right keys [1]: [c_customer_sk#26] Join condition: None -(81) Project [codegen id : 3] +(80) Project [codegen id : 3] Output [4]: [ss_sold_date_sk#6, ss_quantity#24, ss_sales_price#25, c_customer_sk#26] Input [5]: [ss_sold_date_sk#6, ss_customer_sk#23, ss_quantity#24, ss_sales_price#25, c_customer_sk#26] -(82) Scan parquet default.date_dim +(81) Scan parquet default.date_dim Output [2]: [d_date_sk#8, d_year#10] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct -(83) ColumnarToRow [codegen id : 2] +(82) ColumnarToRow [codegen id : 2] Input [2]: [d_date_sk#8, d_year#10] -(84) Filter [codegen id : 2] +(83) Filter [codegen id : 2] Input [2]: [d_date_sk#8, d_year#10] Condition : (d_year#10 IN (2000,2001,2002,2003) AND isnotnull(d_date_sk#8)) -(85) Project [codegen id : 2] +(84) Project [codegen id : 2] Output [1]: [d_date_sk#8] Input [2]: [d_date_sk#8, d_year#10] -(86) BroadcastExchange +(85) BroadcastExchange Input [1]: [d_date_sk#8] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#60] -(87) BroadcastHashJoin [codegen id : 3] +(86) BroadcastHashJoin [codegen id : 3] Left keys [1]: [ss_sold_date_sk#6] Right keys [1]: [d_date_sk#8] Join condition: None -(88) Project [codegen id : 3] +(87) Project [codegen id : 3] Output [3]: [ss_quantity#24, ss_sales_price#25, c_customer_sk#26] Input [5]: [ss_sold_date_sk#6, ss_quantity#24, ss_sales_price#25, c_customer_sk#26, d_date_sk#8] -(89) HashAggregate [codegen id : 3] +(88) HashAggregate [codegen id : 3] Input [3]: [ss_quantity#24, ss_sales_price#25, c_customer_sk#26] Keys [1]: [c_customer_sk#26] Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#24 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#25 as decimal(12,2)))), DecimalType(18,2), true))] Aggregate Attributes [2]: [sum#61, isEmpty#62] Results [3]: [c_customer_sk#26, sum#63, isEmpty#64] -(90) Exchange +(89) Exchange Input [3]: [c_customer_sk#26, sum#63, isEmpty#64] Arguments: hashpartitioning(c_customer_sk#26, 5), true, [id=#65] -(91) HashAggregate [codegen id : 4] +(90) HashAggregate [codegen id : 4] Input [3]: [c_customer_sk#26, sum#63, isEmpty#64] Keys [1]: [c_customer_sk#26] Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#24 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#25 as decimal(12,2)))), DecimalType(18,2), true))] Aggregate Attributes [1]: 
[sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#24 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#25 as decimal(12,2)))), DecimalType(18,2), true))#66] Results [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#24 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#25 as decimal(12,2)))), DecimalType(18,2), true))#66 AS csales#67] -(92) HashAggregate [codegen id : 4] +(91) HashAggregate [codegen id : 4] Input [1]: [csales#67] Keys: [] Functions [1]: [partial_max(csales#67)] Aggregate Attributes [1]: [max#68] Results [1]: [max#69] -(93) Exchange +(92) Exchange Input [1]: [max#69] Arguments: SinglePartition, true, [id=#70] -(94) HashAggregate [codegen id : 5] +(93) HashAggregate [codegen id : 5] Input [1]: [max#69] Keys: [] Functions [1]: [max(csales#67)] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/simplified.txt index d860e18574f2a..aebe2bd3e1a6c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23a/simplified.txt @@ -1,143 +1,142 @@ -CollectLimit - WholeStageCodegen (20) - HashAggregate [sum,isEmpty] [sum(sales),sum(sales),sum,isEmpty] - InputAdapter - Exchange #1 - WholeStageCodegen (19) - HashAggregate [sales] [sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (9) - Project [cs_quantity,cs_list_price] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_quantity,cs_list_price] - BroadcastHashJoin [cs_bill_customer_sk,c_customer_sk] - Project [cs_sold_date_sk,cs_bill_customer_sk,cs_quantity,cs_list_price] - BroadcastHashJoin [cs_item_sk,item_sk] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_quantity,cs_list_price] - InputAdapter - BroadcastExchange #2 - WholeStageCodegen (4) - Project [item_sk] - Filter [count(1)] - HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] - InputAdapter - Exchange [substr(i_item_desc, 1, 30),i_item_sk,d_date] #3 - WholeStageCodegen (3) - HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] - Project [d_date,i_item_sk,i_item_desc] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,d_date] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_item_sk] +WholeStageCodegen (20) + HashAggregate [sum,isEmpty] [sum(sales),sum(sales),sum,isEmpty] + InputAdapter + Exchange #1 + WholeStageCodegen (19) + HashAggregate [sales] [sum,isEmpty,sum,isEmpty] + InputAdapter + Union + WholeStageCodegen (9) + Project [cs_quantity,cs_list_price] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_quantity,cs_list_price] + BroadcastHashJoin [cs_bill_customer_sk,c_customer_sk] + Project [cs_sold_date_sk,cs_bill_customer_sk,cs_quantity,cs_list_price] + BroadcastHashJoin [cs_item_sk,item_sk] + Filter [cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_quantity,cs_list_price] + InputAdapter + BroadcastExchange #2 + WholeStageCodegen (4) + Project [item_sk] + Filter [count(1)] + HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] + 
InputAdapter + Exchange [substr(i_item_desc, 1, 30),i_item_sk,d_date] #3 + WholeStageCodegen (3) + HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] + Project [d_date,i_item_sk,i_item_desc] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,d_date] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk,d_date] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date,d_year] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (1) - Project [d_date_sk,d_date] - Filter [d_year,d_date_sk] + Scan parquet default.item [i_item_sk,i_item_desc] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (7) + Project [c_customer_sk] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] + Subquery #1 + WholeStageCodegen (5) + HashAggregate [max] [max(csales),tpcds_cmax,max] + InputAdapter + Exchange #9 + WholeStageCodegen (4) + HashAggregate [csales] [max,max] + HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),csales,sum,isEmpty] + InputAdapter + Exchange [c_customer_sk] #10 + WholeStageCodegen (3) + HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] + Project [ss_quantity,ss_sales_price,c_customer_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_quantity,ss_sales_price,c_customer_sk] + BroadcastHashJoin [ss_customer_sk,c_customer_sk] + Filter [ss_customer_sk,ss_sold_date_sk] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date,d_year] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (2) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_item_desc] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (7) - Project [c_customer_sk] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] - Subquery #1 - WholeStageCodegen (5) - HashAggregate [max] [max(csales),tpcds_cmax,max] - InputAdapter - Exchange #9 - WholeStageCodegen (4) - HashAggregate [csales] [max,max] - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),csales,sum,isEmpty] - InputAdapter - Exchange [c_customer_sk] #10 - WholeStageCodegen (3) - HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] - Project [ss_quantity,ss_sales_price,c_customer_sk] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_quantity,ss_sales_price,c_customer_sk] - BroadcastHashJoin [ss_customer_sk,c_customer_sk] - Filter [ss_customer_sk,ss_sold_date_sk] - ColumnarToRow - 
InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk,ss_quantity,ss_sales_price] - InputAdapter - BroadcastExchange #11 - WholeStageCodegen (1) - Filter [c_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk] - InputAdapter - BroadcastExchange #12 - WholeStageCodegen (2) - Project [d_date_sk] - Filter [d_year,d_date_sk] + Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk,ss_quantity,ss_sales_price] + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (1) + Filter [c_customer_sk] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] - InputAdapter - Exchange [c_customer_sk] #7 - WholeStageCodegen (6) - HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] - Project [ss_quantity,ss_sales_price,c_customer_sk] - BroadcastHashJoin [ss_customer_sk,c_customer_sk] - Filter [ss_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_customer_sk,ss_quantity,ss_sales_price] - InputAdapter - BroadcastExchange #8 - WholeStageCodegen (5) - Filter [c_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk] - InputAdapter - BroadcastExchange #13 - WholeStageCodegen (8) - Project [d_date_sk] - Filter [d_year,d_moy,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - WholeStageCodegen (18) - Project [ws_quantity,ws_list_price] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Project [ws_sold_date_sk,ws_quantity,ws_list_price] - BroadcastHashJoin [ws_bill_customer_sk,c_customer_sk] - Project [ws_sold_date_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] - BroadcastHashJoin [ws_item_sk,item_sk] - Filter [ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] - InputAdapter - ReusedExchange [item_sk] #2 - InputAdapter - BroadcastExchange #14 - WholeStageCodegen (16) - Project [c_customer_sk] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [tpcds_cmax] #1 - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] - InputAdapter - ReusedExchange [c_customer_sk,sum,isEmpty] #7 - InputAdapter - ReusedExchange [d_date_sk] #13 + Scan parquet default.customer [c_customer_sk] + InputAdapter + BroadcastExchange #12 + WholeStageCodegen (2) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + HashAggregate [c_customer_sk,sum,isEmpty] 
[sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] + InputAdapter + Exchange [c_customer_sk] #7 + WholeStageCodegen (6) + HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] + Project [ss_quantity,ss_sales_price,c_customer_sk] + BroadcastHashJoin [ss_customer_sk,c_customer_sk] + Filter [ss_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_customer_sk,ss_quantity,ss_sales_price] + InputAdapter + BroadcastExchange #8 + WholeStageCodegen (5) + Filter [c_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer [c_customer_sk] + InputAdapter + BroadcastExchange #13 + WholeStageCodegen (8) + Project [d_date_sk] + Filter [d_year,d_moy,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + WholeStageCodegen (18) + Project [ws_quantity,ws_list_price] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Project [ws_sold_date_sk,ws_quantity,ws_list_price] + BroadcastHashJoin [ws_bill_customer_sk,c_customer_sk] + Project [ws_sold_date_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] + BroadcastHashJoin [ws_item_sk,item_sk] + Filter [ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] + InputAdapter + ReusedExchange [item_sk] #2 + InputAdapter + BroadcastExchange #14 + WholeStageCodegen (16) + Project [c_customer_sk] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [tpcds_cmax] #1 + HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] + InputAdapter + ReusedExchange [c_customer_sk,sum,isEmpty] #7 + InputAdapter + ReusedExchange [d_date_sk] #13 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt index 51b85142f37ff..9a4c2b064d091 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/explain.txt @@ -1,134 +1,140 @@ == Physical Plan == -TakeOrderedAndProject (130) -+- Union (129) - :- * HashAggregate (82) - : +- Exchange (81) - : +- * HashAggregate (80) - : +- * Project (79) - : +- * SortMergeJoin Inner (78) - : :- * Project (59) - : : +- * BroadcastHashJoin Inner BuildRight (58) - : : :- SortMergeJoin LeftSemi (52) - : : : :- * Sort (34) - : : : : +- Exchange (33) - : : : : +- * Project (32) - : : : : +- SortMergeJoin LeftSemi (31) +TakeOrderedAndProject (136) ++- Union (135) + :- * HashAggregate (80) + : +- Exchange (79) + : +- * HashAggregate (78) + : +- 
* Project (77) + : +- * SortMergeJoin Inner (76) + : :- * Project (58) + : : +- * BroadcastHashJoin Inner BuildRight (57) + : : :- SortMergeJoin LeftSemi (51) + : : : :- * Sort (33) + : : : : +- Exchange (32) + : : : : +- * Project (31) + : : : : +- SortMergeJoin LeftSemi (30) : : : : :- * Sort (5) : : : : : +- Exchange (4) : : : : : +- * Filter (3) : : : : : +- * ColumnarToRow (2) : : : : : +- Scan parquet default.catalog_sales (1) - : : : : +- * Sort (30) - : : : : +- Exchange (29) - : : : : +- * Project (28) - : : : : +- * Filter (27) - : : : : +- * HashAggregate (26) - : : : : +- * HashAggregate (25) - : : : : +- * Project (24) - : : : : +- * SortMergeJoin Inner (23) - : : : : :- * Sort (17) - : : : : : +- Exchange (16) - : : : : : +- * Project (15) - : : : : : +- * BroadcastHashJoin Inner BuildRight (14) - : : : : : :- * Filter (8) - : : : : : : +- * ColumnarToRow (7) - : : : : : : +- Scan parquet default.store_sales (6) - : : : : : +- BroadcastExchange (13) - : : : : : +- * Project (12) - : : : : : +- * Filter (11) - : : : : : +- * ColumnarToRow (10) - : : : : : +- Scan parquet default.date_dim (9) - : : : : +- * Sort (22) - : : : : +- Exchange (21) - : : : : +- * Filter (20) - : : : : +- * ColumnarToRow (19) - : : : : +- Scan parquet default.item (18) - : : : +- * Sort (51) - : : : +- * Project (50) - : : : +- * Filter (49) - : : : +- * HashAggregate (48) - : : : +- * HashAggregate (47) - : : : +- * Project (46) - : : : +- * SortMergeJoin Inner (45) - : : : :- * Sort (39) - : : : : +- Exchange (38) - : : : : +- * Filter (37) - : : : : +- * ColumnarToRow (36) - : : : : +- Scan parquet default.store_sales (35) - : : : +- * Sort (44) - : : : +- Exchange (43) - : : : +- * Filter (42) - : : : +- * ColumnarToRow (41) - : : : +- Scan parquet default.customer (40) - : : +- BroadcastExchange (57) - : : +- * Project (56) - : : +- * Filter (55) - : : +- * ColumnarToRow (54) - : : +- Scan parquet default.date_dim (53) - : +- SortMergeJoin LeftSemi (77) - : :- * Sort (64) - : : +- Exchange (63) - : : +- * Filter (62) - : : +- * ColumnarToRow (61) - : : +- Scan parquet default.customer (60) - : +- * Sort (76) - : +- Exchange (75) - : +- * Project (74) - : +- * Filter (73) - : +- * HashAggregate (72) - : +- * HashAggregate (71) - : +- * Project (70) - : +- * SortMergeJoin Inner (69) - : :- * Sort (66) - : : +- ReusedExchange (65) - : +- * Sort (68) - : +- ReusedExchange (67) - +- * HashAggregate (128) - +- Exchange (127) - +- * HashAggregate (126) - +- * Project (125) - +- * SortMergeJoin Inner (124) - :- * Project (108) - : +- * BroadcastHashJoin Inner BuildRight (107) - : :- SortMergeJoin LeftSemi (105) - : : :- * Sort (93) - : : : +- Exchange (92) - : : : +- * Project (91) - : : : +- SortMergeJoin LeftSemi (90) - : : : :- * Sort (87) - : : : : +- Exchange (86) - : : : : +- * Filter (85) - : : : : +- * ColumnarToRow (84) - : : : : +- Scan parquet default.web_sales (83) - : : : +- * Sort (89) - : : : +- ReusedExchange (88) - : : +- * Sort (104) - : : +- * Project (103) - : : +- * Filter (102) - : : +- * HashAggregate (101) - : : +- * HashAggregate (100) - : : +- * Project (99) - : : +- * SortMergeJoin Inner (98) - : : :- * Sort (95) - : : : +- ReusedExchange (94) - : : +- * Sort (97) - : : +- ReusedExchange (96) - : +- ReusedExchange (106) - +- SortMergeJoin LeftSemi (123) - :- * Sort (110) - : +- ReusedExchange (109) - +- * Sort (122) - +- Exchange (121) - +- * Project (120) - +- * Filter (119) - +- * HashAggregate (118) - +- * HashAggregate (117) - +- * Project (116) - +- * SortMergeJoin Inner 
(115) - :- * Sort (112) - : +- ReusedExchange (111) - +- * Sort (114) - +- ReusedExchange (113) + : : : : +- * Sort (29) + : : : : +- * Project (28) + : : : : +- * Filter (27) + : : : : +- * HashAggregate (26) + : : : : +- * HashAggregate (25) + : : : : +- * Project (24) + : : : : +- * SortMergeJoin Inner (23) + : : : : :- * Sort (17) + : : : : : +- Exchange (16) + : : : : : +- * Project (15) + : : : : : +- * BroadcastHashJoin Inner BuildRight (14) + : : : : : :- * Filter (8) + : : : : : : +- * ColumnarToRow (7) + : : : : : : +- Scan parquet default.store_sales (6) + : : : : : +- BroadcastExchange (13) + : : : : : +- * Project (12) + : : : : : +- * Filter (11) + : : : : : +- * ColumnarToRow (10) + : : : : : +- Scan parquet default.date_dim (9) + : : : : +- * Sort (22) + : : : : +- Exchange (21) + : : : : +- * Filter (20) + : : : : +- * ColumnarToRow (19) + : : : : +- Scan parquet default.item (18) + : : : +- * Sort (50) + : : : +- * Project (49) + : : : +- * Filter (48) + : : : +- * HashAggregate (47) + : : : +- * HashAggregate (46) + : : : +- * Project (45) + : : : +- * SortMergeJoin Inner (44) + : : : :- * Sort (38) + : : : : +- Exchange (37) + : : : : +- * Filter (36) + : : : : +- * ColumnarToRow (35) + : : : : +- Scan parquet default.store_sales (34) + : : : +- * Sort (43) + : : : +- Exchange (42) + : : : +- * Filter (41) + : : : +- * ColumnarToRow (40) + : : : +- Scan parquet default.customer (39) + : : +- BroadcastExchange (56) + : : +- * Project (55) + : : +- * Filter (54) + : : +- * ColumnarToRow (53) + : : +- Scan parquet default.date_dim (52) + : +- SortMergeJoin LeftSemi (75) + : :- * Sort (63) + : : +- Exchange (62) + : : +- * Filter (61) + : : +- * ColumnarToRow (60) + : : +- Scan parquet default.customer (59) + : +- * Sort (74) + : +- * Project (73) + : +- * Filter (72) + : +- * HashAggregate (71) + : +- * HashAggregate (70) + : +- * Project (69) + : +- * SortMergeJoin Inner (68) + : :- * Sort (65) + : : +- ReusedExchange (64) + : +- * Sort (67) + : +- ReusedExchange (66) + +- * HashAggregate (134) + +- Exchange (133) + +- * HashAggregate (132) + +- * Project (131) + +- * SortMergeJoin Inner (130) + :- * Project (115) + : +- * BroadcastHashJoin Inner BuildRight (114) + : :- SortMergeJoin LeftSemi (112) + : : :- * Sort (100) + : : : +- Exchange (99) + : : : +- * Project (98) + : : : +- SortMergeJoin LeftSemi (97) + : : : :- * Sort (85) + : : : : +- Exchange (84) + : : : : +- * Filter (83) + : : : : +- * ColumnarToRow (82) + : : : : +- Scan parquet default.web_sales (81) + : : : +- * Sort (96) + : : : +- * Project (95) + : : : +- * Filter (94) + : : : +- * HashAggregate (93) + : : : +- * HashAggregate (92) + : : : +- * Project (91) + : : : +- * SortMergeJoin Inner (90) + : : : :- * Sort (87) + : : : : +- ReusedExchange (86) + : : : +- * Sort (89) + : : : +- ReusedExchange (88) + : : +- * Sort (111) + : : +- * Project (110) + : : +- * Filter (109) + : : +- * HashAggregate (108) + : : +- * HashAggregate (107) + : : +- * Project (106) + : : +- * SortMergeJoin Inner (105) + : : :- * Sort (102) + : : : +- ReusedExchange (101) + : : +- * Sort (104) + : : +- ReusedExchange (103) + : +- ReusedExchange (113) + +- SortMergeJoin LeftSemi (129) + :- * Sort (117) + : +- ReusedExchange (116) + +- * Sort (128) + +- * Project (127) + +- * Filter (126) + +- * HashAggregate (125) + +- * HashAggregate (124) + +- * Project (123) + +- * SortMergeJoin Inner (122) + :- * Sort (119) + : +- ReusedExchange (118) + +- * Sort (121) + +- ReusedExchange (120) (1) Scan parquet default.catalog_sales @@ 
-259,612 +265,642 @@ Condition : (count(1)#22 > 4) Output [1]: [item_sk#21] Input [2]: [item_sk#21, count(1)#22] -(29) Exchange -Input [1]: [item_sk#21] -Arguments: hashpartitioning(item_sk#21, 5), true, [id=#23] - -(30) Sort [codegen id : 9] +(29) Sort [codegen id : 8] Input [1]: [item_sk#21] Arguments: [item_sk#21 ASC NULLS FIRST], false, 0 -(31) SortMergeJoin +(30) SortMergeJoin Left keys [1]: [cs_item_sk#3] Right keys [1]: [item_sk#21] Join condition: None -(32) Project [codegen id : 10] +(31) Project [codegen id : 9] Output [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] Input [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3, cs_quantity#4, cs_list_price#5] -(33) Exchange +(32) Exchange Input [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] -Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), true, [id=#24] +Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), true, [id=#23] -(34) Sort [codegen id : 11] +(33) Sort [codegen id : 10] Input [4]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] Arguments: [cs_bill_customer_sk#2 ASC NULLS FIRST], false, 0 -(35) Scan parquet default.store_sales -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(34) Scan parquet default.store_sales +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk)] ReadSchema: struct -(36) ColumnarToRow [codegen id : 12] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(35) ColumnarToRow [codegen id : 11] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(37) Filter [codegen id : 12] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Condition : isnotnull(ss_customer_sk#25) +(36) Filter [codegen id : 11] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Condition : isnotnull(ss_customer_sk#24) -(38) Exchange -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: hashpartitioning(ss_customer_sk#25, 5), true, [id=#28] +(37) Exchange +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: hashpartitioning(ss_customer_sk#24, 5), true, [id=#27] -(39) Sort [codegen id : 13] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(38) Sort [codegen id : 12] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], false, 0 -(40) Scan parquet default.customer -Output [1]: [c_customer_sk#29] +(39) Scan parquet default.customer +Output [1]: [c_customer_sk#28] Batched: true Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct -(41) ColumnarToRow [codegen id : 14] -Input [1]: [c_customer_sk#29] +(40) ColumnarToRow [codegen id : 13] +Input [1]: [c_customer_sk#28] -(42) Filter [codegen id : 14] -Input [1]: [c_customer_sk#29] -Condition : isnotnull(c_customer_sk#29) +(41) Filter [codegen id : 13] +Input [1]: [c_customer_sk#28] +Condition : isnotnull(c_customer_sk#28) -(43) Exchange -Input [1]: [c_customer_sk#29] -Arguments: hashpartitioning(c_customer_sk#29, 5), true, [id=#30] +(42) Exchange +Input [1]: [c_customer_sk#28] +Arguments: hashpartitioning(c_customer_sk#28, 5), true, [id=#29] -(44) Sort [codegen id : 15] -Input [1]: [c_customer_sk#29] -Arguments: 
[c_customer_sk#29 ASC NULLS FIRST], false, 0 +(43) Sort [codegen id : 14] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(45) SortMergeJoin [codegen id : 16] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(44) SortMergeJoin [codegen id : 15] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: [c_customer_sk#28] Join condition: None -(46) Project [codegen id : 16] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(47) HashAggregate [codegen id : 16] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#31, isEmpty#32] -Results [3]: [c_customer_sk#29, sum#33, isEmpty#34] - -(48) HashAggregate [codegen id : 16] -Input [3]: [c_customer_sk#29, sum#33, isEmpty#34] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#35] -Results [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#35 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] - -(49) Filter [codegen id : 16] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(Subquery scalar-subquery#37, [id=#38] as decimal(32,6)))), DecimalType(38,8), true))) - -(50) Project [codegen id : 16] -Output [1]: [c_customer_sk#29] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] - -(51) Sort [codegen id : 16] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 - -(52) SortMergeJoin +(45) Project [codegen id : 15] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(46) HashAggregate [codegen id : 15] +Input [3]: [ss_quantity#25, ss_sales_price#26, 
c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#30, isEmpty#31] +Results [3]: [c_customer_sk#28, sum#32, isEmpty#33] + +(47) HashAggregate [codegen id : 15] +Input [3]: [c_customer_sk#28, sum#32, isEmpty#33] +Keys [1]: [c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#34] +Results [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#34 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] + +(48) Filter [codegen id : 15] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(Subquery scalar-subquery#36, [id=#37] as decimal(32,6)))), DecimalType(38,8), true))) + +(49) Project [codegen id : 15] +Output [1]: [c_customer_sk#28] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] + +(50) Sort [codegen id : 15] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 + +(51) SortMergeJoin Left keys [1]: [cs_bill_customer_sk#2] -Right keys [1]: [c_customer_sk#29] +Right keys [1]: [c_customer_sk#28] Join condition: None -(53) Scan parquet default.date_dim -Output [3]: [d_date_sk#9, d_year#11, d_moy#39] +(52) Scan parquet default.date_dim +Output [3]: [d_date_sk#9, d_year#11, d_moy#38] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,2), IsNotNull(d_date_sk)] ReadSchema: struct -(54) ColumnarToRow [codegen id : 17] -Input [3]: [d_date_sk#9, d_year#11, d_moy#39] +(53) ColumnarToRow [codegen id : 16] +Input [3]: [d_date_sk#9, d_year#11, d_moy#38] -(55) Filter [codegen id : 17] -Input [3]: [d_date_sk#9, d_year#11, d_moy#39] -Condition : ((((isnotnull(d_year#11) AND isnotnull(d_moy#39)) AND (d_year#11 = 2000)) AND (d_moy#39 = 2)) AND isnotnull(d_date_sk#9)) +(54) Filter [codegen id : 16] +Input [3]: [d_date_sk#9, 
d_year#11, d_moy#38] +Condition : ((((isnotnull(d_year#11) AND isnotnull(d_moy#38)) AND (d_year#11 = 2000)) AND (d_moy#38 = 2)) AND isnotnull(d_date_sk#9)) -(56) Project [codegen id : 17] +(55) Project [codegen id : 16] Output [1]: [d_date_sk#9] -Input [3]: [d_date_sk#9, d_year#11, d_moy#39] +Input [3]: [d_date_sk#9, d_year#11, d_moy#38] -(57) BroadcastExchange +(56) BroadcastExchange Input [1]: [d_date_sk#9] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#40] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#39] -(58) BroadcastHashJoin [codegen id : 18] +(57) BroadcastHashJoin [codegen id : 17] Left keys [1]: [cs_sold_date_sk#1] Right keys [1]: [d_date_sk#9] Join condition: None -(59) Project [codegen id : 18] +(58) Project [codegen id : 17] Output [3]: [cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5] Input [5]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5, d_date_sk#9] -(60) Scan parquet default.customer -Output [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] +(59) Scan parquet default.customer +Output [3]: [c_customer_sk#28, c_first_name#40, c_last_name#41] Batched: true Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct -(61) ColumnarToRow [codegen id : 19] -Input [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] +(60) ColumnarToRow [codegen id : 18] +Input [3]: [c_customer_sk#28, c_first_name#40, c_last_name#41] -(62) Filter [codegen id : 19] -Input [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] -Condition : isnotnull(c_customer_sk#29) +(61) Filter [codegen id : 18] +Input [3]: [c_customer_sk#28, c_first_name#40, c_last_name#41] +Condition : isnotnull(c_customer_sk#28) -(63) Exchange -Input [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] -Arguments: hashpartitioning(c_customer_sk#29, 5), true, [id=#43] +(62) Exchange +Input [3]: [c_customer_sk#28, c_first_name#40, c_last_name#41] +Arguments: hashpartitioning(c_customer_sk#28, 5), true, [id=#42] -(64) Sort [codegen id : 20] -Input [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(63) Sort [codegen id : 19] +Input [3]: [c_customer_sk#28, c_first_name#40, c_last_name#41] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(65) ReusedExchange [Reuses operator id: 38] -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(64) ReusedExchange [Reuses operator id: 37] +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(66) Sort [codegen id : 22] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(65) Sort [codegen id : 21] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], false, 0 -(67) ReusedExchange [Reuses operator id: 43] -Output [1]: [c_customer_sk#29] +(66) ReusedExchange [Reuses operator id: 42] +Output [1]: [c_customer_sk#28] -(68) Sort [codegen id : 24] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(67) Sort [codegen id : 23] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(69) SortMergeJoin [codegen id : 25] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(68) SortMergeJoin [codegen id : 24] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: 
[c_customer_sk#28] Join condition: None -(70) Project [codegen id : 25] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(71) HashAggregate [codegen id : 25] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#31, isEmpty#32] -Results [3]: [c_customer_sk#29, sum#33, isEmpty#34] - -(72) HashAggregate [codegen id : 25] -Input [3]: [c_customer_sk#29, sum#33, isEmpty#34] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#35] -Results [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#35 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] - -(73) Filter [codegen id : 25] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#37, [id=#38] as decimal(32,6)))), DecimalType(38,8), true))) - -(74) Project [codegen id : 25] -Output [1]: [c_customer_sk#29 AS c_customer_sk#29#44] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#36] - -(75) Exchange -Input [1]: [c_customer_sk#29#44] -Arguments: hashpartitioning(c_customer_sk#29#44, 5), true, [id=#45] - -(76) Sort [codegen id : 26] -Input [1]: [c_customer_sk#29#44] -Arguments: [c_customer_sk#29#44 ASC NULLS FIRST], false, 0 - -(77) SortMergeJoin -Left keys [1]: [c_customer_sk#29] -Right keys [1]: [c_customer_sk#29#44] +(69) Project [codegen id : 24] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(70) HashAggregate [codegen id : 24] +Input [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 
as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#30, isEmpty#31] +Results [3]: [c_customer_sk#28, sum#32, isEmpty#33] + +(71) HashAggregate [codegen id : 24] +Input [3]: [c_customer_sk#28, sum#32, isEmpty#33] +Keys [1]: [c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#34] +Results [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#34 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] + +(72) Filter [codegen id : 24] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#36, [id=#37] as decimal(32,6)))), DecimalType(38,8), true))) + +(73) Project [codegen id : 24] +Output [1]: [c_customer_sk#28 AS c_customer_sk#28#43] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#35] + +(74) Sort [codegen id : 24] +Input [1]: [c_customer_sk#28#43] +Arguments: [c_customer_sk#28#43 ASC NULLS FIRST], false, 0 + +(75) SortMergeJoin +Left keys [1]: [c_customer_sk#28] +Right keys [1]: [c_customer_sk#28#43] Join condition: None -(78) SortMergeJoin [codegen id : 27] +(76) SortMergeJoin [codegen id : 25] Left keys [1]: [cs_bill_customer_sk#2] -Right keys [1]: [c_customer_sk#29] +Right keys [1]: [c_customer_sk#28] Join condition: None -(79) Project [codegen id : 27] -Output [4]: [cs_quantity#4, cs_list_price#5, c_first_name#41, c_last_name#42] -Input [6]: [cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5, c_customer_sk#29, c_first_name#41, c_last_name#42] +(77) Project [codegen id : 25] +Output [4]: [cs_quantity#4, cs_list_price#5, c_first_name#40, c_last_name#41] +Input [6]: [cs_bill_customer_sk#2, cs_quantity#4, cs_list_price#5, c_customer_sk#28, c_first_name#40, c_last_name#41] -(80) HashAggregate [codegen id : 27] -Input [4]: [cs_quantity#4, cs_list_price#5, c_first_name#41, c_last_name#42] -Keys [2]: [c_last_name#42, c_first_name#41] +(78) HashAggregate [codegen id : 25] +Input [4]: [cs_quantity#4, cs_list_price#5, c_first_name#40, c_last_name#41] +Keys 
[2]: [c_last_name#41, c_first_name#40] Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#46, isEmpty#47] -Results [4]: [c_last_name#42, c_first_name#41, sum#48, isEmpty#49] +Aggregate Attributes [2]: [sum#44, isEmpty#45] +Results [4]: [c_last_name#41, c_first_name#40, sum#46, isEmpty#47] -(81) Exchange -Input [4]: [c_last_name#42, c_first_name#41, sum#48, isEmpty#49] -Arguments: hashpartitioning(c_last_name#42, c_first_name#41, 5), true, [id=#50] +(79) Exchange +Input [4]: [c_last_name#41, c_first_name#40, sum#46, isEmpty#47] +Arguments: hashpartitioning(c_last_name#41, c_first_name#40, 5), true, [id=#48] -(82) HashAggregate [codegen id : 28] -Input [4]: [c_last_name#42, c_first_name#41, sum#48, isEmpty#49] -Keys [2]: [c_last_name#42, c_first_name#41] +(80) HashAggregate [codegen id : 26] +Input [4]: [c_last_name#41, c_first_name#40, sum#46, isEmpty#47] +Keys [2]: [c_last_name#41, c_first_name#40] Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true))#51] -Results [3]: [c_last_name#42, c_first_name#41, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true))#51 AS sales#52] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true))#49] +Results [3]: [c_last_name#41, c_first_name#40, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#4 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#5 as decimal(12,2)))), DecimalType(18,2), true))#49 AS sales#50] -(83) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#53, ws_item_sk#54, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] +(81) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#51, ws_item_sk#52, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_bill_customer_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(84) ColumnarToRow [codegen id : 29] -Input [5]: [ws_sold_date_sk#53, ws_item_sk#54, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] +(82) ColumnarToRow [codegen id : 27] +Input [5]: [ws_sold_date_sk#51, ws_item_sk#52, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] + +(83) Filter [codegen id : 27] +Input [5]: [ws_sold_date_sk#51, ws_item_sk#52, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] +Condition : (isnotnull(ws_bill_customer_sk#53) AND isnotnull(ws_sold_date_sk#51)) + +(84) Exchange +Input [5]: [ws_sold_date_sk#51, ws_item_sk#52, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] +Arguments: hashpartitioning(ws_item_sk#52, 5), true, [id=#56] -(85) Filter [codegen id : 29] -Input [5]: [ws_sold_date_sk#53, ws_item_sk#54, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] -Condition : 
(isnotnull(ws_bill_customer_sk#55) AND isnotnull(ws_sold_date_sk#53)) +(85) Sort [codegen id : 28] +Input [5]: [ws_sold_date_sk#51, ws_item_sk#52, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] +Arguments: [ws_item_sk#52 ASC NULLS FIRST], false, 0 -(86) Exchange -Input [5]: [ws_sold_date_sk#53, ws_item_sk#54, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] -Arguments: hashpartitioning(ws_item_sk#54, 5), true, [id=#58] +(86) ReusedExchange [Reuses operator id: 16] +Output [2]: [ss_item_sk#8, d_date#10] + +(87) Sort [codegen id : 31] +Input [2]: [ss_item_sk#8, d_date#10] +Arguments: [ss_item_sk#8 ASC NULLS FIRST], false, 0 + +(88) ReusedExchange [Reuses operator id: 21] +Output [2]: [i_item_sk#14, i_item_desc#15] + +(89) Sort [codegen id : 33] +Input [2]: [i_item_sk#14, i_item_desc#15] +Arguments: [i_item_sk#14 ASC NULLS FIRST], false, 0 + +(90) SortMergeJoin [codegen id : 34] +Left keys [1]: [ss_item_sk#8] +Right keys [1]: [i_item_sk#14] +Join condition: None + +(91) Project [codegen id : 34] +Output [3]: [d_date#10, i_item_sk#14, i_item_desc#15] +Input [4]: [ss_item_sk#8, d_date#10, i_item_sk#14, i_item_desc#15] + +(92) HashAggregate [codegen id : 34] +Input [3]: [d_date#10, i_item_sk#14, i_item_desc#15] +Keys [3]: [substr(i_item_desc#15, 1, 30) AS substr(i_item_desc#15, 1, 30)#57, i_item_sk#14, d_date#10] +Functions [1]: [partial_count(1)] +Aggregate Attributes [1]: [count#58] +Results [4]: [substr(i_item_desc#15, 1, 30)#57, i_item_sk#14, d_date#10, count#59] + +(93) HashAggregate [codegen id : 34] +Input [4]: [substr(i_item_desc#15, 1, 30)#57, i_item_sk#14, d_date#10, count#59] +Keys [3]: [substr(i_item_desc#15, 1, 30)#57, i_item_sk#14, d_date#10] +Functions [1]: [count(1)] +Aggregate Attributes [1]: [count(1)#60] +Results [2]: [i_item_sk#14 AS item_sk#21, count(1)#60 AS count(1)#61] -(87) Sort [codegen id : 30] -Input [5]: [ws_sold_date_sk#53, ws_item_sk#54, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] -Arguments: [ws_item_sk#54 ASC NULLS FIRST], false, 0 +(94) Filter [codegen id : 34] +Input [2]: [item_sk#21, count(1)#61] +Condition : (count(1)#61 > 4) -(88) ReusedExchange [Reuses operator id: 29] +(95) Project [codegen id : 34] Output [1]: [item_sk#21] +Input [2]: [item_sk#21, count(1)#61] -(89) Sort [codegen id : 37] +(96) Sort [codegen id : 34] Input [1]: [item_sk#21] Arguments: [item_sk#21 ASC NULLS FIRST], false, 0 -(90) SortMergeJoin -Left keys [1]: [ws_item_sk#54] +(97) SortMergeJoin +Left keys [1]: [ws_item_sk#52] Right keys [1]: [item_sk#21] Join condition: None -(91) Project [codegen id : 38] -Output [4]: [ws_sold_date_sk#53, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] -Input [5]: [ws_sold_date_sk#53, ws_item_sk#54, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] +(98) Project [codegen id : 35] +Output [4]: [ws_sold_date_sk#51, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] +Input [5]: [ws_sold_date_sk#51, ws_item_sk#52, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] -(92) Exchange -Input [4]: [ws_sold_date_sk#53, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] -Arguments: hashpartitioning(ws_bill_customer_sk#55, 5), true, [id=#59] +(99) Exchange +Input [4]: [ws_sold_date_sk#51, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] +Arguments: hashpartitioning(ws_bill_customer_sk#53, 5), true, [id=#62] -(93) Sort [codegen id : 39] -Input [4]: [ws_sold_date_sk#53, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] -Arguments: [ws_bill_customer_sk#55 ASC NULLS FIRST], false, 0 
+(100) Sort [codegen id : 36] +Input [4]: [ws_sold_date_sk#51, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] +Arguments: [ws_bill_customer_sk#53 ASC NULLS FIRST], false, 0 -(94) ReusedExchange [Reuses operator id: 38] -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(101) ReusedExchange [Reuses operator id: 37] +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(95) Sort [codegen id : 41] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(102) Sort [codegen id : 38] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], false, 0 -(96) ReusedExchange [Reuses operator id: 43] -Output [1]: [c_customer_sk#29] +(103) ReusedExchange [Reuses operator id: 42] +Output [1]: [c_customer_sk#28] -(97) Sort [codegen id : 43] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(104) Sort [codegen id : 40] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(98) SortMergeJoin [codegen id : 44] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(105) SortMergeJoin [codegen id : 41] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: [c_customer_sk#28] Join condition: None -(99) Project [codegen id : 44] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(100) HashAggregate [codegen id : 44] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#60, isEmpty#61] -Results [3]: [c_customer_sk#29, sum#62, isEmpty#63] - -(101) HashAggregate [codegen id : 44] -Input [3]: [c_customer_sk#29, sum#62, isEmpty#63] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#64] -Results [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65] - -(102) Filter [codegen id : 44] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#37, [id=#38] as decimal(32,6)))), DecimalType(38,8), true))) - -(103) Project [codegen id : 44] -Output [1]: [c_customer_sk#29] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65] - -(104) Sort [codegen id : 44] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 - -(105) SortMergeJoin -Left keys [1]: [ws_bill_customer_sk#55] -Right keys [1]: [c_customer_sk#29] +(106) Project [codegen id : 41] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(107) HashAggregate [codegen id : 41] +Input [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#63, isEmpty#64] +Results [3]: [c_customer_sk#28, sum#65, isEmpty#66] + +(108) HashAggregate [codegen id : 41] +Input [3]: [c_customer_sk#28, sum#65, isEmpty#66] +Keys [1]: [c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#67] +Results [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#67 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68] + +(109) Filter [codegen id : 41] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#36, [id=#37] as decimal(32,6)))), DecimalType(38,8), true))) + +(110) Project [codegen id : 41] +Output [1]: [c_customer_sk#28] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68] + +(111) Sort [codegen id : 41] 
+Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 + +(112) SortMergeJoin +Left keys [1]: [ws_bill_customer_sk#53] +Right keys [1]: [c_customer_sk#28] Join condition: None -(106) ReusedExchange [Reuses operator id: 57] +(113) ReusedExchange [Reuses operator id: 56] Output [1]: [d_date_sk#9] -(107) BroadcastHashJoin [codegen id : 46] -Left keys [1]: [ws_sold_date_sk#53] +(114) BroadcastHashJoin [codegen id : 43] +Left keys [1]: [ws_sold_date_sk#51] Right keys [1]: [d_date_sk#9] Join condition: None -(108) Project [codegen id : 46] -Output [3]: [ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57] -Input [5]: [ws_sold_date_sk#53, ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57, d_date_sk#9] +(115) Project [codegen id : 43] +Output [3]: [ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55] +Input [5]: [ws_sold_date_sk#51, ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55, d_date_sk#9] -(109) ReusedExchange [Reuses operator id: 63] -Output [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] +(116) ReusedExchange [Reuses operator id: 62] +Output [3]: [c_customer_sk#28, c_first_name#40, c_last_name#41] -(110) Sort [codegen id : 48] -Input [3]: [c_customer_sk#29, c_first_name#41, c_last_name#42] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(117) Sort [codegen id : 45] +Input [3]: [c_customer_sk#28, c_first_name#40, c_last_name#41] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(111) ReusedExchange [Reuses operator id: 38] -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(118) ReusedExchange [Reuses operator id: 37] +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(112) Sort [codegen id : 50] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(119) Sort [codegen id : 47] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], false, 0 -(113) ReusedExchange [Reuses operator id: 43] -Output [1]: [c_customer_sk#29] +(120) ReusedExchange [Reuses operator id: 42] +Output [1]: [c_customer_sk#28] -(114) Sort [codegen id : 52] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(121) Sort [codegen id : 49] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(115) SortMergeJoin [codegen id : 53] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(122) SortMergeJoin [codegen id : 50] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: [c_customer_sk#28] Join condition: None -(116) Project [codegen id : 53] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(117) HashAggregate [codegen id : 53] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#60, isEmpty#61] -Results [3]: [c_customer_sk#29, sum#62, isEmpty#63] - -(118) HashAggregate [codegen id : 53] -Input [3]: [c_customer_sk#29, sum#62, isEmpty#63] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#64] -Results [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65] - -(119) Filter [codegen id : 53] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#37, [id=#38] as decimal(32,6)))), DecimalType(38,8), true))) - -(120) Project [codegen id : 53] -Output [1]: [c_customer_sk#29 AS c_customer_sk#29#66] -Input [2]: [c_customer_sk#29, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#65] - -(121) Exchange -Input [1]: [c_customer_sk#29#66] -Arguments: hashpartitioning(c_customer_sk#29#66, 5), true, [id=#67] - -(122) Sort [codegen id : 54] -Input [1]: [c_customer_sk#29#66] -Arguments: [c_customer_sk#29#66 ASC NULLS FIRST], false, 0 - -(123) SortMergeJoin -Left keys [1]: [c_customer_sk#29] -Right keys [1]: [c_customer_sk#29#66] +(123) Project [codegen id : 50] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(124) HashAggregate [codegen id : 50] +Input [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#63, isEmpty#64] +Results [3]: [c_customer_sk#28, sum#65, isEmpty#66] + +(125) HashAggregate [codegen id : 50] +Input [3]: [c_customer_sk#28, sum#65, isEmpty#66] +Keys [1]: [c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#67] +Results [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as 
decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#67 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68] + +(126) Filter [codegen id : 50] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68 as decimal(38,8)) > CheckOverflow((0.500000 * promote_precision(cast(ReusedSubquery Subquery scalar-subquery#36, [id=#37] as decimal(32,6)))), DecimalType(38,8), true))) + +(127) Project [codegen id : 50] +Output [1]: [c_customer_sk#28 AS c_customer_sk#28#69] +Input [2]: [c_customer_sk#28, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#68] + +(128) Sort [codegen id : 50] +Input [1]: [c_customer_sk#28#69] +Arguments: [c_customer_sk#28#69 ASC NULLS FIRST], false, 0 + +(129) SortMergeJoin +Left keys [1]: [c_customer_sk#28] +Right keys [1]: [c_customer_sk#28#69] Join condition: None -(124) SortMergeJoin [codegen id : 55] -Left keys [1]: [ws_bill_customer_sk#55] -Right keys [1]: [c_customer_sk#29] +(130) SortMergeJoin [codegen id : 51] +Left keys [1]: [ws_bill_customer_sk#53] +Right keys [1]: [c_customer_sk#28] Join condition: None -(125) Project [codegen id : 55] -Output [4]: [ws_quantity#56, ws_list_price#57, c_first_name#41, c_last_name#42] -Input [6]: [ws_bill_customer_sk#55, ws_quantity#56, ws_list_price#57, c_customer_sk#29, c_first_name#41, c_last_name#42] +(131) Project [codegen id : 51] +Output [4]: [ws_quantity#54, ws_list_price#55, c_first_name#40, c_last_name#41] +Input [6]: [ws_bill_customer_sk#53, ws_quantity#54, ws_list_price#55, c_customer_sk#28, c_first_name#40, c_last_name#41] -(126) HashAggregate [codegen id : 55] -Input [4]: [ws_quantity#56, ws_list_price#57, c_first_name#41, c_last_name#42] -Keys [2]: [c_last_name#42, c_first_name#41] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#56 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#57 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#68, isEmpty#69] -Results [4]: [c_last_name#42, c_first_name#41, sum#70, isEmpty#71] +(132) HashAggregate [codegen id : 51] +Input [4]: [ws_quantity#54, ws_list_price#55, c_first_name#40, c_last_name#41] +Keys [2]: [c_last_name#41, c_first_name#40] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#54 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#55 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#70, isEmpty#71] +Results [4]: [c_last_name#41, c_first_name#40, sum#72, isEmpty#73] -(127) Exchange -Input [4]: [c_last_name#42, c_first_name#41, sum#70, isEmpty#71] -Arguments: hashpartitioning(c_last_name#42, 
c_first_name#41, 5), true, [id=#72] +(133) Exchange +Input [4]: [c_last_name#41, c_first_name#40, sum#72, isEmpty#73] +Arguments: hashpartitioning(c_last_name#41, c_first_name#40, 5), true, [id=#74] -(128) HashAggregate [codegen id : 56] -Input [4]: [c_last_name#42, c_first_name#41, sum#70, isEmpty#71] -Keys [2]: [c_last_name#42, c_first_name#41] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#56 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#57 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#56 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#57 as decimal(12,2)))), DecimalType(18,2), true))#73] -Results [3]: [c_last_name#42, c_first_name#41, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#56 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#57 as decimal(12,2)))), DecimalType(18,2), true))#73 AS sales#74] +(134) HashAggregate [codegen id : 52] +Input [4]: [c_last_name#41, c_first_name#40, sum#72, isEmpty#73] +Keys [2]: [c_last_name#41, c_first_name#40] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#54 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#55 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#54 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#55 as decimal(12,2)))), DecimalType(18,2), true))#75] +Results [3]: [c_last_name#41, c_first_name#40, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#54 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#55 as decimal(12,2)))), DecimalType(18,2), true))#75 AS sales#76] -(129) Union +(135) Union -(130) TakeOrderedAndProject -Input [3]: [c_last_name#42, c_first_name#41, sales#52] -Arguments: 100, [c_last_name#42 ASC NULLS FIRST, c_first_name#41 ASC NULLS FIRST, sales#52 ASC NULLS FIRST], [c_last_name#42, c_first_name#41, sales#52] +(136) TakeOrderedAndProject +Input [3]: [c_last_name#41, c_first_name#40, sales#50] +Arguments: 100, [c_last_name#41 ASC NULLS FIRST, c_first_name#40 ASC NULLS FIRST, sales#50 ASC NULLS FIRST], [c_last_name#41, c_first_name#40, sales#50] ===== Subqueries ===== -Subquery:1 Hosting operator id = 49 Hosting Expression = Subquery scalar-subquery#37, [id=#38] -* HashAggregate (154) -+- Exchange (153) - +- * HashAggregate (152) - +- * HashAggregate (151) - +- * HashAggregate (150) - +- * Project (149) - +- * SortMergeJoin Inner (148) - :- * Sort (142) - : +- Exchange (141) - : +- * Project (140) - : +- * BroadcastHashJoin Inner BuildRight (139) - : :- * Filter (133) - : : +- * ColumnarToRow (132) - : : +- Scan parquet default.store_sales (131) - : +- BroadcastExchange (138) - : +- * Project (137) - : +- * Filter (136) - : +- * ColumnarToRow (135) - : +- Scan parquet default.date_dim (134) - +- * Sort (147) - +- Exchange (146) - +- * Filter (145) - +- * ColumnarToRow (144) - +- Scan parquet default.customer (143) - - -(131) Scan parquet default.store_sales -Output [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +Subquery:1 Hosting operator id = 48 Hosting Expression = Subquery scalar-subquery#36, [id=#37] +* HashAggregate (160) ++- Exchange (159) + +- * HashAggregate (158) + +- * HashAggregate (157) + +- * HashAggregate (156) + +- * Project (155) + +- * SortMergeJoin Inner (154) + :- * 
Sort (148) + : +- Exchange (147) + : +- * Project (146) + : +- * BroadcastHashJoin Inner BuildRight (145) + : :- * Filter (139) + : : +- * ColumnarToRow (138) + : : +- Scan parquet default.store_sales (137) + : +- BroadcastExchange (144) + : +- * Project (143) + : +- * Filter (142) + : +- * ColumnarToRow (141) + : +- Scan parquet default.date_dim (140) + +- * Sort (153) + +- Exchange (152) + +- * Filter (151) + +- * ColumnarToRow (150) + +- Scan parquet default.customer (149) + + +(137) Scan parquet default.store_sales +Output [4]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(132) ColumnarToRow [codegen id : 2] -Input [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] +(138) ColumnarToRow [codegen id : 2] +Input [4]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] -(133) Filter [codegen id : 2] -Input [4]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Condition : (isnotnull(ss_customer_sk#25) AND isnotnull(ss_sold_date_sk#7)) +(139) Filter [codegen id : 2] +Input [4]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Condition : (isnotnull(ss_customer_sk#24) AND isnotnull(ss_sold_date_sk#7)) -(134) Scan parquet default.date_dim +(140) Scan parquet default.date_dim Output [2]: [d_date_sk#9, d_year#11] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [In(d_year, [2000,2001,2002,2003]), IsNotNull(d_date_sk)] ReadSchema: struct -(135) ColumnarToRow [codegen id : 1] +(141) ColumnarToRow [codegen id : 1] Input [2]: [d_date_sk#9, d_year#11] -(136) Filter [codegen id : 1] +(142) Filter [codegen id : 1] Input [2]: [d_date_sk#9, d_year#11] Condition : (d_year#11 IN (2000,2001,2002,2003) AND isnotnull(d_date_sk#9)) -(137) Project [codegen id : 1] +(143) Project [codegen id : 1] Output [1]: [d_date_sk#9] Input [2]: [d_date_sk#9, d_year#11] -(138) BroadcastExchange +(144) BroadcastExchange Input [1]: [d_date_sk#9] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#75] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#77] -(139) BroadcastHashJoin [codegen id : 2] +(145) BroadcastHashJoin [codegen id : 2] Left keys [1]: [ss_sold_date_sk#7] Right keys [1]: [d_date_sk#9] Join condition: None -(140) Project [codegen id : 2] -Output [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Input [5]: [ss_sold_date_sk#7, ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, d_date_sk#9] +(146) Project [codegen id : 2] +Output [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Input [5]: [ss_sold_date_sk#7, ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, d_date_sk#9] -(141) Exchange -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: hashpartitioning(ss_customer_sk#25, 5), true, [id=#76] +(147) Exchange +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: hashpartitioning(ss_customer_sk#24, 5), true, [id=#78] -(142) Sort [codegen id : 3] -Input [3]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27] -Arguments: [ss_customer_sk#25 ASC NULLS FIRST], false, 0 +(148) Sort [codegen id : 3] +Input [3]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26] +Arguments: [ss_customer_sk#24 ASC NULLS FIRST], 
false, 0 -(143) Scan parquet default.customer -Output [1]: [c_customer_sk#29] +(149) Scan parquet default.customer +Output [1]: [c_customer_sk#28] Batched: true Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk)] ReadSchema: struct -(144) ColumnarToRow [codegen id : 4] -Input [1]: [c_customer_sk#29] +(150) ColumnarToRow [codegen id : 4] +Input [1]: [c_customer_sk#28] -(145) Filter [codegen id : 4] -Input [1]: [c_customer_sk#29] -Condition : isnotnull(c_customer_sk#29) +(151) Filter [codegen id : 4] +Input [1]: [c_customer_sk#28] +Condition : isnotnull(c_customer_sk#28) -(146) Exchange -Input [1]: [c_customer_sk#29] -Arguments: hashpartitioning(c_customer_sk#29, 5), true, [id=#77] +(152) Exchange +Input [1]: [c_customer_sk#28] +Arguments: hashpartitioning(c_customer_sk#28, 5), true, [id=#79] -(147) Sort [codegen id : 5] -Input [1]: [c_customer_sk#29] -Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 +(153) Sort [codegen id : 5] +Input [1]: [c_customer_sk#28] +Arguments: [c_customer_sk#28 ASC NULLS FIRST], false, 0 -(148) SortMergeJoin [codegen id : 6] -Left keys [1]: [ss_customer_sk#25] -Right keys [1]: [c_customer_sk#29] +(154) SortMergeJoin [codegen id : 6] +Left keys [1]: [ss_customer_sk#24] +Right keys [1]: [c_customer_sk#28] Join condition: None -(149) Project [codegen id : 6] -Output [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Input [4]: [ss_customer_sk#25, ss_quantity#26, ss_sales_price#27, c_customer_sk#29] - -(150) HashAggregate [codegen id : 6] -Input [3]: [ss_quantity#26, ss_sales_price#27, c_customer_sk#29] -Keys [1]: [c_customer_sk#29] -Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#78, isEmpty#79] -Results [3]: [c_customer_sk#29, sum#80, isEmpty#81] - -(151) HashAggregate [codegen id : 6] -Input [3]: [c_customer_sk#29, sum#80, isEmpty#81] -Keys [1]: [c_customer_sk#29] -Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#82] -Results [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#26 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#27 as decimal(12,2)))), DecimalType(18,2), true))#82 AS csales#83] - -(152) HashAggregate [codegen id : 6] -Input [1]: [csales#83] +(155) Project [codegen id : 6] +Output [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Input [4]: [ss_customer_sk#24, ss_quantity#25, ss_sales_price#26, c_customer_sk#28] + +(156) HashAggregate [codegen id : 6] +Input [3]: [ss_quantity#25, ss_sales_price#26, c_customer_sk#28] +Keys [1]: [c_customer_sk#28] +Functions [1]: [partial_sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#80, isEmpty#81] +Results [3]: [c_customer_sk#28, sum#82, isEmpty#83] + +(157) HashAggregate [codegen id : 6] +Input [3]: [c_customer_sk#28, sum#82, isEmpty#83] +Keys [1]: 
[c_customer_sk#28] +Functions [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#84] +Results [1]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#25 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price#26 as decimal(12,2)))), DecimalType(18,2), true))#84 AS csales#85] + +(158) HashAggregate [codegen id : 6] +Input [1]: [csales#85] Keys: [] -Functions [1]: [partial_max(csales#83)] -Aggregate Attributes [1]: [max#84] -Results [1]: [max#85] +Functions [1]: [partial_max(csales#85)] +Aggregate Attributes [1]: [max#86] +Results [1]: [max#87] -(153) Exchange -Input [1]: [max#85] -Arguments: SinglePartition, true, [id=#86] +(159) Exchange +Input [1]: [max#87] +Arguments: SinglePartition, true, [id=#88] -(154) HashAggregate [codegen id : 7] -Input [1]: [max#85] +(160) HashAggregate [codegen id : 7] +Input [1]: [max#87] Keys: [] -Functions [1]: [max(csales#83)] -Aggregate Attributes [1]: [max(csales#83)#87] -Results [1]: [max(csales#83)#87 AS tpcds_cmax#88] +Functions [1]: [max(csales#85)] +Aggregate Attributes [1]: [max(csales#85)#89] +Results [1]: [max(csales#85)#89 AS tpcds_cmax#90] -Subquery:2 Hosting operator id = 73 Hosting Expression = ReusedSubquery Subquery scalar-subquery#37, [id=#38] +Subquery:2 Hosting operator id = 72 Hosting Expression = ReusedSubquery Subquery scalar-subquery#36, [id=#37] -Subquery:3 Hosting operator id = 102 Hosting Expression = ReusedSubquery Subquery scalar-subquery#37, [id=#38] +Subquery:3 Hosting operator id = 109 Hosting Expression = ReusedSubquery Subquery scalar-subquery#36, [id=#37] -Subquery:4 Hosting operator id = 119 Hosting Expression = ReusedSubquery Subquery scalar-subquery#37, [id=#38] +Subquery:4 Hosting operator id = 126 Hosting Expression = ReusedSubquery Subquery scalar-subquery#36, [id=#37] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/simplified.txt index e8891f032a091..4279bf3e16a82 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q23b.sf100/simplified.txt @@ -1,24 +1,24 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] Union - WholeStageCodegen (28) + WholeStageCodegen (26) HashAggregate [c_last_name,c_first_name,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sales,sum,isEmpty] InputAdapter Exchange [c_last_name,c_first_name] #1 - WholeStageCodegen (27) + WholeStageCodegen (25) HashAggregate [c_last_name,c_first_name,cs_quantity,cs_list_price] [sum,isEmpty,sum,isEmpty] Project [cs_quantity,cs_list_price,c_first_name,c_last_name] SortMergeJoin [cs_bill_customer_sk,c_customer_sk] InputAdapter - WholeStageCodegen (18) + WholeStageCodegen (17) Project [cs_bill_customer_sk,cs_quantity,cs_list_price] BroadcastHashJoin [cs_sold_date_sk,d_date_sk] InputAdapter SortMergeJoin [cs_bill_customer_sk,c_customer_sk] - WholeStageCodegen (11) 
+ WholeStageCodegen (10) Sort [cs_bill_customer_sk] InputAdapter Exchange [cs_bill_customer_sk] #2 - WholeStageCodegen (10) + WholeStageCodegen (9) Project [cs_sold_date_sk,cs_bill_customer_sk,cs_quantity,cs_list_price] InputAdapter SortMergeJoin [cs_item_sk,item_sk] @@ -31,48 +31,45 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] ColumnarToRow InputAdapter Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_quantity,cs_list_price] - WholeStageCodegen (9) + WholeStageCodegen (8) Sort [item_sk] - InputAdapter - Exchange [item_sk] #4 - WholeStageCodegen (8) - Project [item_sk] - Filter [count(1)] - HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] - HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] - Project [d_date,i_item_sk,i_item_desc] - SortMergeJoin [ss_item_sk,i_item_sk] + Project [item_sk] + Filter [count(1)] + HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] + HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] + Project [d_date,i_item_sk,i_item_desc] + SortMergeJoin [ss_item_sk,i_item_sk] + InputAdapter + WholeStageCodegen (5) + Sort [ss_item_sk] InputAdapter - WholeStageCodegen (5) - Sort [ss_item_sk] - InputAdapter - Exchange [ss_item_sk] #5 - WholeStageCodegen (4) - Project [ss_item_sk,d_date] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (3) - Project [d_date_sk,d_date] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date,d_year] + Exchange [ss_item_sk] #4 + WholeStageCodegen (4) + Project [ss_item_sk,d_date] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (3) + Project [d_date_sk,d_date] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date,d_year] + InputAdapter + WholeStageCodegen (7) + Sort [i_item_sk] InputAdapter - WholeStageCodegen (7) - Sort [i_item_sk] - InputAdapter - Exchange [i_item_sk] #7 - WholeStageCodegen (6) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_item_desc] - WholeStageCodegen (16) + Exchange [i_item_sk] #6 + WholeStageCodegen (6) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_item_desc] + WholeStageCodegen (15) Sort [c_customer_sk] Project [c_customer_sk] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] @@ -80,7 +77,7 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] WholeStageCodegen (7) HashAggregate [max] [max(csales),tpcds_cmax,max] InputAdapter - Exchange #10 + Exchange #9 WholeStageCodegen (6) HashAggregate [csales] [max,max] HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),csales,sum,isEmpty] @@ -91,7 +88,7 @@ TakeOrderedAndProject 
[c_last_name,c_first_name,sales] WholeStageCodegen (3) Sort [ss_customer_sk] InputAdapter - Exchange [ss_customer_sk] #11 + Exchange [ss_customer_sk] #10 WholeStageCodegen (2) Project [ss_customer_sk,ss_quantity,ss_sales_price] BroadcastHashJoin [ss_sold_date_sk,d_date_sk] @@ -100,7 +97,7 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] InputAdapter Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk,ss_quantity,ss_sales_price] InputAdapter - BroadcastExchange #12 + BroadcastExchange #11 WholeStageCodegen (1) Project [d_date_sk] Filter [d_year,d_date_sk] @@ -111,7 +108,7 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] WholeStageCodegen (5) Sort [c_customer_sk] InputAdapter - Exchange [c_customer_sk] #13 + Exchange [c_customer_sk] #12 WholeStageCodegen (4) Filter [c_customer_sk] ColumnarToRow @@ -122,28 +119,28 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] Project [ss_quantity,ss_sales_price,c_customer_sk] SortMergeJoin [ss_customer_sk,c_customer_sk] InputAdapter - WholeStageCodegen (13) + WholeStageCodegen (12) Sort [ss_customer_sk] InputAdapter - Exchange [ss_customer_sk] #8 - WholeStageCodegen (12) + Exchange [ss_customer_sk] #7 + WholeStageCodegen (11) Filter [ss_customer_sk] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_customer_sk,ss_quantity,ss_sales_price] InputAdapter - WholeStageCodegen (15) + WholeStageCodegen (14) Sort [c_customer_sk] InputAdapter - Exchange [c_customer_sk] #9 - WholeStageCodegen (14) + Exchange [c_customer_sk] #8 + WholeStageCodegen (13) Filter [c_customer_sk] ColumnarToRow InputAdapter Scan parquet default.customer [c_customer_sk] InputAdapter - BroadcastExchange #14 - WholeStageCodegen (17) + BroadcastExchange #13 + WholeStageCodegen (16) Project [d_date_sk] Filter [d_year,d_moy,d_date_sk] ColumnarToRow @@ -151,73 +148,84 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] Scan parquet default.date_dim [d_date_sk,d_year,d_moy] InputAdapter SortMergeJoin [c_customer_sk,c_customer_sk] - WholeStageCodegen (20) + WholeStageCodegen (19) Sort [c_customer_sk] InputAdapter - Exchange [c_customer_sk] #15 - WholeStageCodegen (19) + Exchange [c_customer_sk] #14 + WholeStageCodegen (18) Filter [c_customer_sk] ColumnarToRow InputAdapter Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name] - WholeStageCodegen (26) + WholeStageCodegen (24) Sort [c_customer_sk] - InputAdapter - Exchange [c_customer_sk] #16 - WholeStageCodegen (25) - Project [c_customer_sk] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [tpcds_cmax] #1 - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] - HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] - Project [ss_quantity,ss_sales_price,c_customer_sk] - SortMergeJoin [ss_customer_sk,c_customer_sk] + Project [c_customer_sk] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), 
true))] + ReusedSubquery [tpcds_cmax] #1 + HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] + HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] + Project [ss_quantity,ss_sales_price,c_customer_sk] + SortMergeJoin [ss_customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (21) + Sort [ss_customer_sk] InputAdapter - WholeStageCodegen (22) - Sort [ss_customer_sk] - InputAdapter - ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #8 + ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #7 + InputAdapter + WholeStageCodegen (23) + Sort [c_customer_sk] InputAdapter - WholeStageCodegen (24) - Sort [c_customer_sk] - InputAdapter - ReusedExchange [c_customer_sk] #9 - WholeStageCodegen (56) + ReusedExchange [c_customer_sk] #8 + WholeStageCodegen (52) HashAggregate [c_last_name,c_first_name,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sales,sum,isEmpty] InputAdapter - Exchange [c_last_name,c_first_name] #17 - WholeStageCodegen (55) + Exchange [c_last_name,c_first_name] #15 + WholeStageCodegen (51) HashAggregate [c_last_name,c_first_name,ws_quantity,ws_list_price] [sum,isEmpty,sum,isEmpty] Project [ws_quantity,ws_list_price,c_first_name,c_last_name] SortMergeJoin [ws_bill_customer_sk,c_customer_sk] InputAdapter - WholeStageCodegen (46) + WholeStageCodegen (43) Project [ws_bill_customer_sk,ws_quantity,ws_list_price] BroadcastHashJoin [ws_sold_date_sk,d_date_sk] InputAdapter SortMergeJoin [ws_bill_customer_sk,c_customer_sk] - WholeStageCodegen (39) + WholeStageCodegen (36) Sort [ws_bill_customer_sk] InputAdapter - Exchange [ws_bill_customer_sk] #18 - WholeStageCodegen (38) + Exchange [ws_bill_customer_sk] #16 + WholeStageCodegen (35) Project [ws_sold_date_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] InputAdapter SortMergeJoin [ws_item_sk,item_sk] - WholeStageCodegen (30) + WholeStageCodegen (28) Sort [ws_item_sk] InputAdapter - Exchange [ws_item_sk] #19 - WholeStageCodegen (29) + Exchange [ws_item_sk] #17 + WholeStageCodegen (27) Filter [ws_bill_customer_sk,ws_sold_date_sk] ColumnarToRow InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_bill_customer_sk,ws_quantity,ws_list_price] - WholeStageCodegen (37) + WholeStageCodegen (34) Sort [item_sk] - InputAdapter - ReusedExchange [item_sk] #4 - WholeStageCodegen (44) + Project [item_sk] + Filter [count(1)] + HashAggregate [substr(i_item_desc, 1, 30),i_item_sk,d_date,count] [count(1),item_sk,count(1),count] + HashAggregate [i_item_desc,i_item_sk,d_date] [count,substr(i_item_desc, 1, 30),count] + Project [d_date,i_item_sk,i_item_desc] + SortMergeJoin [ss_item_sk,i_item_sk] + InputAdapter + WholeStageCodegen (31) + Sort [ss_item_sk] + InputAdapter + ReusedExchange [ss_item_sk,d_date] #4 + InputAdapter + WholeStageCodegen (33) + Sort [i_item_sk] + InputAdapter + ReusedExchange [i_item_sk,i_item_desc] #6 + WholeStageCodegen (41) Sort [c_customer_sk] Project [c_customer_sk] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as 
decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] @@ -227,42 +235,39 @@ TakeOrderedAndProject [c_last_name,c_first_name,sales] Project [ss_quantity,ss_sales_price,c_customer_sk] SortMergeJoin [ss_customer_sk,c_customer_sk] InputAdapter - WholeStageCodegen (41) + WholeStageCodegen (38) Sort [ss_customer_sk] InputAdapter - ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #8 + ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #7 InputAdapter - WholeStageCodegen (43) + WholeStageCodegen (40) Sort [c_customer_sk] InputAdapter - ReusedExchange [c_customer_sk] #9 + ReusedExchange [c_customer_sk] #8 InputAdapter - ReusedExchange [d_date_sk] #14 + ReusedExchange [d_date_sk] #13 InputAdapter SortMergeJoin [c_customer_sk,c_customer_sk] - WholeStageCodegen (48) + WholeStageCodegen (45) Sort [c_customer_sk] InputAdapter - ReusedExchange [c_customer_sk,c_first_name,c_last_name] #15 - WholeStageCodegen (54) + ReusedExchange [c_customer_sk,c_first_name,c_last_name] #14 + WholeStageCodegen (50) Sort [c_customer_sk] - InputAdapter - Exchange [c_customer_sk] #20 - WholeStageCodegen (53) - Project [c_customer_sk] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [tpcds_cmax] #1 - HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] - HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] - Project [ss_quantity,ss_sales_price,c_customer_sk] - SortMergeJoin [ss_customer_sk,c_customer_sk] + Project [c_customer_sk] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [tpcds_cmax] #1 + HashAggregate [c_customer_sk,sum,isEmpty] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_sales_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty] + HashAggregate [c_customer_sk,ss_quantity,ss_sales_price] [sum,isEmpty,sum,isEmpty] + Project [ss_quantity,ss_sales_price,c_customer_sk] + SortMergeJoin [ss_customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (47) + Sort [ss_customer_sk] InputAdapter - WholeStageCodegen (50) - Sort [ss_customer_sk] - InputAdapter - ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #8 + ReusedExchange [ss_customer_sk,ss_quantity,ss_sales_price] #7 + InputAdapter + WholeStageCodegen (49) + Sort [c_customer_sk] InputAdapter - WholeStageCodegen (52) - Sort [c_customer_sk] - InputAdapter - ReusedExchange [c_customer_sk] #9 + ReusedExchange [c_customer_sk] #8 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt index 6e0a5ced1992a..093c4eed6cf11 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/explain.txt @@ -92,7 +92,7 @@ Input [11]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, (10) Exchange Input [10]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: hashpartitioning(ss_customer_sk#2, 5), true, [id=#13] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#13] (11) Sort [codegen id : 3] Input [10]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] @@ -114,7 +114,7 @@ Condition : (isnotnull(c_customer_sk#14) AND isnotnull(c_birth_country#17)) (15) Exchange Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: hashpartitioning(c_customer_sk#14, 5), true, [id=#18] +Arguments: hashpartitioning(c_customer_sk#14, 5), ENSURE_REQUIREMENTS, [id=#18] (16) Sort [codegen id : 5] Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] @@ -189,7 +189,7 @@ Input [17]: [ss_item_sk#1, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, i_c (32) Exchange Input [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, s_store_name#20, s_state#22, ca_state#25] -Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), true, [id=#29] +Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), ENSURE_REQUIREMENTS, [id=#29] (33) Sort [codegen id : 9] Input [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, s_store_name#20, s_state#22, ca_state#25] @@ -211,7 +211,7 @@ Condition : (isnotnull(sr_ticket_number#31) AND isnotnull(sr_item_sk#30)) (37) Exchange Input [2]: [sr_item_sk#30, sr_ticket_number#31] -Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), true, [id=#32] +Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), ENSURE_REQUIREMENTS, [id=#32] (38) Sort [codegen id : 11] Input [2]: [sr_item_sk#30, sr_ticket_number#31] @@ -235,7 +235,7 @@ Results [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_ (42) Exchange Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#34] -Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), true, [id=#35] +Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), ENSURE_REQUIREMENTS, [id=#35] (43) HashAggregate [codegen id : 13] Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#34] @@ -253,7 +253,7 @@ Results [5]: [c_last_name#16, 
c_first_name#15, s_store_name#20, sum#40, isEmpty# (45) Exchange Input [5]: [c_last_name#16, c_first_name#15, s_store_name#20, sum#40, isEmpty#41] -Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, 5), true, [id=#42] +Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, 5), ENSURE_REQUIREMENTS, [id=#42] (46) HashAggregate [codegen id : 14] Input [5]: [c_last_name#16, c_first_name#15, s_store_name#20, sum#40, isEmpty#41] @@ -296,25 +296,25 @@ Subquery:1 Hosting operator id = 47 Hosting Expression = Subquery scalar-subquer : : : :- * Sort (60) : : : : +- Exchange (59) : : : : +- * Project (58) - : : : : +- * BroadcastHashJoin Inner BuildLeft (57) - : : : : :- BroadcastExchange (53) - : : : : : +- * Project (52) - : : : : : +- * Filter (51) - : : : : : +- * ColumnarToRow (50) - : : : : : +- Scan parquet default.store (49) - : : : : +- * Filter (56) - : : : : +- * ColumnarToRow (55) - : : : : +- Scan parquet default.store_sales (54) + : : : : +- * BroadcastHashJoin Inner BuildRight (57) + : : : : :- * Filter (51) + : : : : : +- * ColumnarToRow (50) + : : : : : +- Scan parquet default.store_sales (49) + : : : : +- BroadcastExchange (56) + : : : : +- * Project (55) + : : : : +- * Filter (54) + : : : : +- * ColumnarToRow (53) + : : : : +- Scan parquet default.store (52) : : : +- * Sort (65) : : : +- Exchange (64) : : : +- * Filter (63) : : : +- * ColumnarToRow (62) - : : : +- Scan parquet default.item (61) + : : : +- Scan parquet default.customer (61) : : +- * Sort (74) : : +- Exchange (73) : : +- * Filter (72) : : +- * ColumnarToRow (71) - : : +- Scan parquet default.customer (70) + : : +- Scan parquet default.item (70) : +- * Sort (83) : +- Exchange (82) : +- * Filter (81) @@ -327,135 +327,135 @@ Subquery:1 Hosting operator id = 47 Hosting Expression = Subquery scalar-subquer +- Scan parquet default.store_returns (88) -(49) Scan parquet default.store +(49) Scan parquet default.store_sales +Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] +Batched: true +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] +ReadSchema: struct + +(50) ColumnarToRow [codegen id : 2] +Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] + +(51) Filter [codegen id : 2] +Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] +Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND isnotnull(ss_store_sk#3)) AND isnotnull(ss_customer_sk#2)) + +(52) Scan parquet default.store Output [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Batched: true Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct -(50) ColumnarToRow [codegen id : 1] +(53) ColumnarToRow [codegen id : 1] Input [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] -(51) Filter [codegen id : 1] +(54) Filter [codegen id : 1] Input [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Condition : (((isnotnull(s_market_id#21) AND (s_market_id#21 = 8)) AND isnotnull(s_store_sk#19)) AND isnotnull(s_zip#23)) -(52) Project [codegen id : 1] +(55) Project [codegen id : 1] Output [4]: [s_store_sk#19, s_store_name#20, s_state#22, s_zip#23] 
Input [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] -(53) BroadcastExchange +(56) BroadcastExchange Input [4]: [s_store_sk#19, s_store_name#20, s_state#22, s_zip#23] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#48] -(54) Scan parquet default.store_sales -Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] -ReadSchema: struct - -(55) ColumnarToRow -Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] - -(56) Filter -Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] -Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND isnotnull(ss_store_sk#3)) AND isnotnull(ss_customer_sk#2)) - (57) BroadcastHashJoin [codegen id : 2] -Left keys [1]: [s_store_sk#19] -Right keys [1]: [ss_store_sk#3] +Left keys [1]: [ss_store_sk#3] +Right keys [1]: [s_store_sk#19] Join condition: None (58) Project [codegen id : 2] -Output [7]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5] -Input [9]: [s_store_sk#19, s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] +Output [7]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23] +Input [9]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, s_store_sk#19, s_store_name#20, s_state#22, s_zip#23] (59) Exchange -Input [7]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5] -Arguments: hashpartitioning(ss_item_sk#1, 5), true, [id=#49] +Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#49] (60) Sort [codegen id : 3] -Input [7]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5] -Arguments: [ss_item_sk#1 ASC NULLS FIRST], false, 0 +Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23] +Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 -(61) Scan parquet default.item -Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +(61) Scan parquet default.customer +Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] Batched: true -Location [not included in comparison]/{warehouse_dir}/item] -PushedFilters: [IsNotNull(i_item_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer] +PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] +ReadSchema: struct (62) ColumnarToRow [codegen id : 4] -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] (63) Filter [codegen id : 4] -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Condition : isnotnull(i_item_sk#6) +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] 
+Condition : (isnotnull(c_customer_sk#14) AND isnotnull(c_birth_country#17)) (64) Exchange -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: hashpartitioning(i_item_sk#6, 5), true, [id=#50] +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: hashpartitioning(c_customer_sk#14, 5), ENSURE_REQUIREMENTS, [id=#50] (65) Sort [codegen id : 5] -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: [i_item_sk#6 ASC NULLS FIRST], false, 0 +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: [c_customer_sk#14 ASC NULLS FIRST], false, 0 (66) SortMergeJoin [codegen id : 6] -Left keys [1]: [ss_item_sk#1] -Right keys [1]: [i_item_sk#6] +Left keys [1]: [ss_customer_sk#2] +Right keys [1]: [c_customer_sk#14] Join condition: None (67) Project [codegen id : 6] -Output [12]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Input [13]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Output [9]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17] +Input [11]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] (68) Exchange -Input [12]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: hashpartitioning(ss_customer_sk#2, 5), true, [id=#51] +Input [9]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: hashpartitioning(ss_item_sk#1, 5), ENSURE_REQUIREMENTS, [id=#51] (69) Sort [codegen id : 7] -Input [12]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 +Input [9]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: [ss_item_sk#1 ASC NULLS FIRST], false, 0 -(70) Scan parquet default.customer -Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +(70) Scan parquet default.item +Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer] -PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/item] +PushedFilters: [IsNotNull(i_item_sk)] +ReadSchema: struct (71) ColumnarToRow [codegen id : 8] -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] (72) Filter [codegen id : 8] -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Condition : 
(isnotnull(c_customer_sk#14) AND isnotnull(c_birth_country#17)) +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Condition : isnotnull(i_item_sk#6) (73) Exchange -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: hashpartitioning(c_customer_sk#14, 5), true, [id=#52] +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Arguments: hashpartitioning(i_item_sk#6, 5), ENSURE_REQUIREMENTS, [id=#52] (74) Sort [codegen id : 9] -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: [c_customer_sk#14 ASC NULLS FIRST], false, 0 +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Arguments: [i_item_sk#6 ASC NULLS FIRST], false, 0 (75) SortMergeJoin [codegen id : 10] -Left keys [1]: [ss_customer_sk#2] -Right keys [1]: [c_customer_sk#14] +Left keys [1]: [ss_item_sk#1] +Right keys [1]: [i_item_sk#6] Join condition: None (76) Project [codegen id : 10] -Output [14]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17] -Input [16]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Output [14]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Input [15]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] (77) Exchange -Input [14]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: hashpartitioning(c_birth_country#17, s_zip#23, 5), true, [id=#53] +Input [14]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Arguments: hashpartitioning(c_birth_country#17, s_zip#23, 5), ENSURE_REQUIREMENTS, [id=#53] (78) Sort [codegen id : 11] -Input [14]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17] +Input [14]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Arguments: [c_birth_country#17 ASC NULLS FIRST, s_zip#23 ASC NULLS FIRST], false, 0 (79) Scan parquet default.customer_address @@ -474,7 +474,7 @@ Condition : (isnotnull(ca_country#27) AND isnotnull(ca_zip#26)) (82) Exchange Input [3]: [ca_state#25, ca_zip#26, ca_country#27] -Arguments: hashpartitioning(upper(ca_country#27), ca_zip#26, 5), true, [id=#54] +Arguments: hashpartitioning(upper(ca_country#27), ca_zip#26, 5), 
ENSURE_REQUIREMENTS, [id=#54] (83) Sort [codegen id : 13] Input [3]: [ca_state#25, ca_zip#26, ca_country#27] @@ -486,15 +486,15 @@ Right keys [2]: [upper(ca_country#27), ca_zip#26] Join condition: None (85) Project [codegen id : 14] -Output [13]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] -Input [17]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17, ca_state#25, ca_zip#26, ca_country#27] +Output [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25] +Input [17]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25, ca_zip#26, ca_country#27] (86) Exchange -Input [13]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] -Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), true, [id=#55] +Input [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25] +Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), ENSURE_REQUIREMENTS, [id=#55] (87) Sort [codegen id : 15] -Input [13]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] +Input [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25] Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk#1 as bigint) ASC NULLS FIRST], false, 0 (88) Scan parquet default.store_returns @@ -513,7 +513,7 @@ Condition : (isnotnull(sr_ticket_number#31) AND isnotnull(sr_item_sk#30)) (91) Exchange Input [2]: [sr_item_sk#30, sr_ticket_number#31] -Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), true, [id=#56] +Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), ENSURE_REQUIREMENTS, [id=#56] (92) Sort [codegen id : 17] Input [2]: [sr_item_sk#30, sr_ticket_number#31] @@ -526,7 +526,7 @@ Join condition: None (94) Project [codegen id : 18] Output [11]: [ss_net_paid#5, s_store_name#20, s_state#22, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] -Input [15]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25, sr_item_sk#30, sr_ticket_number#31] +Input [15]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, 
ca_state#25, sr_item_sk#30, sr_ticket_number#31] (95) HashAggregate [codegen id : 18] Input [11]: [ss_net_paid#5, s_store_name#20, s_state#22, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] @@ -537,7 +537,7 @@ Results [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_ (96) Exchange Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#58] -Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), true, [id=#59] +Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), ENSURE_REQUIREMENTS, [id=#59] (97) HashAggregate [codegen id : 19] Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#58] @@ -555,7 +555,7 @@ Results [2]: [sum#63, count#64] (99) Exchange Input [2]: [sum#63, count#64] -Arguments: SinglePartition, true, [id=#65] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#65] (100) HashAggregate [codegen id : 20] Input [2]: [sum#63, count#64] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/simplified.txt index f51d1972b630f..7de562c5d59a1 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24a.sf100/simplified.txt @@ -21,7 +21,7 @@ WholeStageCodegen (14) InputAdapter Exchange [ss_ticket_number,ss_item_sk] #12 WholeStageCodegen (14) - Project [s_store_name,s_state,ss_item_sk,ss_ticket_number,ss_net_paid,i_current_price,i_size,i_color,i_units,i_manager_id,c_first_name,c_last_name,ca_state] + Project [ss_item_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,c_first_name,c_last_name,i_current_price,i_size,i_color,i_units,i_manager_id,ca_state] SortMergeJoin [c_birth_country,s_zip,ca_country,ca_zip] InputAdapter WholeStageCodegen (11) @@ -29,24 +29,28 @@ WholeStageCodegen (14) InputAdapter Exchange [c_birth_country,s_zip] #13 WholeStageCodegen (10) - Project [s_store_name,s_state,s_zip,ss_item_sk,ss_ticket_number,ss_net_paid,i_current_price,i_size,i_color,i_units,i_manager_id,c_first_name,c_last_name,c_birth_country] - SortMergeJoin [ss_customer_sk,c_customer_sk] + Project [ss_item_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,s_zip,c_first_name,c_last_name,c_birth_country,i_current_price,i_size,i_color,i_units,i_manager_id] + SortMergeJoin [ss_item_sk,i_item_sk] InputAdapter WholeStageCodegen (7) - Sort [ss_customer_sk] + Sort [ss_item_sk] InputAdapter - Exchange [ss_customer_sk] #14 + Exchange [ss_item_sk] #14 WholeStageCodegen (6) - Project [s_store_name,s_state,s_zip,ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_paid,i_current_price,i_size,i_color,i_units,i_manager_id] - SortMergeJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,s_zip,c_first_name,c_last_name,c_birth_country] + SortMergeJoin [ss_customer_sk,c_customer_sk] InputAdapter WholeStageCodegen (3) - Sort [ss_item_sk] + Sort [ss_customer_sk] InputAdapter - 
Exchange [ss_item_sk] #15 + Exchange [ss_customer_sk] #15 WholeStageCodegen (2) - Project [s_store_name,s_state,s_zip,ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_paid] - BroadcastHashJoin [s_store_sk,ss_store_sk] + Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,s_zip] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Filter [ss_ticket_number,ss_item_sk,ss_store_sk,ss_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_paid] InputAdapter BroadcastExchange #16 WholeStageCodegen (1) @@ -55,30 +59,26 @@ WholeStageCodegen (14) ColumnarToRow InputAdapter Scan parquet default.store [s_store_sk,s_store_name,s_market_id,s_state,s_zip] - Filter [ss_ticket_number,ss_item_sk,ss_store_sk,ss_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_paid] InputAdapter WholeStageCodegen (5) - Sort [i_item_sk] + Sort [c_customer_sk] InputAdapter - Exchange [i_item_sk] #17 + Exchange [c_customer_sk] #17 WholeStageCodegen (4) - Filter [i_item_sk] + Filter [c_customer_sk,c_birth_country] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_current_price,i_size,i_color,i_units,i_manager_id] + Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name,c_birth_country] InputAdapter WholeStageCodegen (9) - Sort [c_customer_sk] + Sort [i_item_sk] InputAdapter - Exchange [c_customer_sk] #18 + Exchange [i_item_sk] #18 WholeStageCodegen (8) - Filter [c_customer_sk,c_birth_country] + Filter [i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name,c_birth_country] + Scan parquet default.item [i_item_sk,i_current_price,i_size,i_color,i_units,i_manager_id] InputAdapter WholeStageCodegen (13) Sort [ca_country,ca_zip] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt index cbac3787cab6c..273950bed3546 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/explain.txt @@ -92,7 +92,7 @@ Input [11]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, (10) Exchange Input [10]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: hashpartitioning(ss_customer_sk#2, 5), true, [id=#13] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#13] (11) Sort [codegen id : 3] Input [10]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] @@ -114,7 +114,7 @@ Condition : (isnotnull(c_customer_sk#14) AND isnotnull(c_birth_country#17)) (15) Exchange Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: hashpartitioning(c_customer_sk#14, 5), true, [id=#18] +Arguments: hashpartitioning(c_customer_sk#14, 5), ENSURE_REQUIREMENTS, [id=#18] (16) Sort [codegen id : 5] Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] @@ -189,7 +189,7 @@ Input [17]: [ss_item_sk#1, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, i_c (32) Exchange Input [13]: [ss_item_sk#1, 
ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, s_store_name#20, s_state#22, ca_state#25] -Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), true, [id=#29] +Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), ENSURE_REQUIREMENTS, [id=#29] (33) Sort [codegen id : 9] Input [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, s_store_name#20, s_state#22, ca_state#25] @@ -211,7 +211,7 @@ Condition : (isnotnull(sr_ticket_number#31) AND isnotnull(sr_item_sk#30)) (37) Exchange Input [2]: [sr_item_sk#30, sr_ticket_number#31] -Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), true, [id=#32] +Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), ENSURE_REQUIREMENTS, [id=#32] (38) Sort [codegen id : 11] Input [2]: [sr_item_sk#30, sr_ticket_number#31] @@ -235,7 +235,7 @@ Results [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_ (42) Exchange Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#34] -Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), true, [id=#35] +Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), ENSURE_REQUIREMENTS, [id=#35] (43) HashAggregate [codegen id : 13] Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#34] @@ -253,7 +253,7 @@ Results [5]: [c_last_name#16, c_first_name#15, s_store_name#20, sum#40, isEmpty# (45) Exchange Input [5]: [c_last_name#16, c_first_name#15, s_store_name#20, sum#40, isEmpty#41] -Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, 5), true, [id=#42] +Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, 5), ENSURE_REQUIREMENTS, [id=#42] (46) HashAggregate [codegen id : 14] Input [5]: [c_last_name#16, c_first_name#15, s_store_name#20, sum#40, isEmpty#41] @@ -296,25 +296,25 @@ Subquery:1 Hosting operator id = 47 Hosting Expression = Subquery scalar-subquer : : : :- * Sort (60) : : : : +- Exchange (59) : : : : +- * Project (58) - : : : : +- * BroadcastHashJoin Inner BuildLeft (57) - : : : : :- BroadcastExchange (53) - : : : : : +- * Project (52) - : : : : : +- * Filter (51) - : : : : : +- * ColumnarToRow (50) - : : : : : +- Scan parquet default.store (49) - : : : : +- * Filter (56) - : : : : +- * ColumnarToRow (55) - : : : : +- Scan parquet default.store_sales (54) + : : : : +- * BroadcastHashJoin Inner BuildRight (57) + : : : : :- * Filter (51) + : : : : : +- * ColumnarToRow (50) + : : : : : +- Scan parquet default.store_sales (49) + : : : : +- BroadcastExchange (56) + : : : : +- * Project (55) + : : : : +- * Filter (54) + : : : : +- * ColumnarToRow (53) + : : : : +- Scan parquet default.store (52) : : : +- * Sort (65) : : : +- Exchange (64) : : : +- * Filter (63) : : : +- * ColumnarToRow (62) - : : : +- Scan parquet default.item (61) + : : : +- Scan parquet default.customer (61) : : +- * Sort (74) : : +- 
Exchange (73) : : +- * Filter (72) : : +- * ColumnarToRow (71) - : : +- Scan parquet default.customer (70) + : : +- Scan parquet default.item (70) : +- * Sort (83) : +- Exchange (82) : +- * Filter (81) @@ -327,135 +327,135 @@ Subquery:1 Hosting operator id = 47 Hosting Expression = Subquery scalar-subquer +- Scan parquet default.store_returns (88) -(49) Scan parquet default.store +(49) Scan parquet default.store_sales +Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] +Batched: true +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] +ReadSchema: struct + +(50) ColumnarToRow [codegen id : 2] +Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] + +(51) Filter [codegen id : 2] +Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] +Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND isnotnull(ss_store_sk#3)) AND isnotnull(ss_customer_sk#2)) + +(52) Scan parquet default.store Output [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Batched: true Location [not included in comparison]/{warehouse_dir}/store] PushedFilters: [IsNotNull(s_market_id), EqualTo(s_market_id,8), IsNotNull(s_store_sk), IsNotNull(s_zip)] ReadSchema: struct -(50) ColumnarToRow [codegen id : 1] +(53) ColumnarToRow [codegen id : 1] Input [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] -(51) Filter [codegen id : 1] +(54) Filter [codegen id : 1] Input [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] Condition : (((isnotnull(s_market_id#21) AND (s_market_id#21 = 8)) AND isnotnull(s_store_sk#19)) AND isnotnull(s_zip#23)) -(52) Project [codegen id : 1] +(55) Project [codegen id : 1] Output [4]: [s_store_sk#19, s_store_name#20, s_state#22, s_zip#23] Input [5]: [s_store_sk#19, s_store_name#20, s_market_id#21, s_state#22, s_zip#23] -(53) BroadcastExchange +(56) BroadcastExchange Input [4]: [s_store_sk#19, s_store_name#20, s_state#22, s_zip#23] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#48] -(54) Scan parquet default.store_sales -Output [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_ticket_number), IsNotNull(ss_item_sk), IsNotNull(ss_store_sk), IsNotNull(ss_customer_sk)] -ReadSchema: struct - -(55) ColumnarToRow -Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] - -(56) Filter -Input [5]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] -Condition : (((isnotnull(ss_ticket_number#4) AND isnotnull(ss_item_sk#1)) AND isnotnull(ss_store_sk#3)) AND isnotnull(ss_customer_sk#2)) - (57) BroadcastHashJoin [codegen id : 2] -Left keys [1]: [s_store_sk#19] -Right keys [1]: [ss_store_sk#3] +Left keys [1]: [ss_store_sk#3] +Right keys [1]: [s_store_sk#19] Join condition: None (58) Project [codegen id : 2] -Output [7]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5] -Input [9]: [s_store_sk#19, s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5] +Output [7]: [ss_item_sk#1, 
ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23] +Input [9]: [ss_item_sk#1, ss_customer_sk#2, ss_store_sk#3, ss_ticket_number#4, ss_net_paid#5, s_store_sk#19, s_store_name#20, s_state#22, s_zip#23] (59) Exchange -Input [7]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5] -Arguments: hashpartitioning(ss_item_sk#1, 5), true, [id=#49] +Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#49] (60) Sort [codegen id : 3] -Input [7]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5] -Arguments: [ss_item_sk#1 ASC NULLS FIRST], false, 0 +Input [7]: [ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23] +Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 -(61) Scan parquet default.item -Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +(61) Scan parquet default.customer +Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] Batched: true -Location [not included in comparison]/{warehouse_dir}/item] -PushedFilters: [IsNotNull(i_item_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer] +PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] +ReadSchema: struct (62) ColumnarToRow [codegen id : 4] -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] (63) Filter [codegen id : 4] -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Condition : isnotnull(i_item_sk#6) +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Condition : (isnotnull(c_customer_sk#14) AND isnotnull(c_birth_country#17)) (64) Exchange -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: hashpartitioning(i_item_sk#6, 5), true, [id=#50] +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: hashpartitioning(c_customer_sk#14, 5), ENSURE_REQUIREMENTS, [id=#50] (65) Sort [codegen id : 5] -Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: [i_item_sk#6 ASC NULLS FIRST], false, 0 +Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: [c_customer_sk#14 ASC NULLS FIRST], false, 0 (66) SortMergeJoin [codegen id : 6] -Left keys [1]: [ss_item_sk#1] -Right keys [1]: [i_item_sk#6] +Left keys [1]: [ss_customer_sk#2] +Right keys [1]: [c_customer_sk#14] Join condition: None (67) Project [codegen id : 6] -Output [12]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Input [13]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Output [9]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17] +Input [11]: [ss_item_sk#1, ss_customer_sk#2, 
ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] (68) Exchange -Input [12]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: hashpartitioning(ss_customer_sk#2, 5), true, [id=#51] +Input [9]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: hashpartitioning(ss_item_sk#1, 5), ENSURE_REQUIREMENTS, [id=#51] (69) Sort [codegen id : 7] -Input [12]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] -Arguments: [ss_customer_sk#2 ASC NULLS FIRST], false, 0 +Input [9]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17] +Arguments: [ss_item_sk#1 ASC NULLS FIRST], false, 0 -(70) Scan parquet default.customer -Output [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +(70) Scan parquet default.item +Output [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer] -PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_birth_country)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/item] +PushedFilters: [IsNotNull(i_item_sk)] +ReadSchema: struct (71) ColumnarToRow [codegen id : 8] -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] (72) Filter [codegen id : 8] -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Condition : (isnotnull(c_customer_sk#14) AND isnotnull(c_birth_country#17)) +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Condition : isnotnull(i_item_sk#6) (73) Exchange -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: hashpartitioning(c_customer_sk#14, 5), true, [id=#52] +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Arguments: hashpartitioning(i_item_sk#6, 5), ENSURE_REQUIREMENTS, [id=#52] (74) Sort [codegen id : 9] -Input [4]: [c_customer_sk#14, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: [c_customer_sk#14 ASC NULLS FIRST], false, 0 +Input [6]: [i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Arguments: [i_item_sk#6 ASC NULLS FIRST], false, 0 (75) SortMergeJoin [codegen id : 10] -Left keys [1]: [ss_customer_sk#2] -Right keys [1]: [c_customer_sk#14] +Left keys [1]: [ss_item_sk#1] +Right keys [1]: [i_item_sk#6] Join condition: None (76) Project [codegen id : 10] -Output [14]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17] -Input [16]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_customer_sk#2, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_customer_sk#14, c_first_name#15, c_last_name#16, 
c_birth_country#17] +Output [14]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Input [15]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_item_sk#6, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] (77) Exchange -Input [14]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17] -Arguments: hashpartitioning(c_birth_country#17, s_zip#23, 5), true, [id=#53] +Input [14]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] +Arguments: hashpartitioning(c_birth_country#17, s_zip#23, 5), ENSURE_REQUIREMENTS, [id=#53] (78) Sort [codegen id : 11] -Input [14]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17] +Input [14]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11] Arguments: [c_birth_country#17 ASC NULLS FIRST, s_zip#23 ASC NULLS FIRST], false, 0 (79) Scan parquet default.customer_address @@ -474,7 +474,7 @@ Condition : (isnotnull(ca_country#27) AND isnotnull(ca_zip#26)) (82) Exchange Input [3]: [ca_state#25, ca_zip#26, ca_country#27] -Arguments: hashpartitioning(upper(ca_country#27), ca_zip#26, 5), true, [id=#54] +Arguments: hashpartitioning(upper(ca_country#27), ca_zip#26, 5), ENSURE_REQUIREMENTS, [id=#54] (83) Sort [codegen id : 13] Input [3]: [ca_state#25, ca_zip#26, ca_country#27] @@ -486,15 +486,15 @@ Right keys [2]: [upper(ca_country#27), ca_zip#26] Join condition: None (85) Project [codegen id : 14] -Output [13]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] -Input [17]: [s_store_name#20, s_state#22, s_zip#23, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, c_birth_country#17, ca_state#25, ca_zip#26, ca_country#27] +Output [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25] +Input [17]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, s_zip#23, c_first_name#15, c_last_name#16, c_birth_country#17, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25, ca_zip#26, ca_country#27] (86) Exchange -Input [13]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] -Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), true, [id=#55] +Input [13]: [ss_item_sk#1, 
ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25] +Arguments: hashpartitioning(cast(ss_ticket_number#4 as bigint), cast(ss_item_sk#1 as bigint), 5), ENSURE_REQUIREMENTS, [id=#55] (87) Sort [codegen id : 15] -Input [13]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] +Input [13]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25] Arguments: [cast(ss_ticket_number#4 as bigint) ASC NULLS FIRST, cast(ss_item_sk#1 as bigint) ASC NULLS FIRST], false, 0 (88) Scan parquet default.store_returns @@ -513,7 +513,7 @@ Condition : (isnotnull(sr_ticket_number#31) AND isnotnull(sr_item_sk#30)) (91) Exchange Input [2]: [sr_item_sk#30, sr_ticket_number#31] -Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), true, [id=#56] +Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), ENSURE_REQUIREMENTS, [id=#56] (92) Sort [codegen id : 17] Input [2]: [sr_item_sk#30, sr_ticket_number#31] @@ -526,7 +526,7 @@ Join condition: None (94) Project [codegen id : 18] Output [11]: [ss_net_paid#5, s_store_name#20, s_state#22, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] -Input [15]: [s_store_name#20, s_state#22, ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25, sr_item_sk#30, sr_ticket_number#31] +Input [15]: [ss_item_sk#1, ss_ticket_number#4, ss_net_paid#5, s_store_name#20, s_state#22, c_first_name#15, c_last_name#16, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, ca_state#25, sr_item_sk#30, sr_ticket_number#31] (95) HashAggregate [codegen id : 18] Input [11]: [ss_net_paid#5, s_store_name#20, s_state#22, i_current_price#7, i_size#8, i_color#9, i_units#10, i_manager_id#11, c_first_name#15, c_last_name#16, ca_state#25] @@ -537,7 +537,7 @@ Results [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_ (96) Exchange Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#58] -Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), true, [id=#59] +Arguments: hashpartitioning(c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, 5), ENSURE_REQUIREMENTS, [id=#59] (97) HashAggregate [codegen id : 19] Input [11]: [c_last_name#16, c_first_name#15, s_store_name#20, ca_state#25, s_state#22, i_color#9, i_current_price#7, i_manager_id#11, i_units#10, i_size#8, sum#58] @@ -555,7 +555,7 @@ Results [2]: [sum#63, count#64] (99) Exchange Input [2]: [sum#63, count#64] -Arguments: SinglePartition, true, [id=#65] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#65] (100) HashAggregate [codegen id : 20] Input [2]: [sum#63, count#64] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/simplified.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/simplified.txt index f51d1972b630f..7de562c5d59a1 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q24b.sf100/simplified.txt @@ -21,7 +21,7 @@ WholeStageCodegen (14) InputAdapter Exchange [ss_ticket_number,ss_item_sk] #12 WholeStageCodegen (14) - Project [s_store_name,s_state,ss_item_sk,ss_ticket_number,ss_net_paid,i_current_price,i_size,i_color,i_units,i_manager_id,c_first_name,c_last_name,ca_state] + Project [ss_item_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,c_first_name,c_last_name,i_current_price,i_size,i_color,i_units,i_manager_id,ca_state] SortMergeJoin [c_birth_country,s_zip,ca_country,ca_zip] InputAdapter WholeStageCodegen (11) @@ -29,24 +29,28 @@ WholeStageCodegen (14) InputAdapter Exchange [c_birth_country,s_zip] #13 WholeStageCodegen (10) - Project [s_store_name,s_state,s_zip,ss_item_sk,ss_ticket_number,ss_net_paid,i_current_price,i_size,i_color,i_units,i_manager_id,c_first_name,c_last_name,c_birth_country] - SortMergeJoin [ss_customer_sk,c_customer_sk] + Project [ss_item_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,s_zip,c_first_name,c_last_name,c_birth_country,i_current_price,i_size,i_color,i_units,i_manager_id] + SortMergeJoin [ss_item_sk,i_item_sk] InputAdapter WholeStageCodegen (7) - Sort [ss_customer_sk] + Sort [ss_item_sk] InputAdapter - Exchange [ss_customer_sk] #14 + Exchange [ss_item_sk] #14 WholeStageCodegen (6) - Project [s_store_name,s_state,s_zip,ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_paid,i_current_price,i_size,i_color,i_units,i_manager_id] - SortMergeJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,s_zip,c_first_name,c_last_name,c_birth_country] + SortMergeJoin [ss_customer_sk,c_customer_sk] InputAdapter WholeStageCodegen (3) - Sort [ss_item_sk] + Sort [ss_customer_sk] InputAdapter - Exchange [ss_item_sk] #15 + Exchange [ss_customer_sk] #15 WholeStageCodegen (2) - Project [s_store_name,s_state,s_zip,ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_paid] - BroadcastHashJoin [s_store_sk,ss_store_sk] + Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_paid,s_store_name,s_state,s_zip] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Filter [ss_ticket_number,ss_item_sk,ss_store_sk,ss_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_paid] InputAdapter BroadcastExchange #16 WholeStageCodegen (1) @@ -55,30 +59,26 @@ WholeStageCodegen (14) ColumnarToRow InputAdapter Scan parquet default.store [s_store_sk,s_store_name,s_market_id,s_state,s_zip] - Filter [ss_ticket_number,ss_item_sk,ss_store_sk,ss_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_paid] InputAdapter WholeStageCodegen (5) - Sort [i_item_sk] + Sort [c_customer_sk] InputAdapter - Exchange [i_item_sk] #17 + Exchange [c_customer_sk] #17 WholeStageCodegen (4) - Filter [i_item_sk] + Filter [c_customer_sk,c_birth_country] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_current_price,i_size,i_color,i_units,i_manager_id] + Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name,c_birth_country] InputAdapter WholeStageCodegen (9) - Sort [c_customer_sk] + Sort [i_item_sk] InputAdapter - Exchange 
[c_customer_sk] #18 + Exchange [i_item_sk] #18 WholeStageCodegen (8) - Filter [c_customer_sk,c_birth_country] + Filter [i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name,c_birth_country] + Scan parquet default.item [i_item_sk,i_current_price,i_size,i_color,i_units,i_manager_id] InputAdapter WholeStageCodegen (13) Sort [ca_country,ca_zip] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt index 87a72d3bbe777..3100e574e60e3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/explain.txt @@ -5,57 +5,57 @@ TakeOrderedAndProject (57) +- * HashAggregate (54) +- * Project (53) +- * SortMergeJoin Inner (52) - :- * Sort (43) - : +- Exchange (42) - : +- * Project (41) - : +- * SortMergeJoin Inner (40) - : :- * Sort (27) - : : +- Exchange (26) - : : +- * Project (25) - : : +- * SortMergeJoin Inner (24) - : : :- * Sort (18) - : : : +- Exchange (17) - : : : +- * Project (16) - : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : :- * Project (10) - : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : :- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.store_sales (1) - : : : : +- BroadcastExchange (8) - : : : : +- * Project (7) - : : : : +- * Filter (6) - : : : : +- * ColumnarToRow (5) - : : : : +- Scan parquet default.date_dim (4) - : : : +- BroadcastExchange (14) - : : : +- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.store (11) - : : +- * Sort (23) - : : +- Exchange (22) - : : +- * Filter (21) - : : +- * ColumnarToRow (20) - : : +- Scan parquet default.item (19) - : +- * Sort (39) - : +- Exchange (38) - : +- * Project (37) - : +- * BroadcastHashJoin Inner BuildLeft (36) - : :- BroadcastExchange (32) - : : +- * Project (31) - : : +- * Filter (30) - : : +- * ColumnarToRow (29) - : : +- Scan parquet default.date_dim (28) - : +- * Filter (35) - : +- * ColumnarToRow (34) - : +- Scan parquet default.store_returns (33) + :- * Sort (27) + : +- Exchange (26) + : +- * Project (25) + : +- * SortMergeJoin Inner (24) + : :- * Sort (18) + : : +- Exchange (17) + : : +- * Project (16) + : : +- * BroadcastHashJoin Inner BuildRight (15) + : : :- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.store_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) + : : +- BroadcastExchange (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.store (11) + : +- * Sort (23) + : +- Exchange (22) + : +- * Filter (21) + : +- * ColumnarToRow (20) + : +- Scan parquet default.item (19) +- * Sort (51) +- Exchange (50) +- * Project (49) - +- * BroadcastHashJoin Inner BuildRight (48) - :- * Filter (46) - : +- * ColumnarToRow (45) - : +- Scan parquet default.catalog_sales (44) - +- ReusedExchange (47) + +- * SortMergeJoin Inner (48) + :- * Sort (39) + : +- Exchange (38) + : +- * Project (37) + : +- * BroadcastHashJoin Inner BuildRight (36) + : :- * Filter (30) + : : +- * ColumnarToRow (29) + : : +- Scan parquet default.store_returns (28) + : +- BroadcastExchange (35) + 
: +- * Project (34) + : +- * Filter (33) + : +- * ColumnarToRow (32) + : +- Scan parquet default.date_dim (31) + +- * Sort (47) + +- Exchange (46) + +- * Project (45) + +- * BroadcastHashJoin Inner BuildRight (44) + :- * Filter (42) + : +- * ColumnarToRow (41) + : +- Scan parquet default.catalog_sales (40) + +- ReusedExchange (43) (1) Scan parquet default.store_sales @@ -132,7 +132,7 @@ Input [8]: [ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ticket_number#5, s (17) Exchange Input [6]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_net_profit#6, s_store_id#12, s_store_name#13] -Arguments: hashpartitioning(ss_item_sk#2, 5), true, [id=#15] +Arguments: hashpartitioning(ss_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#15] (18) Sort [codegen id : 4] Input [6]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_net_profit#6, s_store_id#12, s_store_name#13] @@ -154,7 +154,7 @@ Condition : isnotnull(i_item_sk#16) (22) Exchange Input [3]: [i_item_sk#16, i_item_id#17, i_item_desc#18] -Arguments: hashpartitioning(i_item_sk#16, 5), true, [id=#19] +Arguments: hashpartitioning(i_item_sk#16, 5), ENSURE_REQUIREMENTS, [id=#19] (23) Sort [codegen id : 6] Input [3]: [i_item_sk#16, i_item_id#17, i_item_desc#18] @@ -171,142 +171,142 @@ Input [9]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_net_profit#6, (26) Exchange Input [8]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] -Arguments: hashpartitioning(cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint), 5), true, [id=#20] +Arguments: hashpartitioning(cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint), 5), ENSURE_REQUIREMENTS, [id=#20] (27) Sort [codegen id : 8] Input [8]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] Arguments: [cast(ss_customer_sk#3 as bigint) ASC NULLS FIRST, cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST], false, 0 -(28) Scan parquet default.date_dim -Output [3]: [d_date_sk#21, d_year#22, d_moy#23] +(28) Scan parquet default.store_returns +Output [5]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,10), EqualTo(d_year,2001), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/store_returns] +PushedFilters: [IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number), IsNotNull(sr_returned_date_sk)] +ReadSchema: struct -(29) ColumnarToRow [codegen id : 9] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] +(29) ColumnarToRow [codegen id : 10] +Input [5]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25] -(30) Filter [codegen id : 9] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] -Condition : (((((isnotnull(d_moy#23) AND isnotnull(d_year#22)) AND (d_moy#23 >= 4)) AND (d_moy#23 <= 10)) AND (d_year#22 = 2001)) AND isnotnull(d_date_sk#21)) +(30) Filter [codegen id : 10] +Input [5]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25] +Condition : (((isnotnull(sr_customer_sk#23) AND isnotnull(sr_item_sk#22)) AND 
isnotnull(sr_ticket_number#24)) AND isnotnull(sr_returned_date_sk#21)) -(31) Project [codegen id : 9] -Output [1]: [d_date_sk#21] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] +(31) Scan parquet default.date_dim +Output [3]: [d_date_sk#26, d_year#27, d_moy#28] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), GreaterThanOrEqual(d_moy,4), LessThanOrEqual(d_moy,10), EqualTo(d_year,2001), IsNotNull(d_date_sk)] +ReadSchema: struct -(32) BroadcastExchange -Input [1]: [d_date_sk#21] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#24] +(32) ColumnarToRow [codegen id : 9] +Input [3]: [d_date_sk#26, d_year#27, d_moy#28] -(33) Scan parquet default.store_returns -Output [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_returns] -PushedFilters: [IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number), IsNotNull(sr_returned_date_sk)] -ReadSchema: struct +(33) Filter [codegen id : 9] +Input [3]: [d_date_sk#26, d_year#27, d_moy#28] +Condition : (((((isnotnull(d_moy#28) AND isnotnull(d_year#27)) AND (d_moy#28 >= 4)) AND (d_moy#28 <= 10)) AND (d_year#27 = 2001)) AND isnotnull(d_date_sk#26)) -(34) ColumnarToRow -Input [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] +(34) Project [codegen id : 9] +Output [1]: [d_date_sk#26] +Input [3]: [d_date_sk#26, d_year#27, d_moy#28] -(35) Filter -Input [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] -Condition : (((isnotnull(sr_customer_sk#27) AND isnotnull(sr_item_sk#26)) AND isnotnull(sr_ticket_number#28)) AND isnotnull(sr_returned_date_sk#25)) +(35) BroadcastExchange +Input [1]: [d_date_sk#26] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#29] (36) BroadcastHashJoin [codegen id : 10] -Left keys [1]: [cast(d_date_sk#21 as bigint)] -Right keys [1]: [sr_returned_date_sk#25] +Left keys [1]: [sr_returned_date_sk#21] +Right keys [1]: [cast(d_date_sk#26 as bigint)] Join condition: None (37) Project [codegen id : 10] -Output [4]: [sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] -Input [6]: [d_date_sk#21, sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] +Output [4]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25] +Input [6]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25, d_date_sk#26] (38) Exchange -Input [4]: [sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] -Arguments: hashpartitioning(sr_customer_sk#27, sr_item_sk#26, sr_ticket_number#28, 5), true, [id=#30] +Input [4]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25] +Arguments: hashpartitioning(sr_customer_sk#23, sr_item_sk#22, 5), ENSURE_REQUIREMENTS, [id=#30] (39) Sort [codegen id : 11] -Input [4]: [sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] -Arguments: [sr_customer_sk#27 ASC NULLS FIRST, sr_item_sk#26 ASC NULLS FIRST, sr_ticket_number#28 ASC NULLS FIRST], false, 0 - -(40) SortMergeJoin [codegen id : 12] -Left keys [3]: [cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint)] -Right keys [3]: [sr_customer_sk#27, sr_item_sk#26, 
sr_ticket_number#28] -Join condition: None - -(41) Project [codegen id : 12] -Output [8]: [ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_net_loss#29] -Input [12]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_net_loss#29] - -(42) Exchange -Input [8]: [ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_net_loss#29] -Arguments: hashpartitioning(sr_customer_sk#27, sr_item_sk#26, 5), true, [id=#31] - -(43) Sort [codegen id : 13] -Input [8]: [ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_net_loss#29] -Arguments: [sr_customer_sk#27 ASC NULLS FIRST, sr_item_sk#26 ASC NULLS FIRST], false, 0 +Input [4]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25] +Arguments: [sr_customer_sk#23 ASC NULLS FIRST, sr_item_sk#22 ASC NULLS FIRST], false, 0 -(44) Scan parquet default.catalog_sales -Output [4]: [cs_sold_date_sk#32, cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35] +(40) Scan parquet default.catalog_sales +Output [4]: [cs_sold_date_sk#31, cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_bill_customer_sk), IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(45) ColumnarToRow [codegen id : 15] -Input [4]: [cs_sold_date_sk#32, cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35] +(41) ColumnarToRow [codegen id : 13] +Input [4]: [cs_sold_date_sk#31, cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34] -(46) Filter [codegen id : 15] -Input [4]: [cs_sold_date_sk#32, cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35] -Condition : ((isnotnull(cs_bill_customer_sk#33) AND isnotnull(cs_item_sk#34)) AND isnotnull(cs_sold_date_sk#32)) +(42) Filter [codegen id : 13] +Input [4]: [cs_sold_date_sk#31, cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34] +Condition : ((isnotnull(cs_bill_customer_sk#32) AND isnotnull(cs_item_sk#33)) AND isnotnull(cs_sold_date_sk#31)) -(47) ReusedExchange [Reuses operator id: 32] -Output [1]: [d_date_sk#36] +(43) ReusedExchange [Reuses operator id: 35] +Output [1]: [d_date_sk#35] -(48) BroadcastHashJoin [codegen id : 15] -Left keys [1]: [cs_sold_date_sk#32] -Right keys [1]: [d_date_sk#36] +(44) BroadcastHashJoin [codegen id : 13] +Left keys [1]: [cs_sold_date_sk#31] +Right keys [1]: [d_date_sk#35] +Join condition: None + +(45) Project [codegen id : 13] +Output [3]: [cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34] +Input [5]: [cs_sold_date_sk#31, cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34, d_date_sk#35] + +(46) Exchange +Input [3]: [cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34] +Arguments: hashpartitioning(cast(cs_bill_customer_sk#32 as bigint), cast(cs_item_sk#33 as bigint), 5), ENSURE_REQUIREMENTS, [id=#36] + +(47) Sort [codegen id : 14] +Input [3]: [cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34] +Arguments: [cast(cs_bill_customer_sk#32 as bigint) ASC NULLS FIRST, cast(cs_item_sk#33 as bigint) ASC NULLS FIRST], false, 0 + +(48) SortMergeJoin [codegen id : 15] +Left keys [2]: [sr_customer_sk#23, sr_item_sk#22] +Right keys [2]: [cast(cs_bill_customer_sk#32 as bigint), cast(cs_item_sk#33 as bigint)] Join condition: None 
(49) Project [codegen id : 15] -Output [3]: [cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35] -Input [5]: [cs_sold_date_sk#32, cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35, d_date_sk#36] +Output [5]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25, cs_net_profit#34] +Input [7]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25, cs_bill_customer_sk#32, cs_item_sk#33, cs_net_profit#34] (50) Exchange -Input [3]: [cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35] -Arguments: hashpartitioning(cast(cs_bill_customer_sk#33 as bigint), cast(cs_item_sk#34 as bigint), 5), true, [id=#37] +Input [5]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25, cs_net_profit#34] +Arguments: hashpartitioning(sr_customer_sk#23, sr_item_sk#22, sr_ticket_number#24, 5), ENSURE_REQUIREMENTS, [id=#37] (51) Sort [codegen id : 16] -Input [3]: [cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35] -Arguments: [cast(cs_bill_customer_sk#33 as bigint) ASC NULLS FIRST, cast(cs_item_sk#34 as bigint) ASC NULLS FIRST], false, 0 +Input [5]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25, cs_net_profit#34] +Arguments: [sr_customer_sk#23 ASC NULLS FIRST, sr_item_sk#22 ASC NULLS FIRST, sr_ticket_number#24 ASC NULLS FIRST], false, 0 (52) SortMergeJoin [codegen id : 17] -Left keys [2]: [sr_customer_sk#27, sr_item_sk#26] -Right keys [2]: [cast(cs_bill_customer_sk#33 as bigint), cast(cs_item_sk#34 as bigint)] +Left keys [3]: [cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint)] +Right keys [3]: [sr_customer_sk#23, sr_item_sk#22, sr_ticket_number#24] Join condition: None (53) Project [codegen id : 17] -Output [7]: [ss_net_profit#6, sr_net_loss#29, cs_net_profit#35, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] -Input [11]: [ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_net_loss#29, cs_bill_customer_sk#33, cs_item_sk#34, cs_net_profit#35] +Output [7]: [ss_net_profit#6, sr_net_loss#25, cs_net_profit#34, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] +Input [13]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_net_profit#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_net_loss#25, cs_net_profit#34] (54) HashAggregate [codegen id : 17] -Input [7]: [ss_net_profit#6, sr_net_loss#29, cs_net_profit#35, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] +Input [7]: [ss_net_profit#6, sr_net_loss#25, cs_net_profit#34, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] Keys [4]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13] -Functions [3]: [partial_sum(UnscaledValue(ss_net_profit#6)), partial_sum(UnscaledValue(sr_net_loss#29)), partial_sum(UnscaledValue(cs_net_profit#35))] +Functions [3]: [partial_sum(UnscaledValue(ss_net_profit#6)), partial_sum(UnscaledValue(sr_net_loss#25)), partial_sum(UnscaledValue(cs_net_profit#34))] Aggregate Attributes [3]: [sum#38, sum#39, sum#40] Results [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, sum#41, sum#42, sum#43] (55) Exchange Input [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, sum#41, sum#42, sum#43] -Arguments: hashpartitioning(i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, 5), true, [id=#44] +Arguments: hashpartitioning(i_item_id#17, i_item_desc#18, s_store_id#12, 
s_store_name#13, 5), ENSURE_REQUIREMENTS, [id=#44] (56) HashAggregate [codegen id : 18] Input [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, sum#41, sum#42, sum#43] Keys [4]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13] -Functions [3]: [sum(UnscaledValue(ss_net_profit#6)), sum(UnscaledValue(sr_net_loss#29)), sum(UnscaledValue(cs_net_profit#35))] -Aggregate Attributes [3]: [sum(UnscaledValue(ss_net_profit#6))#45, sum(UnscaledValue(sr_net_loss#29))#46, sum(UnscaledValue(cs_net_profit#35))#47] -Results [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, MakeDecimal(sum(UnscaledValue(ss_net_profit#6))#45,17,2) AS store_sales_profit#48, MakeDecimal(sum(UnscaledValue(sr_net_loss#29))#46,17,2) AS store_returns_loss#49, MakeDecimal(sum(UnscaledValue(cs_net_profit#35))#47,17,2) AS catalog_sales_profit#50] +Functions [3]: [sum(UnscaledValue(ss_net_profit#6)), sum(UnscaledValue(sr_net_loss#25)), sum(UnscaledValue(cs_net_profit#34))] +Aggregate Attributes [3]: [sum(UnscaledValue(ss_net_profit#6))#45, sum(UnscaledValue(sr_net_loss#25))#46, sum(UnscaledValue(cs_net_profit#34))#47] +Results [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, MakeDecimal(sum(UnscaledValue(ss_net_profit#6))#45,17,2) AS store_sales_profit#48, MakeDecimal(sum(UnscaledValue(sr_net_loss#25))#46,17,2) AS store_returns_loss#49, MakeDecimal(sum(UnscaledValue(cs_net_profit#34))#47,17,2) AS catalog_sales_profit#50] (57) TakeOrderedAndProject Input [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, store_sales_profit#48, store_returns_loss#49, catalog_sales_profit#50] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/simplified.txt index 8e61cf9c519fd..9b53cdaa5dc67 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q25.sf100/simplified.txt @@ -6,70 +6,74 @@ TakeOrderedAndProject [i_item_id,i_item_desc,s_store_id,s_store_name,store_sales WholeStageCodegen (17) HashAggregate [i_item_id,i_item_desc,s_store_id,s_store_name,ss_net_profit,sr_net_loss,cs_net_profit] [sum,sum,sum,sum,sum,sum] Project [ss_net_profit,sr_net_loss,cs_net_profit,s_store_id,s_store_name,i_item_id,i_item_desc] - SortMergeJoin [sr_customer_sk,sr_item_sk,cs_bill_customer_sk,cs_item_sk] + SortMergeJoin [ss_customer_sk,ss_item_sk,ss_ticket_number,sr_customer_sk,sr_item_sk,sr_ticket_number] InputAdapter - WholeStageCodegen (13) - Sort [sr_customer_sk,sr_item_sk] + WholeStageCodegen (8) + Sort [ss_customer_sk,ss_item_sk,ss_ticket_number] InputAdapter - Exchange [sr_customer_sk,sr_item_sk] #2 - WholeStageCodegen (12) - Project [ss_net_profit,s_store_id,s_store_name,i_item_id,i_item_desc,sr_item_sk,sr_customer_sk,sr_net_loss] - SortMergeJoin [ss_customer_sk,ss_item_sk,ss_ticket_number,sr_customer_sk,sr_item_sk,sr_ticket_number] + Exchange [ss_customer_sk,ss_item_sk,ss_ticket_number] #2 + WholeStageCodegen (7) + Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_profit,s_store_id,s_store_name,i_item_id,i_item_desc] + SortMergeJoin [ss_item_sk,i_item_sk] InputAdapter - WholeStageCodegen (8) - Sort [ss_customer_sk,ss_item_sk,ss_ticket_number] + WholeStageCodegen (4) + Sort [ss_item_sk] InputAdapter - Exchange [ss_customer_sk,ss_item_sk,ss_ticket_number] #3 - WholeStageCodegen (7) - Project 
[ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_profit,s_store_id,s_store_name,i_item_id,i_item_desc] - SortMergeJoin [ss_item_sk,i_item_sk] - InputAdapter - WholeStageCodegen (4) - Sort [ss_item_sk] - InputAdapter - Exchange [ss_item_sk] #4 - WholeStageCodegen (3) - Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_profit,s_store_id,s_store_name] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_customer_sk,ss_item_sk,ss_ticket_number,ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_moy,d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_store_id,s_store_name] - InputAdapter - WholeStageCodegen (6) - Sort [i_item_sk] - InputAdapter - Exchange [i_item_sk] #7 - WholeStageCodegen (5) - Filter [i_item_sk] + Exchange [ss_item_sk] #3 + WholeStageCodegen (3) + Project [ss_item_sk,ss_customer_sk,ss_ticket_number,ss_net_profit,s_store_id,s_store_name] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_customer_sk,ss_item_sk,ss_ticket_number,ss_sold_date_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ticket_number,ss_net_profit] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_moy,d_year,d_date_sk] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_item_id,i_item_desc] + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_store_id,s_store_name] + InputAdapter + WholeStageCodegen (6) + Sort [i_item_sk] + InputAdapter + Exchange [i_item_sk] #6 + WholeStageCodegen (5) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_item_id,i_item_desc] + InputAdapter + WholeStageCodegen (16) + Sort [sr_customer_sk,sr_item_sk,sr_ticket_number] + InputAdapter + Exchange [sr_customer_sk,sr_item_sk,sr_ticket_number] #7 + WholeStageCodegen (15) + Project [sr_item_sk,sr_customer_sk,sr_ticket_number,sr_net_loss,cs_net_profit] + SortMergeJoin [sr_customer_sk,sr_item_sk,cs_bill_customer_sk,cs_item_sk] InputAdapter WholeStageCodegen (11) - Sort [sr_customer_sk,sr_item_sk,sr_ticket_number] + Sort [sr_customer_sk,sr_item_sk] InputAdapter - Exchange [sr_customer_sk,sr_item_sk,sr_ticket_number] #8 + Exchange [sr_customer_sk,sr_item_sk] #8 WholeStageCodegen (10) Project [sr_item_sk,sr_customer_sk,sr_ticket_number,sr_net_loss] - BroadcastHashJoin [d_date_sk,sr_returned_date_sk] + BroadcastHashJoin [sr_returned_date_sk,d_date_sk] + Filter [sr_customer_sk,sr_item_sk,sr_ticket_number,sr_returned_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number,sr_net_loss] InputAdapter 
BroadcastExchange #9 WholeStageCodegen (9) @@ -78,21 +82,17 @@ TakeOrderedAndProject [i_item_id,i_item_desc,s_store_id,s_store_name,store_sales ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - Filter [sr_customer_sk,sr_item_sk,sr_ticket_number,sr_returned_date_sk] + InputAdapter + WholeStageCodegen (14) + Sort [cs_bill_customer_sk,cs_item_sk] + InputAdapter + Exchange [cs_bill_customer_sk,cs_item_sk] #10 + WholeStageCodegen (13) + Project [cs_bill_customer_sk,cs_item_sk,cs_net_profit] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_bill_customer_sk,cs_item_sk,cs_sold_date_sk] ColumnarToRow InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number,sr_net_loss] - InputAdapter - WholeStageCodegen (16) - Sort [cs_bill_customer_sk,cs_item_sk] - InputAdapter - Exchange [cs_bill_customer_sk,cs_item_sk] #10 - WholeStageCodegen (15) - Project [cs_bill_customer_sk,cs_item_sk,cs_net_profit] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_bill_customer_sk,cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_net_profit] - InputAdapter - ReusedExchange [d_date_sk] #9 + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk,cs_net_profit] + InputAdapter + ReusedExchange [d_date_sk] #9 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt index 9788040bbe6de..a0f029c9b9325 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/explain.txt @@ -1,82 +1,81 @@ == Physical Plan == -CollectLimit (71) -+- BroadcastNestedLoopJoin Inner BuildRight (70) - :- BroadcastNestedLoopJoin Inner BuildRight (58) - : :- BroadcastNestedLoopJoin Inner BuildRight (46) - : : :- BroadcastNestedLoopJoin Inner BuildRight (34) - : : : :- BroadcastNestedLoopJoin Inner BuildRight (22) - : : : : :- * HashAggregate (10) - : : : : : +- Exchange (9) - : : : : : +- * HashAggregate (8) - : : : : : +- * HashAggregate (7) - : : : : : +- Exchange (6) - : : : : : +- * HashAggregate (5) - : : : : : +- * Project (4) - : : : : : +- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.store_sales (1) - : : : : +- BroadcastExchange (21) - : : : : +- * HashAggregate (20) - : : : : +- Exchange (19) - : : : : +- * HashAggregate (18) - : : : : +- * HashAggregate (17) - : : : : +- Exchange (16) - : : : : +- * HashAggregate (15) - : : : : +- * Project (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.store_sales (11) - : : : +- BroadcastExchange (33) - : : : +- * HashAggregate (32) - : : : +- Exchange (31) - : : : +- * HashAggregate (30) - : : : +- * HashAggregate (29) - : : : +- Exchange (28) - : : : +- * HashAggregate (27) - : : : +- * Project (26) - : : : +- * Filter (25) - : : : +- * ColumnarToRow (24) - : : : +- Scan parquet default.store_sales (23) - : : +- BroadcastExchange (45) - : : +- * HashAggregate (44) - : : +- Exchange (43) - : : +- * HashAggregate (42) - : : +- * HashAggregate (41) - : : +- Exchange (40) - : : +- * HashAggregate (39) - : : +- * Project (38) - : : +- * Filter (37) - : : +- * ColumnarToRow (36) - : : +- Scan parquet default.store_sales (35) 
- : +- BroadcastExchange (57) - : +- * HashAggregate (56) - : +- Exchange (55) - : +- * HashAggregate (54) - : +- * HashAggregate (53) - : +- Exchange (52) - : +- * HashAggregate (51) - : +- * Project (50) - : +- * Filter (49) - : +- * ColumnarToRow (48) - : +- Scan parquet default.store_sales (47) - +- BroadcastExchange (69) - +- * HashAggregate (68) - +- Exchange (67) - +- * HashAggregate (66) - +- * HashAggregate (65) - +- Exchange (64) - +- * HashAggregate (63) - +- * Project (62) - +- * Filter (61) - +- * ColumnarToRow (60) - +- Scan parquet default.store_sales (59) +BroadcastNestedLoopJoin Inner BuildRight (70) +:- BroadcastNestedLoopJoin Inner BuildRight (58) +: :- BroadcastNestedLoopJoin Inner BuildRight (46) +: : :- BroadcastNestedLoopJoin Inner BuildRight (34) +: : : :- BroadcastNestedLoopJoin Inner BuildRight (22) +: : : : :- * HashAggregate (10) +: : : : : +- Exchange (9) +: : : : : +- * HashAggregate (8) +: : : : : +- * HashAggregate (7) +: : : : : +- Exchange (6) +: : : : : +- * HashAggregate (5) +: : : : : +- * Project (4) +: : : : : +- * Filter (3) +: : : : : +- * ColumnarToRow (2) +: : : : : +- Scan parquet default.store_sales (1) +: : : : +- BroadcastExchange (21) +: : : : +- * HashAggregate (20) +: : : : +- Exchange (19) +: : : : +- * HashAggregate (18) +: : : : +- * HashAggregate (17) +: : : : +- Exchange (16) +: : : : +- * HashAggregate (15) +: : : : +- * Project (14) +: : : : +- * Filter (13) +: : : : +- * ColumnarToRow (12) +: : : : +- Scan parquet default.store_sales (11) +: : : +- BroadcastExchange (33) +: : : +- * HashAggregate (32) +: : : +- Exchange (31) +: : : +- * HashAggregate (30) +: : : +- * HashAggregate (29) +: : : +- Exchange (28) +: : : +- * HashAggregate (27) +: : : +- * Project (26) +: : : +- * Filter (25) +: : : +- * ColumnarToRow (24) +: : : +- Scan parquet default.store_sales (23) +: : +- BroadcastExchange (45) +: : +- * HashAggregate (44) +: : +- Exchange (43) +: : +- * HashAggregate (42) +: : +- * HashAggregate (41) +: : +- Exchange (40) +: : +- * HashAggregate (39) +: : +- * Project (38) +: : +- * Filter (37) +: : +- * ColumnarToRow (36) +: : +- Scan parquet default.store_sales (35) +: +- BroadcastExchange (57) +: +- * HashAggregate (56) +: +- Exchange (55) +: +- * HashAggregate (54) +: +- * HashAggregate (53) +: +- Exchange (52) +: +- * HashAggregate (51) +: +- * Project (50) +: +- * Filter (49) +: +- * ColumnarToRow (48) +: +- Scan parquet default.store_sales (47) ++- BroadcastExchange (69) + +- * HashAggregate (68) + +- Exchange (67) + +- * HashAggregate (66) + +- * HashAggregate (65) + +- Exchange (64) + +- * HashAggregate (63) + +- * Project (62) + +- * Filter (61) + +- * ColumnarToRow (60) + +- Scan parquet default.store_sales (59) (1) Scan parquet default.store_sales Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,0), LessThanOrEqual(ss_quantity,5)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,0), LessThanOrEqual(ss_quantity,5), Or(Or(And(GreaterThanOrEqual(ss_list_price,8.00),LessThanOrEqual(ss_list_price,18.00)),And(GreaterThanOrEqual(ss_coupon_amt,459.00),LessThanOrEqual(ss_coupon_amt,1459.00))),And(GreaterThanOrEqual(ss_wholesale_cost,57.00),LessThanOrEqual(ss_wholesale_cost,77.00)))] ReadSchema: struct (2) ColumnarToRow [codegen id : 1] @@ -84,7 +83,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, 
ss_coupon_amt#4 (3) Filter [codegen id : 1] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 0)) AND (ss_quantity#1 <= 5)) AND ((((ss_list_price#3 >= 8.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 18.00)) OR ((ss_coupon_amt#4 >= 459.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 1459.00))) OR ((ss_wholesale_cost#2 >= 57.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 77.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 0)) AND (ss_quantity#1 <= 5)) AND ((((ss_list_price#3 >= 8.00) AND (ss_list_price#3 <= 18.00)) OR ((ss_coupon_amt#4 >= 459.00) AND (ss_coupon_amt#4 <= 1459.00))) OR ((ss_wholesale_cost#2 >= 57.00) AND (ss_wholesale_cost#2 <= 77.00)))) (4) Project [codegen id : 1] Output [1]: [ss_list_price#3] @@ -130,7 +129,7 @@ Results [3]: [cast((avg(UnscaledValue(ss_list_price#3))#5 / 100.0) as decimal(11 Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,6), LessThanOrEqual(ss_quantity,10)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,6), LessThanOrEqual(ss_quantity,10), Or(Or(And(GreaterThanOrEqual(ss_list_price,90.00),LessThanOrEqual(ss_list_price,100.00)),And(GreaterThanOrEqual(ss_coupon_amt,2323.00),LessThanOrEqual(ss_coupon_amt,3323.00))),And(GreaterThanOrEqual(ss_wholesale_cost,31.00),LessThanOrEqual(ss_wholesale_cost,51.00)))] ReadSchema: struct (12) ColumnarToRow [codegen id : 4] @@ -138,7 +137,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (13) Filter [codegen id : 4] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 6)) AND (ss_quantity#1 <= 10)) AND ((((ss_list_price#3 >= 90.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 100.00)) OR ((ss_coupon_amt#4 >= 2323.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 3323.00))) OR ((ss_wholesale_cost#2 >= 31.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 51.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 6)) AND (ss_quantity#1 <= 10)) AND ((((ss_list_price#3 >= 90.00) AND (ss_list_price#3 <= 100.00)) OR ((ss_coupon_amt#4 >= 2323.00) AND (ss_coupon_amt#4 <= 3323.00))) OR ((ss_wholesale_cost#2 >= 31.00) AND (ss_wholesale_cost#2 <= 51.00)))) (14) Project [codegen id : 4] Output [1]: [ss_list_price#3] @@ -191,7 +190,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,11), LessThanOrEqual(ss_quantity,15)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,11), LessThanOrEqual(ss_quantity,15), Or(Or(And(GreaterThanOrEqual(ss_list_price,142.00),LessThanOrEqual(ss_list_price,152.00)),And(GreaterThanOrEqual(ss_coupon_amt,12214.00),LessThanOrEqual(ss_coupon_amt,13214.00))),And(GreaterThanOrEqual(ss_wholesale_cost,79.00),LessThanOrEqual(ss_wholesale_cost,99.00)))] ReadSchema: struct (24) ColumnarToRow [codegen id : 7] @@ -199,7 +198,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (25) Filter [codegen id : 7] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, 
ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 11)) AND (ss_quantity#1 <= 15)) AND ((((ss_list_price#3 >= 142.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 152.00)) OR ((ss_coupon_amt#4 >= 12214.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 13214.00))) OR ((ss_wholesale_cost#2 >= 79.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 99.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 11)) AND (ss_quantity#1 <= 15)) AND ((((ss_list_price#3 >= 142.00) AND (ss_list_price#3 <= 152.00)) OR ((ss_coupon_amt#4 >= 12214.00) AND (ss_coupon_amt#4 <= 13214.00))) OR ((ss_wholesale_cost#2 >= 79.00) AND (ss_wholesale_cost#2 <= 99.00)))) (26) Project [codegen id : 7] Output [1]: [ss_list_price#3] @@ -252,7 +251,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,16), LessThanOrEqual(ss_quantity,20)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,16), LessThanOrEqual(ss_quantity,20), Or(Or(And(GreaterThanOrEqual(ss_list_price,135.00),LessThanOrEqual(ss_list_price,145.00)),And(GreaterThanOrEqual(ss_coupon_amt,6071.00),LessThanOrEqual(ss_coupon_amt,7071.00))),And(GreaterThanOrEqual(ss_wholesale_cost,38.00),LessThanOrEqual(ss_wholesale_cost,58.00)))] ReadSchema: struct (36) ColumnarToRow [codegen id : 10] @@ -260,7 +259,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (37) Filter [codegen id : 10] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 16)) AND (ss_quantity#1 <= 20)) AND ((((ss_list_price#3 >= 135.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 145.00)) OR ((ss_coupon_amt#4 >= 6071.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 7071.00))) OR ((ss_wholesale_cost#2 >= 38.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 58.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 16)) AND (ss_quantity#1 <= 20)) AND ((((ss_list_price#3 >= 135.00) AND (ss_list_price#3 <= 145.00)) OR ((ss_coupon_amt#4 >= 6071.00) AND (ss_coupon_amt#4 <= 7071.00))) OR ((ss_wholesale_cost#2 >= 38.00) AND (ss_wholesale_cost#2 <= 58.00)))) (38) Project [codegen id : 10] Output [1]: [ss_list_price#3] @@ -313,7 +312,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,25)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,25), Or(Or(And(GreaterThanOrEqual(ss_list_price,122.00),LessThanOrEqual(ss_list_price,132.00)),And(GreaterThanOrEqual(ss_coupon_amt,836.00),LessThanOrEqual(ss_coupon_amt,1836.00))),And(GreaterThanOrEqual(ss_wholesale_cost,17.00),LessThanOrEqual(ss_wholesale_cost,37.00)))] ReadSchema: struct (48) ColumnarToRow [codegen id : 13] @@ -321,7 +320,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (49) Filter [codegen id : 13] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 21)) AND (ss_quantity#1 <= 25)) AND ((((ss_list_price#3 >= 122.00) AND 
(cast(ss_list_price#3 as decimal(12,2)) <= 132.00)) OR ((ss_coupon_amt#4 >= 836.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 1836.00))) OR ((ss_wholesale_cost#2 >= 17.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 37.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 21)) AND (ss_quantity#1 <= 25)) AND ((((ss_list_price#3 >= 122.00) AND (ss_list_price#3 <= 132.00)) OR ((ss_coupon_amt#4 >= 836.00) AND (ss_coupon_amt#4 <= 1836.00))) OR ((ss_wholesale_cost#2 >= 17.00) AND (ss_wholesale_cost#2 <= 37.00)))) (50) Project [codegen id : 13] Output [1]: [ss_list_price#3] @@ -374,7 +373,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,26), LessThanOrEqual(ss_quantity,30)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,26), LessThanOrEqual(ss_quantity,30), Or(Or(And(GreaterThanOrEqual(ss_list_price,154.00),LessThanOrEqual(ss_list_price,164.00)),And(GreaterThanOrEqual(ss_coupon_amt,7326.00),LessThanOrEqual(ss_coupon_amt,8326.00))),And(GreaterThanOrEqual(ss_wholesale_cost,7.00),LessThanOrEqual(ss_wholesale_cost,27.00)))] ReadSchema: struct (60) ColumnarToRow [codegen id : 16] @@ -382,7 +381,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (61) Filter [codegen id : 16] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 26)) AND (ss_quantity#1 <= 30)) AND ((((ss_list_price#3 >= 154.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 164.00)) OR ((ss_coupon_amt#4 >= 7326.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 8326.00))) OR ((ss_wholesale_cost#2 >= 7.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 27.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 26)) AND (ss_quantity#1 <= 30)) AND ((((ss_list_price#3 >= 154.00) AND (ss_list_price#3 <= 164.00)) OR ((ss_coupon_amt#4 >= 7326.00) AND (ss_coupon_amt#4 <= 8326.00))) OR ((ss_wholesale_cost#2 >= 7.00) AND (ss_wholesale_cost#2 <= 27.00)))) (62) Project [codegen id : 16] Output [1]: [ss_list_price#3] @@ -431,7 +430,3 @@ Arguments: IdentityBroadcastMode, [id=#81] (70) BroadcastNestedLoopJoin Join condition: None -(71) CollectLimit -Input [18]: [B1_LP#14, B1_CNT#15, B1_CNTD#16, B2_LP#26, B2_CNT#27, B2_CNTD#28, B3_LP#39, B3_CNT#40, B3_CNTD#41, B4_LP#52, B4_CNT#53, B4_CNTD#54, B5_LP#65, B5_CNT#66, B5_CNTD#67, B6_LP#78, B6_CNT#79, B6_CNTD#80] -Arguments: 100 - diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/simplified.txt index d896002b0965d..77afa321d3ee4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28.sf100/simplified.txt @@ -1,107 +1,106 @@ -CollectLimit +BroadcastNestedLoopJoin BroadcastNestedLoopJoin BroadcastNestedLoopJoin BroadcastNestedLoopJoin BroadcastNestedLoopJoin - BroadcastNestedLoopJoin - WholeStageCodegen (3) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B1_LP,B1_CNT,B1_CNTD,sum,count,count,count] - InputAdapter - Exchange #1 - WholeStageCodegen (2) - HashAggregate 
[ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - InputAdapter - Exchange [ss_list_price] #2 - WholeStageCodegen (1) - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - Project [ss_list_price] - Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #3 - WholeStageCodegen (6) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B2_LP,B2_CNT,B2_CNTD,sum,count,count,count] - InputAdapter - Exchange #4 - WholeStageCodegen (5) - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - InputAdapter - Exchange [ss_list_price] #5 - WholeStageCodegen (4) - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - Project [ss_list_price] - Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #6 - WholeStageCodegen (9) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B3_LP,B3_CNT,B3_CNTD,sum,count,count,count] + WholeStageCodegen (3) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B1_LP,B1_CNT,B1_CNTD,sum,count,count,count] + InputAdapter + Exchange #1 + WholeStageCodegen (2) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + InputAdapter + Exchange [ss_list_price] #2 + WholeStageCodegen (1) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + Project [ss_list_price] + Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] + BroadcastExchange #3 + WholeStageCodegen (6) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B2_LP,B2_CNT,B2_CNTD,sum,count,count,count] InputAdapter - Exchange #7 - WholeStageCodegen (8) + Exchange #4 + WholeStageCodegen (5) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] InputAdapter - Exchange [ss_list_price] #8 - WholeStageCodegen (7) + Exchange [ss_list_price] #5 + WholeStageCodegen (4) HashAggregate [ss_list_price] 
[avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project [ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #9 - WholeStageCodegen (12) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B4_LP,B4_CNT,B4_CNTD,sum,count,count,count] + BroadcastExchange #6 + WholeStageCodegen (9) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B3_LP,B3_CNT,B3_CNTD,sum,count,count,count] InputAdapter - Exchange #10 - WholeStageCodegen (11) + Exchange #7 + WholeStageCodegen (8) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] InputAdapter - Exchange [ss_list_price] #11 - WholeStageCodegen (10) + Exchange [ss_list_price] #8 + WholeStageCodegen (7) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project [ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #12 - WholeStageCodegen (15) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B5_LP,B5_CNT,B5_CNTD,sum,count,count,count] + BroadcastExchange #9 + WholeStageCodegen (12) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B4_LP,B4_CNT,B4_CNTD,sum,count,count,count] InputAdapter - Exchange #13 - WholeStageCodegen (14) + Exchange #10 + WholeStageCodegen (11) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] InputAdapter - Exchange [ss_list_price] #14 - WholeStageCodegen (13) + Exchange [ss_list_price] #11 + WholeStageCodegen (10) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project [ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #15 - WholeStageCodegen (18) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B6_LP,B6_CNT,B6_CNTD,sum,count,count,count] + BroadcastExchange #12 + WholeStageCodegen (15) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B5_LP,B5_CNT,B5_CNTD,sum,count,count,count] InputAdapter - Exchange #16 - WholeStageCodegen (17) + Exchange #13 + WholeStageCodegen (14) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] 
InputAdapter - Exchange [ss_list_price] #17 - WholeStageCodegen (16) + Exchange [ss_list_price] #14 + WholeStageCodegen (13) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project [ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] + BroadcastExchange #15 + WholeStageCodegen (18) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B6_LP,B6_CNT,B6_CNTD,sum,count,count,count] + InputAdapter + Exchange #16 + WholeStageCodegen (17) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + InputAdapter + Exchange [ss_list_price] #17 + WholeStageCodegen (16) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + Project [ss_list_price] + Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt index 9788040bbe6de..a0f029c9b9325 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/explain.txt @@ -1,82 +1,81 @@ == Physical Plan == -CollectLimit (71) -+- BroadcastNestedLoopJoin Inner BuildRight (70) - :- BroadcastNestedLoopJoin Inner BuildRight (58) - : :- BroadcastNestedLoopJoin Inner BuildRight (46) - : : :- BroadcastNestedLoopJoin Inner BuildRight (34) - : : : :- BroadcastNestedLoopJoin Inner BuildRight (22) - : : : : :- * HashAggregate (10) - : : : : : +- Exchange (9) - : : : : : +- * HashAggregate (8) - : : : : : +- * HashAggregate (7) - : : : : : +- Exchange (6) - : : : : : +- * HashAggregate (5) - : : : : : +- * Project (4) - : : : : : +- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.store_sales (1) - : : : : +- BroadcastExchange (21) - : : : : +- * HashAggregate (20) - : : : : +- Exchange (19) - : : : : +- * HashAggregate (18) - : : : : +- * HashAggregate (17) - : : : : +- Exchange (16) - : : : : +- * HashAggregate (15) - : : : : +- * Project (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.store_sales (11) - : : : +- BroadcastExchange (33) - : : : +- * HashAggregate (32) - : : : +- Exchange (31) - : : : +- * HashAggregate (30) - : : : +- * HashAggregate (29) - : : : +- Exchange (28) - : : : +- * HashAggregate (27) - : : : +- * Project (26) - : : : +- * Filter (25) - : : : +- * ColumnarToRow (24) - : : : +- Scan parquet default.store_sales (23) - : : +- BroadcastExchange (45) - : : +- * HashAggregate (44) - : : +- Exchange (43) - : : +- * HashAggregate (42) - : : +- * HashAggregate (41) - : : +- Exchange (40) - : : +- * HashAggregate (39) - : : +- * Project (38) - : : +- * Filter (37) - : : +- * ColumnarToRow (36) - : : +- Scan parquet default.store_sales (35) - : +- BroadcastExchange (57) - : +- * 
HashAggregate (56) - : +- Exchange (55) - : +- * HashAggregate (54) - : +- * HashAggregate (53) - : +- Exchange (52) - : +- * HashAggregate (51) - : +- * Project (50) - : +- * Filter (49) - : +- * ColumnarToRow (48) - : +- Scan parquet default.store_sales (47) - +- BroadcastExchange (69) - +- * HashAggregate (68) - +- Exchange (67) - +- * HashAggregate (66) - +- * HashAggregate (65) - +- Exchange (64) - +- * HashAggregate (63) - +- * Project (62) - +- * Filter (61) - +- * ColumnarToRow (60) - +- Scan parquet default.store_sales (59) +BroadcastNestedLoopJoin Inner BuildRight (70) +:- BroadcastNestedLoopJoin Inner BuildRight (58) +: :- BroadcastNestedLoopJoin Inner BuildRight (46) +: : :- BroadcastNestedLoopJoin Inner BuildRight (34) +: : : :- BroadcastNestedLoopJoin Inner BuildRight (22) +: : : : :- * HashAggregate (10) +: : : : : +- Exchange (9) +: : : : : +- * HashAggregate (8) +: : : : : +- * HashAggregate (7) +: : : : : +- Exchange (6) +: : : : : +- * HashAggregate (5) +: : : : : +- * Project (4) +: : : : : +- * Filter (3) +: : : : : +- * ColumnarToRow (2) +: : : : : +- Scan parquet default.store_sales (1) +: : : : +- BroadcastExchange (21) +: : : : +- * HashAggregate (20) +: : : : +- Exchange (19) +: : : : +- * HashAggregate (18) +: : : : +- * HashAggregate (17) +: : : : +- Exchange (16) +: : : : +- * HashAggregate (15) +: : : : +- * Project (14) +: : : : +- * Filter (13) +: : : : +- * ColumnarToRow (12) +: : : : +- Scan parquet default.store_sales (11) +: : : +- BroadcastExchange (33) +: : : +- * HashAggregate (32) +: : : +- Exchange (31) +: : : +- * HashAggregate (30) +: : : +- * HashAggregate (29) +: : : +- Exchange (28) +: : : +- * HashAggregate (27) +: : : +- * Project (26) +: : : +- * Filter (25) +: : : +- * ColumnarToRow (24) +: : : +- Scan parquet default.store_sales (23) +: : +- BroadcastExchange (45) +: : +- * HashAggregate (44) +: : +- Exchange (43) +: : +- * HashAggregate (42) +: : +- * HashAggregate (41) +: : +- Exchange (40) +: : +- * HashAggregate (39) +: : +- * Project (38) +: : +- * Filter (37) +: : +- * ColumnarToRow (36) +: : +- Scan parquet default.store_sales (35) +: +- BroadcastExchange (57) +: +- * HashAggregate (56) +: +- Exchange (55) +: +- * HashAggregate (54) +: +- * HashAggregate (53) +: +- Exchange (52) +: +- * HashAggregate (51) +: +- * Project (50) +: +- * Filter (49) +: +- * ColumnarToRow (48) +: +- Scan parquet default.store_sales (47) ++- BroadcastExchange (69) + +- * HashAggregate (68) + +- Exchange (67) + +- * HashAggregate (66) + +- * HashAggregate (65) + +- Exchange (64) + +- * HashAggregate (63) + +- * Project (62) + +- * Filter (61) + +- * ColumnarToRow (60) + +- Scan parquet default.store_sales (59) (1) Scan parquet default.store_sales Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,0), LessThanOrEqual(ss_quantity,5)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,0), LessThanOrEqual(ss_quantity,5), Or(Or(And(GreaterThanOrEqual(ss_list_price,8.00),LessThanOrEqual(ss_list_price,18.00)),And(GreaterThanOrEqual(ss_coupon_amt,459.00),LessThanOrEqual(ss_coupon_amt,1459.00))),And(GreaterThanOrEqual(ss_wholesale_cost,57.00),LessThanOrEqual(ss_wholesale_cost,77.00)))] ReadSchema: struct (2) ColumnarToRow [codegen id : 1] @@ -84,7 +83,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (3) Filter [codegen id : 1] 
Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 0)) AND (ss_quantity#1 <= 5)) AND ((((ss_list_price#3 >= 8.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 18.00)) OR ((ss_coupon_amt#4 >= 459.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 1459.00))) OR ((ss_wholesale_cost#2 >= 57.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 77.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 0)) AND (ss_quantity#1 <= 5)) AND ((((ss_list_price#3 >= 8.00) AND (ss_list_price#3 <= 18.00)) OR ((ss_coupon_amt#4 >= 459.00) AND (ss_coupon_amt#4 <= 1459.00))) OR ((ss_wholesale_cost#2 >= 57.00) AND (ss_wholesale_cost#2 <= 77.00)))) (4) Project [codegen id : 1] Output [1]: [ss_list_price#3] @@ -130,7 +129,7 @@ Results [3]: [cast((avg(UnscaledValue(ss_list_price#3))#5 / 100.0) as decimal(11 Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,6), LessThanOrEqual(ss_quantity,10)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,6), LessThanOrEqual(ss_quantity,10), Or(Or(And(GreaterThanOrEqual(ss_list_price,90.00),LessThanOrEqual(ss_list_price,100.00)),And(GreaterThanOrEqual(ss_coupon_amt,2323.00),LessThanOrEqual(ss_coupon_amt,3323.00))),And(GreaterThanOrEqual(ss_wholesale_cost,31.00),LessThanOrEqual(ss_wholesale_cost,51.00)))] ReadSchema: struct (12) ColumnarToRow [codegen id : 4] @@ -138,7 +137,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (13) Filter [codegen id : 4] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 6)) AND (ss_quantity#1 <= 10)) AND ((((ss_list_price#3 >= 90.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 100.00)) OR ((ss_coupon_amt#4 >= 2323.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 3323.00))) OR ((ss_wholesale_cost#2 >= 31.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 51.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 6)) AND (ss_quantity#1 <= 10)) AND ((((ss_list_price#3 >= 90.00) AND (ss_list_price#3 <= 100.00)) OR ((ss_coupon_amt#4 >= 2323.00) AND (ss_coupon_amt#4 <= 3323.00))) OR ((ss_wholesale_cost#2 >= 31.00) AND (ss_wholesale_cost#2 <= 51.00)))) (14) Project [codegen id : 4] Output [1]: [ss_list_price#3] @@ -191,7 +190,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,11), LessThanOrEqual(ss_quantity,15)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,11), LessThanOrEqual(ss_quantity,15), Or(Or(And(GreaterThanOrEqual(ss_list_price,142.00),LessThanOrEqual(ss_list_price,152.00)),And(GreaterThanOrEqual(ss_coupon_amt,12214.00),LessThanOrEqual(ss_coupon_amt,13214.00))),And(GreaterThanOrEqual(ss_wholesale_cost,79.00),LessThanOrEqual(ss_wholesale_cost,99.00)))] ReadSchema: struct (24) ColumnarToRow [codegen id : 7] @@ -199,7 +198,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (25) Filter [codegen id : 7] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : 
(((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 11)) AND (ss_quantity#1 <= 15)) AND ((((ss_list_price#3 >= 142.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 152.00)) OR ((ss_coupon_amt#4 >= 12214.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 13214.00))) OR ((ss_wholesale_cost#2 >= 79.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 99.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 11)) AND (ss_quantity#1 <= 15)) AND ((((ss_list_price#3 >= 142.00) AND (ss_list_price#3 <= 152.00)) OR ((ss_coupon_amt#4 >= 12214.00) AND (ss_coupon_amt#4 <= 13214.00))) OR ((ss_wholesale_cost#2 >= 79.00) AND (ss_wholesale_cost#2 <= 99.00)))) (26) Project [codegen id : 7] Output [1]: [ss_list_price#3] @@ -252,7 +251,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,16), LessThanOrEqual(ss_quantity,20)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,16), LessThanOrEqual(ss_quantity,20), Or(Or(And(GreaterThanOrEqual(ss_list_price,135.00),LessThanOrEqual(ss_list_price,145.00)),And(GreaterThanOrEqual(ss_coupon_amt,6071.00),LessThanOrEqual(ss_coupon_amt,7071.00))),And(GreaterThanOrEqual(ss_wholesale_cost,38.00),LessThanOrEqual(ss_wholesale_cost,58.00)))] ReadSchema: struct (36) ColumnarToRow [codegen id : 10] @@ -260,7 +259,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (37) Filter [codegen id : 10] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 16)) AND (ss_quantity#1 <= 20)) AND ((((ss_list_price#3 >= 135.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 145.00)) OR ((ss_coupon_amt#4 >= 6071.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 7071.00))) OR ((ss_wholesale_cost#2 >= 38.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 58.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 16)) AND (ss_quantity#1 <= 20)) AND ((((ss_list_price#3 >= 135.00) AND (ss_list_price#3 <= 145.00)) OR ((ss_coupon_amt#4 >= 6071.00) AND (ss_coupon_amt#4 <= 7071.00))) OR ((ss_wholesale_cost#2 >= 38.00) AND (ss_wholesale_cost#2 <= 58.00)))) (38) Project [codegen id : 10] Output [1]: [ss_list_price#3] @@ -313,7 +312,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,25)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,21), LessThanOrEqual(ss_quantity,25), Or(Or(And(GreaterThanOrEqual(ss_list_price,122.00),LessThanOrEqual(ss_list_price,132.00)),And(GreaterThanOrEqual(ss_coupon_amt,836.00),LessThanOrEqual(ss_coupon_amt,1836.00))),And(GreaterThanOrEqual(ss_wholesale_cost,17.00),LessThanOrEqual(ss_wholesale_cost,37.00)))] ReadSchema: struct (48) ColumnarToRow [codegen id : 13] @@ -321,7 +320,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (49) Filter [codegen id : 13] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 21)) AND (ss_quantity#1 <= 25)) AND ((((ss_list_price#3 >= 122.00) AND (cast(ss_list_price#3 as 
decimal(12,2)) <= 132.00)) OR ((ss_coupon_amt#4 >= 836.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 1836.00))) OR ((ss_wholesale_cost#2 >= 17.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 37.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 21)) AND (ss_quantity#1 <= 25)) AND ((((ss_list_price#3 >= 122.00) AND (ss_list_price#3 <= 132.00)) OR ((ss_coupon_amt#4 >= 836.00) AND (ss_coupon_amt#4 <= 1836.00))) OR ((ss_wholesale_cost#2 >= 17.00) AND (ss_wholesale_cost#2 <= 37.00)))) (50) Project [codegen id : 13] Output [1]: [ss_list_price#3] @@ -374,7 +373,7 @@ Join condition: None Output [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,26), LessThanOrEqual(ss_quantity,30)] +PushedFilters: [IsNotNull(ss_quantity), GreaterThanOrEqual(ss_quantity,26), LessThanOrEqual(ss_quantity,30), Or(Or(And(GreaterThanOrEqual(ss_list_price,154.00),LessThanOrEqual(ss_list_price,164.00)),And(GreaterThanOrEqual(ss_coupon_amt,7326.00),LessThanOrEqual(ss_coupon_amt,8326.00))),And(GreaterThanOrEqual(ss_wholesale_cost,7.00),LessThanOrEqual(ss_wholesale_cost,27.00)))] ReadSchema: struct (60) ColumnarToRow [codegen id : 16] @@ -382,7 +381,7 @@ Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4 (61) Filter [codegen id : 16] Input [4]: [ss_quantity#1, ss_wholesale_cost#2, ss_list_price#3, ss_coupon_amt#4] -Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 26)) AND (ss_quantity#1 <= 30)) AND ((((ss_list_price#3 >= 154.00) AND (cast(ss_list_price#3 as decimal(12,2)) <= 164.00)) OR ((ss_coupon_amt#4 >= 7326.00) AND (cast(ss_coupon_amt#4 as decimal(12,2)) <= 8326.00))) OR ((ss_wholesale_cost#2 >= 7.00) AND (cast(ss_wholesale_cost#2 as decimal(12,2)) <= 27.00)))) +Condition : (((isnotnull(ss_quantity#1) AND (ss_quantity#1 >= 26)) AND (ss_quantity#1 <= 30)) AND ((((ss_list_price#3 >= 154.00) AND (ss_list_price#3 <= 164.00)) OR ((ss_coupon_amt#4 >= 7326.00) AND (ss_coupon_amt#4 <= 8326.00))) OR ((ss_wholesale_cost#2 >= 7.00) AND (ss_wholesale_cost#2 <= 27.00)))) (62) Project [codegen id : 16] Output [1]: [ss_list_price#3] @@ -431,7 +430,3 @@ Arguments: IdentityBroadcastMode, [id=#81] (70) BroadcastNestedLoopJoin Join condition: None -(71) CollectLimit -Input [18]: [B1_LP#14, B1_CNT#15, B1_CNTD#16, B2_LP#26, B2_CNT#27, B2_CNTD#28, B3_LP#39, B3_CNT#40, B3_CNTD#41, B4_LP#52, B4_CNT#53, B4_CNTD#54, B5_LP#65, B5_CNT#66, B5_CNTD#67, B6_LP#78, B6_CNT#79, B6_CNTD#80] -Arguments: 100 - diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/simplified.txt index d896002b0965d..77afa321d3ee4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q28/simplified.txt @@ -1,107 +1,106 @@ -CollectLimit +BroadcastNestedLoopJoin BroadcastNestedLoopJoin BroadcastNestedLoopJoin BroadcastNestedLoopJoin BroadcastNestedLoopJoin - BroadcastNestedLoopJoin - WholeStageCodegen (3) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B1_LP,B1_CNT,B1_CNTD,sum,count,count,count] - InputAdapter - Exchange #1 - WholeStageCodegen (2) - HashAggregate [ss_list_price] 
[avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - InputAdapter - Exchange [ss_list_price] #2 - WholeStageCodegen (1) - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - Project [ss_list_price] - Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #3 - WholeStageCodegen (6) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B2_LP,B2_CNT,B2_CNTD,sum,count,count,count] - InputAdapter - Exchange #4 - WholeStageCodegen (5) - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - InputAdapter - Exchange [ss_list_price] #5 - WholeStageCodegen (4) - HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] - Project [ss_list_price] - Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #6 - WholeStageCodegen (9) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B3_LP,B3_CNT,B3_CNTD,sum,count,count,count] + WholeStageCodegen (3) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B1_LP,B1_CNT,B1_CNTD,sum,count,count,count] + InputAdapter + Exchange #1 + WholeStageCodegen (2) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + InputAdapter + Exchange [ss_list_price] #2 + WholeStageCodegen (1) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + Project [ss_list_price] + Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] + BroadcastExchange #3 + WholeStageCodegen (6) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B2_LP,B2_CNT,B2_CNTD,sum,count,count,count] InputAdapter - Exchange #7 - WholeStageCodegen (8) + Exchange #4 + WholeStageCodegen (5) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] InputAdapter - Exchange [ss_list_price] #8 - WholeStageCodegen (7) + Exchange [ss_list_price] #5 + WholeStageCodegen (4) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project 
[ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #9 - WholeStageCodegen (12) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B4_LP,B4_CNT,B4_CNTD,sum,count,count,count] + BroadcastExchange #6 + WholeStageCodegen (9) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B3_LP,B3_CNT,B3_CNTD,sum,count,count,count] InputAdapter - Exchange #10 - WholeStageCodegen (11) + Exchange #7 + WholeStageCodegen (8) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] InputAdapter - Exchange [ss_list_price] #11 - WholeStageCodegen (10) + Exchange [ss_list_price] #8 + WholeStageCodegen (7) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project [ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #12 - WholeStageCodegen (15) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B5_LP,B5_CNT,B5_CNTD,sum,count,count,count] + BroadcastExchange #9 + WholeStageCodegen (12) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B4_LP,B4_CNT,B4_CNTD,sum,count,count,count] InputAdapter - Exchange #13 - WholeStageCodegen (14) + Exchange #10 + WholeStageCodegen (11) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] InputAdapter - Exchange [ss_list_price] #14 - WholeStageCodegen (13) + Exchange [ss_list_price] #11 + WholeStageCodegen (10) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project [ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] - BroadcastExchange #15 - WholeStageCodegen (18) - HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B6_LP,B6_CNT,B6_CNTD,sum,count,count,count] + BroadcastExchange #12 + WholeStageCodegen (15) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B5_LP,B5_CNT,B5_CNTD,sum,count,count,count] InputAdapter - Exchange #16 - WholeStageCodegen (17) + Exchange #13 + WholeStageCodegen (14) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] InputAdapter - Exchange [ss_list_price] #17 - WholeStageCodegen (16) + Exchange [ss_list_price] 
#14 + WholeStageCodegen (13) HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] Project [ss_list_price] Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] + BroadcastExchange #15 + WholeStageCodegen (18) + HashAggregate [sum,count,count,count] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),B6_LP,B6_CNT,B6_CNTD,sum,count,count,count] + InputAdapter + Exchange #16 + WholeStageCodegen (17) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),count(ss_list_price),sum,count,count,count,sum,count,count,count] + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + InputAdapter + Exchange [ss_list_price] #17 + WholeStageCodegen (16) + HashAggregate [ss_list_price] [avg(UnscaledValue(ss_list_price)),count(ss_list_price),sum,count,count,sum,count,count] + Project [ss_list_price] + Filter [ss_quantity,ss_list_price,ss_coupon_amt,ss_wholesale_cost] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_quantity,ss_wholesale_cost,ss_list_price,ss_coupon_amt] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt index 35e24698c517e..a949b93f3bcb0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/explain.txt @@ -39,15 +39,15 @@ TakeOrderedAndProject (61) : +- * Sort (39) : +- Exchange (38) : +- * Project (37) - : +- * BroadcastHashJoin Inner BuildLeft (36) - : :- BroadcastExchange (32) - : : +- * Project (31) - : : +- * Filter (30) - : : +- * ColumnarToRow (29) - : : +- Scan parquet default.date_dim (28) - : +- * Filter (35) - : +- * ColumnarToRow (34) - : +- Scan parquet default.store_returns (33) + : +- * BroadcastHashJoin Inner BuildRight (36) + : :- * Filter (30) + : : +- * ColumnarToRow (29) + : : +- Scan parquet default.store_returns (28) + : +- BroadcastExchange (35) + : +- * Project (34) + : +- * Filter (33) + : +- * ColumnarToRow (32) + : +- Scan parquet default.date_dim (31) +- * Sort (55) +- Exchange (54) +- * Project (53) @@ -181,75 +181,75 @@ Arguments: hashpartitioning(cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 Input [8]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] Arguments: [cast(ss_customer_sk#3 as bigint) ASC NULLS FIRST, cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST], false, 0 -(28) Scan parquet default.date_dim -Output [3]: [d_date_sk#21, d_year#22, d_moy#23] +(28) Scan parquet default.store_returns +Output [5]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), GreaterThanOrEqual(d_moy,9), LessThanOrEqual(d_moy,12), EqualTo(d_year,1999), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/store_returns] +PushedFilters: [IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), 
IsNotNull(sr_ticket_number), IsNotNull(sr_returned_date_sk)] +ReadSchema: struct -(29) ColumnarToRow [codegen id : 9] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] +(29) ColumnarToRow [codegen id : 10] +Input [5]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25] -(30) Filter [codegen id : 9] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] -Condition : (((((isnotnull(d_moy#23) AND isnotnull(d_year#22)) AND (d_moy#23 >= 9)) AND (d_moy#23 <= 12)) AND (d_year#22 = 1999)) AND isnotnull(d_date_sk#21)) +(30) Filter [codegen id : 10] +Input [5]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25] +Condition : (((isnotnull(sr_customer_sk#23) AND isnotnull(sr_item_sk#22)) AND isnotnull(sr_ticket_number#24)) AND isnotnull(sr_returned_date_sk#21)) -(31) Project [codegen id : 9] -Output [1]: [d_date_sk#21] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] +(31) Scan parquet default.date_dim +Output [3]: [d_date_sk#26, d_year#27, d_moy#28] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), GreaterThanOrEqual(d_moy,9), LessThanOrEqual(d_moy,12), EqualTo(d_year,1999), IsNotNull(d_date_sk)] +ReadSchema: struct -(32) BroadcastExchange -Input [1]: [d_date_sk#21] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#24] +(32) ColumnarToRow [codegen id : 9] +Input [3]: [d_date_sk#26, d_year#27, d_moy#28] -(33) Scan parquet default.store_returns -Output [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_returns] -PushedFilters: [IsNotNull(sr_customer_sk), IsNotNull(sr_item_sk), IsNotNull(sr_ticket_number), IsNotNull(sr_returned_date_sk)] -ReadSchema: struct +(33) Filter [codegen id : 9] +Input [3]: [d_date_sk#26, d_year#27, d_moy#28] +Condition : (((((isnotnull(d_moy#28) AND isnotnull(d_year#27)) AND (d_moy#28 >= 9)) AND (d_moy#28 <= 12)) AND (d_year#27 = 1999)) AND isnotnull(d_date_sk#26)) -(34) ColumnarToRow -Input [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] +(34) Project [codegen id : 9] +Output [1]: [d_date_sk#26] +Input [3]: [d_date_sk#26, d_year#27, d_moy#28] -(35) Filter -Input [5]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] -Condition : (((isnotnull(sr_customer_sk#27) AND isnotnull(sr_item_sk#26)) AND isnotnull(sr_ticket_number#28)) AND isnotnull(sr_returned_date_sk#25)) +(35) BroadcastExchange +Input [1]: [d_date_sk#26] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#29] (36) BroadcastHashJoin [codegen id : 10] -Left keys [1]: [cast(d_date_sk#21 as bigint)] -Right keys [1]: [sr_returned_date_sk#25] +Left keys [1]: [sr_returned_date_sk#21] +Right keys [1]: [cast(d_date_sk#26 as bigint)] Join condition: None (37) Project [codegen id : 10] -Output [4]: [sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] -Input [6]: [d_date_sk#21, sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] +Output [4]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25] +Input [6]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25, d_date_sk#26] (38) 
Exchange -Input [4]: [sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] -Arguments: hashpartitioning(sr_customer_sk#27, sr_item_sk#26, sr_ticket_number#28, 5), true, [id=#30] +Input [4]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25] +Arguments: hashpartitioning(sr_customer_sk#23, sr_item_sk#22, sr_ticket_number#24, 5), true, [id=#30] (39) Sort [codegen id : 11] -Input [4]: [sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] -Arguments: [sr_customer_sk#27 ASC NULLS FIRST, sr_item_sk#26 ASC NULLS FIRST, sr_ticket_number#28 ASC NULLS FIRST], false, 0 +Input [4]: [sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25] +Arguments: [sr_customer_sk#23 ASC NULLS FIRST, sr_item_sk#22 ASC NULLS FIRST, sr_ticket_number#24 ASC NULLS FIRST], false, 0 (40) SortMergeJoin [codegen id : 12] Left keys [3]: [cast(ss_customer_sk#3 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint)] -Right keys [3]: [sr_customer_sk#27, sr_item_sk#26, sr_ticket_number#28] +Right keys [3]: [sr_customer_sk#23, sr_item_sk#22, sr_ticket_number#24] Join condition: None (41) Project [codegen id : 12] -Output [8]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_return_quantity#29] -Input [12]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28, sr_return_quantity#29] +Output [8]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#22, sr_customer_sk#23, sr_return_quantity#25] +Input [12]: [ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, sr_return_quantity#25] (42) Exchange -Input [8]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_return_quantity#29] -Arguments: hashpartitioning(sr_customer_sk#27, sr_item_sk#26, 5), true, [id=#31] +Input [8]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#22, sr_customer_sk#23, sr_return_quantity#25] +Arguments: hashpartitioning(sr_customer_sk#23, sr_item_sk#22, 5), true, [id=#31] (43) Sort [codegen id : 13] -Input [8]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_return_quantity#29] -Arguments: [sr_customer_sk#27 ASC NULLS FIRST, sr_item_sk#26 ASC NULLS FIRST], false, 0 +Input [8]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#22, sr_customer_sk#23, sr_return_quantity#25] +Arguments: [sr_customer_sk#23 ASC NULLS FIRST, sr_item_sk#22 ASC NULLS FIRST], false, 0 (44) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#32, cs_bill_customer_sk#33, cs_item_sk#34, cs_quantity#35] @@ -305,18 +305,18 @@ Input [3]: [cs_bill_customer_sk#33, cs_item_sk#34, cs_quantity#35] Arguments: [cast(cs_bill_customer_sk#33 as bigint) ASC NULLS FIRST, cast(cs_item_sk#34 as bigint) ASC NULLS FIRST], false, 0 (56) SortMergeJoin [codegen id : 17] -Left keys [2]: [sr_customer_sk#27, sr_item_sk#26] +Left keys [2]: [sr_customer_sk#23, sr_item_sk#22] Right keys [2]: [cast(cs_bill_customer_sk#33 as bigint), cast(cs_item_sk#34 as bigint)] Join condition: None (57) Project [codegen id : 17] -Output [7]: 
[ss_quantity#6, sr_return_quantity#29, cs_quantity#35, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] -Input [11]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#26, sr_customer_sk#27, sr_return_quantity#29, cs_bill_customer_sk#33, cs_item_sk#34, cs_quantity#35] +Output [7]: [ss_quantity#6, sr_return_quantity#25, cs_quantity#35, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] +Input [11]: [ss_quantity#6, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18, sr_item_sk#22, sr_customer_sk#23, sr_return_quantity#25, cs_bill_customer_sk#33, cs_item_sk#34, cs_quantity#35] (58) HashAggregate [codegen id : 17] -Input [7]: [ss_quantity#6, sr_return_quantity#29, cs_quantity#35, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] +Input [7]: [ss_quantity#6, sr_return_quantity#25, cs_quantity#35, s_store_id#12, s_store_name#13, i_item_id#17, i_item_desc#18] Keys [4]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13] -Functions [3]: [partial_sum(cast(ss_quantity#6 as bigint)), partial_sum(cast(sr_return_quantity#29 as bigint)), partial_sum(cast(cs_quantity#35 as bigint))] +Functions [3]: [partial_sum(cast(ss_quantity#6 as bigint)), partial_sum(cast(sr_return_quantity#25 as bigint)), partial_sum(cast(cs_quantity#35 as bigint))] Aggregate Attributes [3]: [sum#40, sum#41, sum#42] Results [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, sum#43, sum#44, sum#45] @@ -327,9 +327,9 @@ Arguments: hashpartitioning(i_item_id#17, i_item_desc#18, s_store_id#12, s_store (60) HashAggregate [codegen id : 18] Input [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, sum#43, sum#44, sum#45] Keys [4]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13] -Functions [3]: [sum(cast(ss_quantity#6 as bigint)), sum(cast(sr_return_quantity#29 as bigint)), sum(cast(cs_quantity#35 as bigint))] -Aggregate Attributes [3]: [sum(cast(ss_quantity#6 as bigint))#47, sum(cast(sr_return_quantity#29 as bigint))#48, sum(cast(cs_quantity#35 as bigint))#49] -Results [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, sum(cast(ss_quantity#6 as bigint))#47 AS store_sales_quantity#50, sum(cast(sr_return_quantity#29 as bigint))#48 AS store_returns_quantity#51, sum(cast(cs_quantity#35 as bigint))#49 AS catalog_sales_quantity#52] +Functions [3]: [sum(cast(ss_quantity#6 as bigint)), sum(cast(sr_return_quantity#25 as bigint)), sum(cast(cs_quantity#35 as bigint))] +Aggregate Attributes [3]: [sum(cast(ss_quantity#6 as bigint))#47, sum(cast(sr_return_quantity#25 as bigint))#48, sum(cast(cs_quantity#35 as bigint))#49] +Results [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, sum(cast(ss_quantity#6 as bigint))#47 AS store_sales_quantity#50, sum(cast(sr_return_quantity#25 as bigint))#48 AS store_returns_quantity#51, sum(cast(cs_quantity#35 as bigint))#49 AS catalog_sales_quantity#52] (61) TakeOrderedAndProject Input [7]: [i_item_id#17, i_item_desc#18, s_store_id#12, s_store_name#13, store_sales_quantity#50, store_returns_quantity#51, catalog_sales_quantity#52] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/simplified.txt index f10b8e245c50e..ea91af9e8f755 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/simplified.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q29.sf100/simplified.txt @@ -69,7 +69,11 @@ TakeOrderedAndProject [i_item_id,i_item_desc,s_store_id,s_store_name,store_sales Exchange [sr_customer_sk,sr_item_sk,sr_ticket_number] #8 WholeStageCodegen (10) Project [sr_item_sk,sr_customer_sk,sr_ticket_number,sr_return_quantity] - BroadcastHashJoin [d_date_sk,sr_returned_date_sk] + BroadcastHashJoin [sr_returned_date_sk,d_date_sk] + Filter [sr_customer_sk,sr_item_sk,sr_ticket_number,sr_returned_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number,sr_return_quantity] InputAdapter BroadcastExchange #9 WholeStageCodegen (9) @@ -78,10 +82,6 @@ TakeOrderedAndProject [i_item_id,i_item_desc,s_store_id,s_store_name,store_sales ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - Filter [sr_customer_sk,sr_item_sk,sr_ticket_number,sr_returned_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number,sr_return_quantity] InputAdapter WholeStageCodegen (16) Sort [cs_bill_customer_sk,cs_item_sk] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt index d3b013660ba28..9f123c4044cc8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/explain.txt @@ -138,7 +138,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2)) Output [3]: [d_date_sk#4, d_year#5, d_qoy#6] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,3), EqualTo(d_year,2000), IsNotNull(d_date_sk)] +PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct (5) ColumnarToRow [codegen id : 1] @@ -146,7 +146,7 @@ Input [3]: [d_date_sk#4, d_year#5, d_qoy#6] (6) Filter [codegen id : 1] Input [3]: [d_date_sk#4, d_year#5, d_qoy#6] -Condition : ((((isnotnull(d_qoy#6) AND isnotnull(d_year#5)) AND (d_qoy#6 = 3)) AND (d_year#5 = 2000)) AND isnotnull(d_date_sk#4)) +Condition : ((((isnotnull(d_qoy#6) AND isnotnull(d_year#5)) AND (d_qoy#6 = 2)) AND (d_year#5 = 2000)) AND isnotnull(d_date_sk#4)) (7) BroadcastExchange Input [3]: [d_date_sk#4, d_year#5, d_qoy#6] @@ -236,7 +236,7 @@ Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_addr_sk#2)) Output [3]: [d_date_sk#17, d_year#18, d_qoy#19] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,2), EqualTo(d_year,2000), IsNotNull(d_date_sk)] +PushedFilters: [IsNotNull(d_qoy), IsNotNull(d_year), EqualTo(d_qoy,3), EqualTo(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct (26) ColumnarToRow [codegen id : 7] @@ -244,7 +244,7 @@ Input [3]: [d_date_sk#17, d_year#18, d_qoy#19] (27) Filter [codegen id : 7] Input [3]: [d_date_sk#17, d_year#18, d_qoy#19] -Condition : ((((isnotnull(d_qoy#19) AND isnotnull(d_year#18)) AND (d_qoy#19 = 2)) AND (d_year#18 = 2000)) AND isnotnull(d_date_sk#17)) +Condition : ((((isnotnull(d_qoy#19) AND isnotnull(d_year#18)) AND (d_qoy#19 = 3)) AND (d_year#18 = 2000)) AND isnotnull(d_date_sk#17)) (28) BroadcastExchange 
Input [3]: [d_date_sk#17, d_year#18, d_qoy#19] @@ -311,7 +311,7 @@ Right keys [1]: [ca_county#23] Join condition: None (42) Project [codegen id : 42] -Output [3]: [store_sales#16, ca_county#23, store_sales#28] +Output [3]: [ca_county#10, store_sales#16, store_sales#28] Input [4]: [ca_county#10, store_sales#16, ca_county#23, store_sales#28] (43) Scan parquet default.store_sales @@ -402,13 +402,13 @@ Input [3]: [ca_county#36, d_year#31, store_sales#41] Arguments: HashedRelationBroadcastMode(List(input[0, string, true]),false), [id=#42] (62) BroadcastHashJoin [codegen id : 42] -Left keys [1]: [ca_county#23] +Left keys [1]: [ca_county#10] Right keys [1]: [ca_county#36] Join condition: None (63) Project [codegen id : 42] Output [5]: [store_sales#16, store_sales#28, ca_county#36, d_year#31, store_sales#41] -Input [6]: [store_sales#16, ca_county#23, store_sales#28, ca_county#36, d_year#31, store_sales#41] +Input [6]: [ca_county#10, store_sales#16, store_sales#28, ca_county#36, d_year#31, store_sales#41] (64) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45] @@ -424,7 +424,7 @@ Input [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45] Input [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45] Condition : (isnotnull(ws_sold_date_sk#43) AND isnotnull(ws_bill_addr_sk#44)) -(67) ReusedExchange [Reuses operator id: 28] +(67) ReusedExchange [Reuses operator id: 49] Output [3]: [d_date_sk#46, d_year#47, d_qoy#48] (68) BroadcastHashJoin [codegen id : 22] @@ -492,7 +492,7 @@ Input [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45] Input [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45] Condition : (isnotnull(ws_sold_date_sk#43) AND isnotnull(ws_bill_addr_sk#44)) -(82) ReusedExchange [Reuses operator id: 49] +(82) ReusedExchange [Reuses operator id: 7] Output [3]: [d_date_sk#57, d_year#58, d_qoy#59] (83) BroadcastHashJoin [codegen id : 28] @@ -556,7 +556,7 @@ Right keys [1]: [ca_county#62] Join condition: None (96) Project [codegen id : 41] -Output [3]: [web_sales#56, ca_county#62, web_sales#67] +Output [3]: [ca_county#51, web_sales#56, web_sales#67] Input [4]: [ca_county#51, web_sales#56, ca_county#62, web_sales#67] (97) Scan parquet default.web_sales @@ -573,7 +573,7 @@ Input [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45] Input [3]: [ws_sold_date_sk#43, ws_bill_addr_sk#44, ws_ext_sales_price#45] Condition : (isnotnull(ws_sold_date_sk#43) AND isnotnull(ws_bill_addr_sk#44)) -(100) ReusedExchange [Reuses operator id: 7] +(100) ReusedExchange [Reuses operator id: 28] Output [3]: [d_date_sk#69, d_year#70, d_qoy#71] (101) BroadcastHashJoin [codegen id : 35] @@ -632,26 +632,26 @@ Input [2]: [ca_county#74, web_sales#79] Arguments: HashedRelationBroadcastMode(List(input[0, string, true]),false), [id=#80] (113) BroadcastHashJoin [codegen id : 41] -Left keys [1]: [ca_county#62] +Left keys [1]: [ca_county#51] Right keys [1]: [ca_county#74] Join condition: None (114) Project [codegen id : 41] -Output [4]: [web_sales#56, ca_county#62, web_sales#67, web_sales#79] -Input [5]: [web_sales#56, ca_county#62, web_sales#67, ca_county#74, web_sales#79] +Output [4]: [ca_county#51, web_sales#56, web_sales#67, web_sales#79] +Input [5]: [ca_county#51, web_sales#56, web_sales#67, ca_county#74, web_sales#79] (115) BroadcastExchange -Input [4]: [web_sales#56, ca_county#62, web_sales#67, web_sales#79] -Arguments: HashedRelationBroadcastMode(List(input[1, string, true]),false), [id=#81] 
+Input [4]: [ca_county#51, web_sales#56, web_sales#67, web_sales#79] +Arguments: HashedRelationBroadcastMode(List(input[0, string, true]),false), [id=#81] (116) BroadcastHashJoin [codegen id : 42] Left keys [1]: [ca_county#36] -Right keys [1]: [ca_county#62] -Join condition: ((CASE WHEN (web_sales#67 > 0.00) THEN CheckOverflow((promote_precision(web_sales#56) / promote_precision(web_sales#67)), DecimalType(37,20), true) ELSE null END > CASE WHEN (store_sales#41 > 0.00) THEN CheckOverflow((promote_precision(store_sales#28) / promote_precision(store_sales#41)), DecimalType(37,20), true) ELSE null END) AND (CASE WHEN (web_sales#56 > 0.00) THEN CheckOverflow((promote_precision(web_sales#79) / promote_precision(web_sales#56)), DecimalType(37,20), true) ELSE null END > CASE WHEN (store_sales#28 > 0.00) THEN CheckOverflow((promote_precision(store_sales#16) / promote_precision(store_sales#28)), DecimalType(37,20), true) ELSE null END)) +Right keys [1]: [ca_county#51] +Join condition: ((CASE WHEN (web_sales#56 > 0.00) THEN CheckOverflow((promote_precision(web_sales#67) / promote_precision(web_sales#56)), DecimalType(37,20), true) ELSE null END > CASE WHEN (store_sales#41 > 0.00) THEN CheckOverflow((promote_precision(store_sales#16) / promote_precision(store_sales#41)), DecimalType(37,20), true) ELSE null END) AND (CASE WHEN (web_sales#67 > 0.00) THEN CheckOverflow((promote_precision(web_sales#79) / promote_precision(web_sales#67)), DecimalType(37,20), true) ELSE null END > CASE WHEN (store_sales#16 > 0.00) THEN CheckOverflow((promote_precision(store_sales#28) / promote_precision(store_sales#16)), DecimalType(37,20), true) ELSE null END)) (117) Project [codegen id : 42] -Output [6]: [ca_county#36, d_year#31, CheckOverflow((promote_precision(web_sales#56) / promote_precision(web_sales#67)), DecimalType(37,20), true) AS web_q1_q2_increase#82, CheckOverflow((promote_precision(store_sales#28) / promote_precision(store_sales#41)), DecimalType(37,20), true) AS store_q1_q2_increase#83, CheckOverflow((promote_precision(web_sales#79) / promote_precision(web_sales#56)), DecimalType(37,20), true) AS web_q2_q3_increase#84, CheckOverflow((promote_precision(store_sales#16) / promote_precision(store_sales#28)), DecimalType(37,20), true) AS store_q2_q3_increase#85] -Input [9]: [store_sales#16, store_sales#28, ca_county#36, d_year#31, store_sales#41, web_sales#56, ca_county#62, web_sales#67, web_sales#79] +Output [6]: [ca_county#36, d_year#31, CheckOverflow((promote_precision(web_sales#67) / promote_precision(web_sales#56)), DecimalType(37,20), true) AS web_q1_q2_increase#82, CheckOverflow((promote_precision(store_sales#16) / promote_precision(store_sales#41)), DecimalType(37,20), true) AS store_q1_q2_increase#83, CheckOverflow((promote_precision(web_sales#79) / promote_precision(web_sales#67)), DecimalType(37,20), true) AS web_q2_q3_increase#84, CheckOverflow((promote_precision(store_sales#28) / promote_precision(store_sales#16)), DecimalType(37,20), true) AS store_q2_q3_increase#85] +Input [9]: [store_sales#16, store_sales#28, ca_county#36, d_year#31, store_sales#41, ca_county#51, web_sales#56, web_sales#67, web_sales#79] (118) Exchange Input [6]: [ca_county#36, d_year#31, web_q1_q2_increase#82, store_q1_q2_increase#83, web_q2_q3_increase#84, store_q2_q3_increase#85] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/simplified.txt index 9ec06b597cb64..c7b69500ed8a6 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q31.sf100/simplified.txt @@ -7,7 +7,7 @@ WholeStageCodegen (43) BroadcastHashJoin [ca_county,ca_county,web_sales,web_sales,store_sales,store_sales,web_sales,store_sales] Project [store_sales,store_sales,ca_county,d_year,store_sales] BroadcastHashJoin [ca_county,ca_county] - Project [store_sales,ca_county,store_sales] + Project [ca_county,store_sales,store_sales] BroadcastHashJoin [ca_county,ca_county] HashAggregate [ca_county,d_qoy,d_year,sum] [sum(UnscaledValue(ss_ext_sales_price)),store_sales,sum] InputAdapter @@ -116,9 +116,9 @@ WholeStageCodegen (43) InputAdapter BroadcastExchange #14 WholeStageCodegen (41) - Project [web_sales,ca_county,web_sales,web_sales] + Project [ca_county,web_sales,web_sales,web_sales] BroadcastHashJoin [ca_county,ca_county] - Project [web_sales,ca_county,web_sales] + Project [ca_county,web_sales,web_sales] BroadcastHashJoin [ca_county,ca_county] HashAggregate [ca_county,d_qoy,d_year,sum] [sum(UnscaledValue(ws_ext_sales_price)),web_sales,sum] InputAdapter @@ -140,7 +140,7 @@ WholeStageCodegen (43) InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_bill_addr_sk,ws_ext_sales_price] InputAdapter - ReusedExchange [d_date_sk,d_year,d_qoy] #9 + ReusedExchange [d_date_sk,d_year,d_qoy] #13 InputAdapter WholeStageCodegen (25) Sort [ca_address_sk] @@ -169,7 +169,7 @@ WholeStageCodegen (43) InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_bill_addr_sk,ws_ext_sales_price] InputAdapter - ReusedExchange [d_date_sk,d_year,d_qoy] #13 + ReusedExchange [d_date_sk,d_year,d_qoy] #4 InputAdapter WholeStageCodegen (31) Sort [ca_address_sk] @@ -198,7 +198,7 @@ WholeStageCodegen (43) InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_bill_addr_sk,ws_ext_sales_price] InputAdapter - ReusedExchange [d_date_sk,d_year,d_qoy] #4 + ReusedExchange [d_date_sk,d_year,d_qoy] #9 InputAdapter WholeStageCodegen (38) Sort [ca_address_sk] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt index 8185680b58670..cb8522545f1d3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/explain.txt @@ -9,8 +9,8 @@ TakeOrderedAndProject (67) : +- * HashAggregate (30) : +- * Project (29) : +- * BroadcastHashJoin Inner BuildRight (28) - : :- * Project (22) - : : +- * BroadcastHashJoin Inner BuildRight (21) + : :- * Project (17) + : : +- * BroadcastHashJoin Inner BuildRight (16) : : :- * Project (10) : : : +- * BroadcastHashJoin Inner BuildRight (9) : : : :- * Filter (3) @@ -21,21 +21,21 @@ TakeOrderedAndProject (67) : : : +- * Filter (6) : : : +- * ColumnarToRow (5) : : : +- Scan parquet default.date_dim (4) - : : +- BroadcastExchange (20) - : : +- * BroadcastHashJoin LeftSemi BuildRight (19) - : : :- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.item (11) - : : +- BroadcastExchange (18) - : : +- * Project (17) - : : +- * Filter (16) - : : +- * ColumnarToRow (15) - : : +- Scan parquet default.item (14) + : : +- BroadcastExchange (15) + : : +- * Project (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.customer_address (11) : +- BroadcastExchange (27) - : +- * 
Project (26) - : +- * Filter (25) - : +- * ColumnarToRow (24) - : +- Scan parquet default.customer_address (23) + : +- * BroadcastHashJoin LeftSemi BuildRight (26) + : :- * Filter (20) + : : +- * ColumnarToRow (19) + : : +- Scan parquet default.item (18) + : +- BroadcastExchange (25) + : +- * Project (24) + : +- * Filter (23) + : +- * ColumnarToRow (22) + : +- Scan parquet default.item (21) :- * HashAggregate (47) : +- Exchange (46) : +- * HashAggregate (45) @@ -113,108 +113,108 @@ Join condition: None Output [3]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4] Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, d_date_sk#5] -(11) Scan parquet default.item -Output [2]: [i_item_sk#9, i_manufact_id#10] +(11) Scan parquet default.customer_address +Output [2]: [ca_address_sk#9, ca_gmt_offset#10] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)] +ReadSchema: struct + +(12) ColumnarToRow [codegen id : 2] +Input [2]: [ca_address_sk#9, ca_gmt_offset#10] + +(13) Filter [codegen id : 2] +Input [2]: [ca_address_sk#9, ca_gmt_offset#10] +Condition : ((isnotnull(ca_gmt_offset#10) AND (ca_gmt_offset#10 = -5.00)) AND isnotnull(ca_address_sk#9)) + +(14) Project [codegen id : 2] +Output [1]: [ca_address_sk#9] +Input [2]: [ca_address_sk#9, ca_gmt_offset#10] + +(15) BroadcastExchange +Input [1]: [ca_address_sk#9] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#11] + +(16) BroadcastHashJoin [codegen id : 5] +Left keys [1]: [ss_addr_sk#3] +Right keys [1]: [ca_address_sk#9] +Join condition: None + +(17) Project [codegen id : 5] +Output [2]: [ss_item_sk#2, ss_ext_sales_price#4] +Input [4]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, ca_address_sk#9] + +(18) Scan parquet default.item +Output [2]: [i_item_sk#12, i_manufact_id#13] Batched: true Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_item_sk)] ReadSchema: struct -(12) ColumnarToRow [codegen id : 3] -Input [2]: [i_item_sk#9, i_manufact_id#10] +(19) ColumnarToRow [codegen id : 4] +Input [2]: [i_item_sk#12, i_manufact_id#13] -(13) Filter [codegen id : 3] -Input [2]: [i_item_sk#9, i_manufact_id#10] -Condition : isnotnull(i_item_sk#9) +(20) Filter [codegen id : 4] +Input [2]: [i_item_sk#12, i_manufact_id#13] +Condition : isnotnull(i_item_sk#12) -(14) Scan parquet default.item -Output [2]: [i_category#11, i_manufact_id#10] +(21) Scan parquet default.item +Output [2]: [i_category#14, i_manufact_id#13] Batched: true Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category), EqualTo(i_category,Electronics)] ReadSchema: struct -(15) ColumnarToRow [codegen id : 2] -Input [2]: [i_category#11, i_manufact_id#10] +(22) ColumnarToRow [codegen id : 3] +Input [2]: [i_category#14, i_manufact_id#13] -(16) Filter [codegen id : 2] -Input [2]: [i_category#11, i_manufact_id#10] -Condition : (isnotnull(i_category#11) AND (i_category#11 = Electronics)) +(23) Filter [codegen id : 3] +Input [2]: [i_category#14, i_manufact_id#13] +Condition : (isnotnull(i_category#14) AND (i_category#14 = Electronics)) -(17) Project [codegen id : 2] -Output [1]: [i_manufact_id#10 AS i_manufact_id#10#12] -Input [2]: [i_category#11, i_manufact_id#10] +(24) Project [codegen id : 3] +Output [1]: [i_manufact_id#13 AS i_manufact_id#13#15] +Input [2]: [i_category#14, i_manufact_id#13] -(18) BroadcastExchange 
-Input [1]: [i_manufact_id#10#12] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#13] +(25) BroadcastExchange +Input [1]: [i_manufact_id#13#15] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] -(19) BroadcastHashJoin [codegen id : 3] -Left keys [1]: [i_manufact_id#10] -Right keys [1]: [i_manufact_id#10#12] +(26) BroadcastHashJoin [codegen id : 4] +Left keys [1]: [i_manufact_id#13] +Right keys [1]: [i_manufact_id#13#15] Join condition: None -(20) BroadcastExchange -Input [2]: [i_item_sk#9, i_manufact_id#10] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#14] - -(21) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_item_sk#2] -Right keys [1]: [i_item_sk#9] -Join condition: None - -(22) Project [codegen id : 5] -Output [3]: [ss_addr_sk#3, ss_ext_sales_price#4, i_manufact_id#10] -Input [5]: [ss_item_sk#2, ss_addr_sk#3, ss_ext_sales_price#4, i_item_sk#9, i_manufact_id#10] - -(23) Scan parquet default.customer_address -Output [2]: [ca_address_sk#15, ca_gmt_offset#16] -Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-5.00), IsNotNull(ca_address_sk)] -ReadSchema: struct - -(24) ColumnarToRow [codegen id : 4] -Input [2]: [ca_address_sk#15, ca_gmt_offset#16] - -(25) Filter [codegen id : 4] -Input [2]: [ca_address_sk#15, ca_gmt_offset#16] -Condition : ((isnotnull(ca_gmt_offset#16) AND (ca_gmt_offset#16 = -5.00)) AND isnotnull(ca_address_sk#15)) - -(26) Project [codegen id : 4] -Output [1]: [ca_address_sk#15] -Input [2]: [ca_address_sk#15, ca_gmt_offset#16] - (27) BroadcastExchange -Input [1]: [ca_address_sk#15] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#17] +Input [2]: [i_item_sk#12, i_manufact_id#13] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#17] (28) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_addr_sk#3] -Right keys [1]: [ca_address_sk#15] +Left keys [1]: [ss_item_sk#2] +Right keys [1]: [i_item_sk#12] Join condition: None (29) Project [codegen id : 5] -Output [2]: [ss_ext_sales_price#4, i_manufact_id#10] -Input [4]: [ss_addr_sk#3, ss_ext_sales_price#4, i_manufact_id#10, ca_address_sk#15] +Output [2]: [ss_ext_sales_price#4, i_manufact_id#13] +Input [4]: [ss_item_sk#2, ss_ext_sales_price#4, i_item_sk#12, i_manufact_id#13] (30) HashAggregate [codegen id : 5] -Input [2]: [ss_ext_sales_price#4, i_manufact_id#10] -Keys [1]: [i_manufact_id#10] +Input [2]: [ss_ext_sales_price#4, i_manufact_id#13] +Keys [1]: [i_manufact_id#13] Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#4))] Aggregate Attributes [1]: [sum#18] -Results [2]: [i_manufact_id#10, sum#19] +Results [2]: [i_manufact_id#13, sum#19] (31) Exchange -Input [2]: [i_manufact_id#10, sum#19] -Arguments: hashpartitioning(i_manufact_id#10, 5), true, [id=#20] +Input [2]: [i_manufact_id#13, sum#19] +Arguments: hashpartitioning(i_manufact_id#13, 5), ENSURE_REQUIREMENTS, [id=#20] (32) HashAggregate [codegen id : 6] -Input [2]: [i_manufact_id#10, sum#19] -Keys [1]: [i_manufact_id#10] +Input [2]: [i_manufact_id#13, sum#19] +Keys [1]: [i_manufact_id#13] Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#4))] Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#4))#21] -Results [2]: [i_manufact_id#10, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))#21,17,2) AS 
total_sales#22] +Results [2]: [i_manufact_id#13, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))#21,17,2) AS total_sales#22] (33) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26] @@ -242,47 +242,47 @@ Join condition: None Output [3]: [cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26] Input [5]: [cs_sold_date_sk#23, cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26, d_date_sk#5] -(39) ReusedExchange [Reuses operator id: 20] -Output [2]: [i_item_sk#9, i_manufact_id#10] +(39) ReusedExchange [Reuses operator id: 15] +Output [1]: [ca_address_sk#9] (40) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [cs_item_sk#25] -Right keys [1]: [i_item_sk#9] +Left keys [1]: [cs_bill_addr_sk#24] +Right keys [1]: [ca_address_sk#9] Join condition: None (41) Project [codegen id : 11] -Output [3]: [cs_bill_addr_sk#24, cs_ext_sales_price#26, i_manufact_id#10] -Input [5]: [cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26, i_item_sk#9, i_manufact_id#10] +Output [2]: [cs_item_sk#25, cs_ext_sales_price#26] +Input [4]: [cs_bill_addr_sk#24, cs_item_sk#25, cs_ext_sales_price#26, ca_address_sk#9] (42) ReusedExchange [Reuses operator id: 27] -Output [1]: [ca_address_sk#15] +Output [2]: [i_item_sk#12, i_manufact_id#13] (43) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [cs_bill_addr_sk#24] -Right keys [1]: [ca_address_sk#15] +Left keys [1]: [cs_item_sk#25] +Right keys [1]: [i_item_sk#12] Join condition: None (44) Project [codegen id : 11] -Output [2]: [cs_ext_sales_price#26, i_manufact_id#10] -Input [4]: [cs_bill_addr_sk#24, cs_ext_sales_price#26, i_manufact_id#10, ca_address_sk#15] +Output [2]: [cs_ext_sales_price#26, i_manufact_id#13] +Input [4]: [cs_item_sk#25, cs_ext_sales_price#26, i_item_sk#12, i_manufact_id#13] (45) HashAggregate [codegen id : 11] -Input [2]: [cs_ext_sales_price#26, i_manufact_id#10] -Keys [1]: [i_manufact_id#10] +Input [2]: [cs_ext_sales_price#26, i_manufact_id#13] +Keys [1]: [i_manufact_id#13] Functions [1]: [partial_sum(UnscaledValue(cs_ext_sales_price#26))] Aggregate Attributes [1]: [sum#27] -Results [2]: [i_manufact_id#10, sum#28] +Results [2]: [i_manufact_id#13, sum#28] (46) Exchange -Input [2]: [i_manufact_id#10, sum#28] -Arguments: hashpartitioning(i_manufact_id#10, 5), true, [id=#29] +Input [2]: [i_manufact_id#13, sum#28] +Arguments: hashpartitioning(i_manufact_id#13, 5), ENSURE_REQUIREMENTS, [id=#29] (47) HashAggregate [codegen id : 12] -Input [2]: [i_manufact_id#10, sum#28] -Keys [1]: [i_manufact_id#10] +Input [2]: [i_manufact_id#13, sum#28] +Keys [1]: [i_manufact_id#13] Functions [1]: [sum(UnscaledValue(cs_ext_sales_price#26))] Aggregate Attributes [1]: [sum(UnscaledValue(cs_ext_sales_price#26))#30] -Results [2]: [i_manufact_id#10, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#26))#30,17,2) AS total_sales#31] +Results [2]: [i_manufact_id#13, MakeDecimal(sum(UnscaledValue(cs_ext_sales_price#26))#30,17,2) AS total_sales#31] (48) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35] @@ -310,69 +310,69 @@ Join condition: None Output [3]: [ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35] Input [5]: [ws_sold_date_sk#32, ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35, d_date_sk#5] -(54) ReusedExchange [Reuses operator id: 20] -Output [2]: [i_item_sk#9, i_manufact_id#10] +(54) ReusedExchange [Reuses operator id: 15] +Output [1]: [ca_address_sk#9] (55) BroadcastHashJoin [codegen id : 17] -Left 
keys [1]: [ws_item_sk#33] -Right keys [1]: [i_item_sk#9] +Left keys [1]: [ws_bill_addr_sk#34] +Right keys [1]: [ca_address_sk#9] Join condition: None (56) Project [codegen id : 17] -Output [3]: [ws_bill_addr_sk#34, ws_ext_sales_price#35, i_manufact_id#10] -Input [5]: [ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35, i_item_sk#9, i_manufact_id#10] +Output [2]: [ws_item_sk#33, ws_ext_sales_price#35] +Input [4]: [ws_item_sk#33, ws_bill_addr_sk#34, ws_ext_sales_price#35, ca_address_sk#9] (57) ReusedExchange [Reuses operator id: 27] -Output [1]: [ca_address_sk#15] +Output [2]: [i_item_sk#12, i_manufact_id#13] (58) BroadcastHashJoin [codegen id : 17] -Left keys [1]: [ws_bill_addr_sk#34] -Right keys [1]: [ca_address_sk#15] +Left keys [1]: [ws_item_sk#33] +Right keys [1]: [i_item_sk#12] Join condition: None (59) Project [codegen id : 17] -Output [2]: [ws_ext_sales_price#35, i_manufact_id#10] -Input [4]: [ws_bill_addr_sk#34, ws_ext_sales_price#35, i_manufact_id#10, ca_address_sk#15] +Output [2]: [ws_ext_sales_price#35, i_manufact_id#13] +Input [4]: [ws_item_sk#33, ws_ext_sales_price#35, i_item_sk#12, i_manufact_id#13] (60) HashAggregate [codegen id : 17] -Input [2]: [ws_ext_sales_price#35, i_manufact_id#10] -Keys [1]: [i_manufact_id#10] +Input [2]: [ws_ext_sales_price#35, i_manufact_id#13] +Keys [1]: [i_manufact_id#13] Functions [1]: [partial_sum(UnscaledValue(ws_ext_sales_price#35))] Aggregate Attributes [1]: [sum#36] -Results [2]: [i_manufact_id#10, sum#37] +Results [2]: [i_manufact_id#13, sum#37] (61) Exchange -Input [2]: [i_manufact_id#10, sum#37] -Arguments: hashpartitioning(i_manufact_id#10, 5), true, [id=#38] +Input [2]: [i_manufact_id#13, sum#37] +Arguments: hashpartitioning(i_manufact_id#13, 5), ENSURE_REQUIREMENTS, [id=#38] (62) HashAggregate [codegen id : 18] -Input [2]: [i_manufact_id#10, sum#37] -Keys [1]: [i_manufact_id#10] +Input [2]: [i_manufact_id#13, sum#37] +Keys [1]: [i_manufact_id#13] Functions [1]: [sum(UnscaledValue(ws_ext_sales_price#35))] Aggregate Attributes [1]: [sum(UnscaledValue(ws_ext_sales_price#35))#39] -Results [2]: [i_manufact_id#10, MakeDecimal(sum(UnscaledValue(ws_ext_sales_price#35))#39,17,2) AS total_sales#40] +Results [2]: [i_manufact_id#13, MakeDecimal(sum(UnscaledValue(ws_ext_sales_price#35))#39,17,2) AS total_sales#40] (63) Union (64) HashAggregate [codegen id : 19] -Input [2]: [i_manufact_id#10, total_sales#22] -Keys [1]: [i_manufact_id#10] +Input [2]: [i_manufact_id#13, total_sales#22] +Keys [1]: [i_manufact_id#13] Functions [1]: [partial_sum(total_sales#22)] Aggregate Attributes [2]: [sum#41, isEmpty#42] -Results [3]: [i_manufact_id#10, sum#43, isEmpty#44] +Results [3]: [i_manufact_id#13, sum#43, isEmpty#44] (65) Exchange -Input [3]: [i_manufact_id#10, sum#43, isEmpty#44] -Arguments: hashpartitioning(i_manufact_id#10, 5), true, [id=#45] +Input [3]: [i_manufact_id#13, sum#43, isEmpty#44] +Arguments: hashpartitioning(i_manufact_id#13, 5), ENSURE_REQUIREMENTS, [id=#45] (66) HashAggregate [codegen id : 20] -Input [3]: [i_manufact_id#10, sum#43, isEmpty#44] -Keys [1]: [i_manufact_id#10] +Input [3]: [i_manufact_id#13, sum#43, isEmpty#44] +Keys [1]: [i_manufact_id#13] Functions [1]: [sum(total_sales#22)] Aggregate Attributes [1]: [sum(total_sales#22)#46] -Results [2]: [i_manufact_id#10, sum(total_sales#22)#46 AS total_sales#47] +Results [2]: [i_manufact_id#13, sum(total_sales#22)#46 AS total_sales#47] (67) TakeOrderedAndProject -Input [2]: [i_manufact_id#10, total_sales#47] -Arguments: 100, [total_sales#47 ASC NULLS FIRST], [i_manufact_id#10, 
total_sales#47] +Input [2]: [i_manufact_id#13, total_sales#47] +Arguments: 100, [total_sales#47 ASC NULLS FIRST], [i_manufact_id#13, total_sales#47] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/simplified.txt index 410def2466e1a..14787f0bbce7b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q33.sf100/simplified.txt @@ -14,9 +14,9 @@ TakeOrderedAndProject [total_sales,i_manufact_id] WholeStageCodegen (5) HashAggregate [i_manufact_id,ss_ext_sales_price] [sum,sum] Project [ss_ext_sales_price,i_manufact_id] - BroadcastHashJoin [ss_addr_sk,ca_address_sk] - Project [ss_addr_sk,ss_ext_sales_price,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_addr_sk,ca_address_sk] Project [ss_item_sk,ss_addr_sk,ss_ext_sales_price] BroadcastHashJoin [ss_sold_date_sk,d_date_sk] Filter [ss_sold_date_sk,ss_addr_sk,ss_item_sk] @@ -33,28 +33,28 @@ TakeOrderedAndProject [total_sales,i_manufact_id] Scan parquet default.date_dim [d_date_sk,d_year,d_moy] InputAdapter BroadcastExchange #4 - WholeStageCodegen (3) - BroadcastHashJoin [i_manufact_id,i_manufact_id] - Filter [i_item_sk] + WholeStageCodegen (2) + Project [ca_address_sk] + Filter [ca_gmt_offset,ca_address_sk] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_manufact_id] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (2) - Project [i_manufact_id] - Filter [i_category] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_category,i_manufact_id] + Scan parquet default.customer_address [ca_address_sk,ca_gmt_offset] InputAdapter - BroadcastExchange #6 + BroadcastExchange #5 WholeStageCodegen (4) - Project [ca_address_sk] - Filter [ca_gmt_offset,ca_address_sk] + BroadcastHashJoin [i_manufact_id,i_manufact_id] + Filter [i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_gmt_offset] + Scan parquet default.item [i_item_sk,i_manufact_id] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (3) + Project [i_manufact_id] + Filter [i_category] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_category,i_manufact_id] WholeStageCodegen (12) HashAggregate [i_manufact_id,sum] [sum(UnscaledValue(cs_ext_sales_price)),total_sales,sum] InputAdapter @@ -62,9 +62,9 @@ TakeOrderedAndProject [total_sales,i_manufact_id] WholeStageCodegen (11) HashAggregate [i_manufact_id,cs_ext_sales_price] [sum,sum] Project [cs_ext_sales_price,i_manufact_id] - BroadcastHashJoin [cs_bill_addr_sk,ca_address_sk] - Project [cs_bill_addr_sk,cs_ext_sales_price,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Project [cs_item_sk,cs_ext_sales_price] + BroadcastHashJoin [cs_bill_addr_sk,ca_address_sk] Project [cs_bill_addr_sk,cs_item_sk,cs_ext_sales_price] BroadcastHashJoin [cs_sold_date_sk,d_date_sk] Filter [cs_sold_date_sk,cs_bill_addr_sk,cs_item_sk] @@ -74,9 +74,9 @@ TakeOrderedAndProject [total_sales,i_manufact_id] InputAdapter ReusedExchange [d_date_sk] #3 InputAdapter - ReusedExchange [i_item_sk,i_manufact_id] #4 + ReusedExchange [ca_address_sk] #4 InputAdapter - ReusedExchange [ca_address_sk] #6 + ReusedExchange [i_item_sk,i_manufact_id] #5 WholeStageCodegen (18) 
HashAggregate [i_manufact_id,sum] [sum(UnscaledValue(ws_ext_sales_price)),total_sales,sum] InputAdapter @@ -84,9 +84,9 @@ TakeOrderedAndProject [total_sales,i_manufact_id] WholeStageCodegen (17) HashAggregate [i_manufact_id,ws_ext_sales_price] [sum,sum] Project [ws_ext_sales_price,i_manufact_id] - BroadcastHashJoin [ws_bill_addr_sk,ca_address_sk] - Project [ws_bill_addr_sk,ws_ext_sales_price,i_manufact_id] - BroadcastHashJoin [ws_item_sk,i_item_sk] + BroadcastHashJoin [ws_item_sk,i_item_sk] + Project [ws_item_sk,ws_ext_sales_price] + BroadcastHashJoin [ws_bill_addr_sk,ca_address_sk] Project [ws_item_sk,ws_bill_addr_sk,ws_ext_sales_price] BroadcastHashJoin [ws_sold_date_sk,d_date_sk] Filter [ws_sold_date_sk,ws_bill_addr_sk,ws_item_sk] @@ -96,6 +96,6 @@ TakeOrderedAndProject [total_sales,i_manufact_id] InputAdapter ReusedExchange [d_date_sk] #3 InputAdapter - ReusedExchange [i_item_sk,i_manufact_id] #4 + ReusedExchange [ca_address_sk] #4 InputAdapter - ReusedExchange [ca_address_sk] #6 + ReusedExchange [i_item_sk,i_manufact_id] #5 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt index 17bb0e7e71d27..6fa9bb85f0b79 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/explain.txt @@ -120,7 +120,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -128,7 +128,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.2)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.2)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -156,7 +156,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 5] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] 
@@ -171,7 +171,7 @@ Condition : ((cnt#22 >= 15) AND (cnt#22 <= 20)) (29) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] -Arguments: hashpartitioning(ss_customer_sk#2, 5), true, [id=#23] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#23] (30) Sort [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] @@ -193,7 +193,7 @@ Condition : isnotnull(c_customer_sk#24) (34) Exchange Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] -Arguments: hashpartitioning(c_customer_sk#24, 5), true, [id=#29] +Arguments: hashpartitioning(c_customer_sk#24, 5), ENSURE_REQUIREMENTS, [id=#29] (35) Sort [codegen id : 8] Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] @@ -210,7 +210,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#24, c_sa (38) Exchange Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(c_last_name#27 ASC NULLS FIRST, c_first_name#26 ASC NULLS FIRST, c_salutation#25 ASC NULLS FIRST, c_preferred_cust_flag#28 DESC NULLS LAST, 5), true, [id=#30] +Arguments: rangepartitioning(c_last_name#27 ASC NULLS FIRST, c_first_name#26 ASC NULLS FIRST, c_salutation#25 ASC NULLS FIRST, c_preferred_cust_flag#28 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#30] (39) Sort [codegen id : 10] Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/simplified.txt index d9b416ddba9ef..c9945cda67746 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34.sf100/simplified.txt @@ -47,7 +47,7 @@ WholeStageCodegen (10) BroadcastExchange #6 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt index 18f465caea20d..1aea77422b14f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/explain.txt @@ -117,7 +117,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow 
[codegen id : 3] @@ -125,7 +125,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.2)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.2)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -153,7 +153,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -195,7 +195,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#23, c_sa (35) Exchange Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(c_last_name#26 ASC NULLS FIRST, c_first_name#25 ASC NULLS FIRST, c_salutation#24 ASC NULLS FIRST, c_preferred_cust_flag#27 DESC NULLS LAST, 5), true, [id=#29] +Arguments: rangepartitioning(c_last_name#26 ASC NULLS FIRST, c_first_name#25 ASC NULLS FIRST, c_salutation#24 ASC NULLS FIRST, c_preferred_cust_flag#27 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 7] Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/simplified.txt index 5af07f1d4ddef..4484587f65355 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q34/simplified.txt @@ -41,7 +41,7 @@ WholeStageCodegen (7) BroadcastExchange #5 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt index 92b9c26825e51..7465ddae84e8a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/explain.txt @@ -1,72 +1,71 @@ == Physical Plan == -CollectLimit (68) -+- * HashAggregate (67) - +- Exchange (66) - +- * HashAggregate (65) - +- * HashAggregate (64) - +- * HashAggregate (63) - +- * HashAggregate (62) - 
+- * HashAggregate (61) - +- * HashAggregate (60) - +- Exchange (59) - +- * HashAggregate (58) - +- SortMergeJoin LeftSemi (57) - :- SortMergeJoin LeftSemi (39) - : :- * Sort (21) - : : +- Exchange (20) - : : +- * Project (19) - : : +- * SortMergeJoin Inner (18) - : : :- * Sort (12) - : : : +- Exchange (11) - : : : +- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.store_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.date_dim (4) - : : +- * Sort (17) - : : +- Exchange (16) - : : +- * Filter (15) - : : +- * ColumnarToRow (14) - : : +- Scan parquet default.customer (13) - : +- * Sort (38) - : +- Exchange (37) - : +- * HashAggregate (36) - : +- Exchange (35) - : +- * HashAggregate (34) - : +- * Project (33) - : +- * SortMergeJoin Inner (32) - : :- * Sort (29) - : : +- Exchange (28) - : : +- * Project (27) - : : +- * BroadcastHashJoin Inner BuildRight (26) - : : :- * Filter (24) - : : : +- * ColumnarToRow (23) - : : : +- Scan parquet default.catalog_sales (22) - : : +- ReusedExchange (25) - : +- * Sort (31) - : +- ReusedExchange (30) - +- * Sort (56) - +- Exchange (55) - +- * HashAggregate (54) - +- Exchange (53) - +- * HashAggregate (52) - +- * Project (51) - +- * SortMergeJoin Inner (50) - :- * Sort (47) - : +- Exchange (46) - : +- * Project (45) - : +- * BroadcastHashJoin Inner BuildRight (44) - : :- * Filter (42) - : : +- * ColumnarToRow (41) - : : +- Scan parquet default.web_sales (40) - : +- ReusedExchange (43) - +- * Sort (49) - +- ReusedExchange (48) +* HashAggregate (67) ++- Exchange (66) + +- * HashAggregate (65) + +- * HashAggregate (64) + +- * HashAggregate (63) + +- * HashAggregate (62) + +- * HashAggregate (61) + +- * HashAggregate (60) + +- Exchange (59) + +- * HashAggregate (58) + +- SortMergeJoin LeftSemi (57) + :- SortMergeJoin LeftSemi (39) + : :- * Sort (21) + : : +- Exchange (20) + : : +- * Project (19) + : : +- * SortMergeJoin Inner (18) + : : :- * Sort (12) + : : : +- Exchange (11) + : : : +- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.store_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) + : : +- * Sort (17) + : : +- Exchange (16) + : : +- * Filter (15) + : : +- * ColumnarToRow (14) + : : +- Scan parquet default.customer (13) + : +- * Sort (38) + : +- Exchange (37) + : +- * HashAggregate (36) + : +- Exchange (35) + : +- * HashAggregate (34) + : +- * Project (33) + : +- * SortMergeJoin Inner (32) + : :- * Sort (29) + : : +- Exchange (28) + : : +- * Project (27) + : : +- * BroadcastHashJoin Inner BuildRight (26) + : : :- * Filter (24) + : : : +- * ColumnarToRow (23) + : : : +- Scan parquet default.catalog_sales (22) + : : +- ReusedExchange (25) + : +- * Sort (31) + : +- ReusedExchange (30) + +- * Sort (56) + +- Exchange (55) + +- * HashAggregate (54) + +- Exchange (53) + +- * HashAggregate (52) + +- * Project (51) + +- * SortMergeJoin Inner (50) + :- * Sort (47) + : +- Exchange (46) + : +- * Project (45) + : +- * BroadcastHashJoin Inner BuildRight (44) + : :- * Filter (42) + : : +- * ColumnarToRow (41) + : : +- Scan parquet default.web_sales (40) + : +- ReusedExchange (43) + +- * Sort (49) + +- ReusedExchange (48) (1) Scan 
parquet default.store_sales @@ -387,7 +386,3 @@ Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#37] Results [1]: [count(1)#37 AS count(1)#38] -(68) CollectLimit -Input [1]: [count(1)#38] -Arguments: 100 - diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/simplified.txt index 5bcd7dbb93022..8dd59340cf069 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38.sf100/simplified.txt @@ -1,118 +1,117 @@ -CollectLimit - WholeStageCodegen (26) - HashAggregate [count] [count(1),count(1),count] - InputAdapter - Exchange #1 - WholeStageCodegen (25) - HashAggregate [count,count] +WholeStageCodegen (26) + HashAggregate [count] [count(1),count(1),count] + InputAdapter + Exchange #1 + WholeStageCodegen (25) + HashAggregate [count,count] + HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] - HashAggregate [c_last_name,c_first_name,d_date] - InputAdapter - Exchange [c_last_name,c_first_name,d_date] #2 - WholeStageCodegen (24) - HashAggregate [c_last_name,c_first_name,d_date] - InputAdapter + InputAdapter + Exchange [c_last_name,c_first_name,d_date] #2 + WholeStageCodegen (24) + HashAggregate [c_last_name,c_first_name,d_date] + InputAdapter + SortMergeJoin [c_last_name,c_first_name,d_date,c_last_name,c_first_name,d_date] SortMergeJoin [c_last_name,c_first_name,d_date,c_last_name,c_first_name,d_date] - SortMergeJoin [c_last_name,c_first_name,d_date,c_last_name,c_first_name,d_date] - WholeStageCodegen (7) - Sort [c_last_name,c_first_name,d_date] - InputAdapter - Exchange [c_last_name,c_first_name,d_date] #3 - WholeStageCodegen (6) - Project [d_date,c_first_name,c_last_name] - SortMergeJoin [ss_customer_sk,c_customer_sk] - InputAdapter - WholeStageCodegen (3) - Sort [ss_customer_sk] - InputAdapter - Exchange [ss_customer_sk] #4 - WholeStageCodegen (2) - Project [ss_customer_sk,d_date] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk,d_date] - Filter [d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date,d_month_seq] - InputAdapter - WholeStageCodegen (5) - Sort [c_customer_sk] - InputAdapter - Exchange [c_customer_sk] #6 - WholeStageCodegen (4) - Filter [c_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name] - WholeStageCodegen (15) - Sort [c_last_name,c_first_name,d_date] - InputAdapter - Exchange [c_last_name,c_first_name,d_date] #7 - WholeStageCodegen (14) - HashAggregate [c_last_name,c_first_name,d_date] + WholeStageCodegen (7) + Sort [c_last_name,c_first_name,d_date] + InputAdapter + Exchange [c_last_name,c_first_name,d_date] #3 + WholeStageCodegen (6) + Project [d_date,c_first_name,c_last_name] + SortMergeJoin [ss_customer_sk,c_customer_sk] InputAdapter - Exchange [c_last_name,c_first_name,d_date] #8 - WholeStageCodegen (13) - HashAggregate [c_last_name,c_first_name,d_date] - Project 
[c_last_name,c_first_name,d_date] - SortMergeJoin [cs_bill_customer_sk,c_customer_sk] - InputAdapter - WholeStageCodegen (10) - Sort [cs_bill_customer_sk] - InputAdapter - Exchange [cs_bill_customer_sk] #9 - WholeStageCodegen (9) - Project [cs_bill_customer_sk,d_date] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk,cs_bill_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk] + WholeStageCodegen (3) + Sort [ss_customer_sk] + InputAdapter + Exchange [ss_customer_sk] #4 + WholeStageCodegen (2) + Project [ss_customer_sk,d_date] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (1) + Project [d_date_sk,d_date] + Filter [d_month_seq,d_date_sk] + ColumnarToRow InputAdapter - ReusedExchange [d_date_sk,d_date] #5 - InputAdapter - WholeStageCodegen (12) - Sort [c_customer_sk] - InputAdapter - ReusedExchange [c_customer_sk,c_first_name,c_last_name] #6 - WholeStageCodegen (23) + Scan parquet default.date_dim [d_date_sk,d_date,d_month_seq] + InputAdapter + WholeStageCodegen (5) + Sort [c_customer_sk] + InputAdapter + Exchange [c_customer_sk] #6 + WholeStageCodegen (4) + Filter [c_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name] + WholeStageCodegen (15) Sort [c_last_name,c_first_name,d_date] InputAdapter - Exchange [c_last_name,c_first_name,d_date] #10 - WholeStageCodegen (22) + Exchange [c_last_name,c_first_name,d_date] #7 + WholeStageCodegen (14) HashAggregate [c_last_name,c_first_name,d_date] InputAdapter - Exchange [c_last_name,c_first_name,d_date] #11 - WholeStageCodegen (21) + Exchange [c_last_name,c_first_name,d_date] #8 + WholeStageCodegen (13) HashAggregate [c_last_name,c_first_name,d_date] Project [c_last_name,c_first_name,d_date] - SortMergeJoin [ws_bill_customer_sk,c_customer_sk] + SortMergeJoin [cs_bill_customer_sk,c_customer_sk] InputAdapter - WholeStageCodegen (18) - Sort [ws_bill_customer_sk] + WholeStageCodegen (10) + Sort [cs_bill_customer_sk] InputAdapter - Exchange [ws_bill_customer_sk] #12 - WholeStageCodegen (17) - Project [ws_bill_customer_sk,d_date] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk,ws_bill_customer_sk] + Exchange [cs_bill_customer_sk] #9 + WholeStageCodegen (9) + Project [cs_bill_customer_sk,d_date] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk,cs_bill_customer_sk] ColumnarToRow InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_bill_customer_sk] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk] InputAdapter ReusedExchange [d_date_sk,d_date] #5 InputAdapter - WholeStageCodegen (20) + WholeStageCodegen (12) Sort [c_customer_sk] InputAdapter ReusedExchange [c_customer_sk,c_first_name,c_last_name] #6 + WholeStageCodegen (23) + Sort [c_last_name,c_first_name,d_date] + InputAdapter + Exchange [c_last_name,c_first_name,d_date] #10 + WholeStageCodegen (22) + HashAggregate [c_last_name,c_first_name,d_date] + InputAdapter + Exchange [c_last_name,c_first_name,d_date] #11 + WholeStageCodegen (21) + HashAggregate [c_last_name,c_first_name,d_date] + Project [c_last_name,c_first_name,d_date] + SortMergeJoin [ws_bill_customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (18) + Sort [ws_bill_customer_sk] + InputAdapter + Exchange 
[ws_bill_customer_sk] #12 + WholeStageCodegen (17) + Project [ws_bill_customer_sk,d_date] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_sold_date_sk,ws_bill_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_bill_customer_sk] + InputAdapter + ReusedExchange [d_date_sk,d_date] #5 + InputAdapter + WholeStageCodegen (20) + Sort [c_customer_sk] + InputAdapter + ReusedExchange [c_customer_sk,c_first_name,c_last_name] #6 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt index 09ab60c7cf651..74454cf32afd0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/explain.txt @@ -1,59 +1,58 @@ == Physical Plan == -CollectLimit (55) -+- * HashAggregate (54) - +- Exchange (53) - +- * HashAggregate (52) - +- * HashAggregate (51) - +- * HashAggregate (50) - +- * HashAggregate (49) - +- * HashAggregate (48) - +- * HashAggregate (47) - +- Exchange (46) - +- * HashAggregate (45) - +- * BroadcastHashJoin LeftSemi BuildRight (44) - :- * BroadcastHashJoin LeftSemi BuildRight (30) - : :- * Project (16) - : : +- * BroadcastHashJoin Inner BuildRight (15) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.store_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.date_dim (4) - : : +- BroadcastExchange (14) - : : +- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.customer (11) - : +- BroadcastExchange (29) - : +- * HashAggregate (28) - : +- Exchange (27) - : +- * HashAggregate (26) - : +- * Project (25) - : +- * BroadcastHashJoin Inner BuildRight (24) - : :- * Project (22) - : : +- * BroadcastHashJoin Inner BuildRight (21) - : : :- * Filter (19) - : : : +- * ColumnarToRow (18) - : : : +- Scan parquet default.catalog_sales (17) - : : +- ReusedExchange (20) - : +- ReusedExchange (23) - +- BroadcastExchange (43) - +- * HashAggregate (42) - +- Exchange (41) - +- * HashAggregate (40) - +- * Project (39) - +- * BroadcastHashJoin Inner BuildRight (38) - :- * Project (36) - : +- * BroadcastHashJoin Inner BuildRight (35) - : :- * Filter (33) - : : +- * ColumnarToRow (32) - : : +- Scan parquet default.web_sales (31) - : +- ReusedExchange (34) - +- ReusedExchange (37) +* HashAggregate (54) ++- Exchange (53) + +- * HashAggregate (52) + +- * HashAggregate (51) + +- * HashAggregate (50) + +- * HashAggregate (49) + +- * HashAggregate (48) + +- * HashAggregate (47) + +- Exchange (46) + +- * HashAggregate (45) + +- * BroadcastHashJoin LeftSemi BuildRight (44) + :- * BroadcastHashJoin LeftSemi BuildRight (30) + : :- * Project (16) + : : +- * BroadcastHashJoin Inner BuildRight (15) + : : :- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.store_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) + : : +- BroadcastExchange (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.customer (11) + : +- BroadcastExchange (29) + : +- * 
HashAggregate (28) + : +- Exchange (27) + : +- * HashAggregate (26) + : +- * Project (25) + : +- * BroadcastHashJoin Inner BuildRight (24) + : :- * Project (22) + : : +- * BroadcastHashJoin Inner BuildRight (21) + : : :- * Filter (19) + : : : +- * ColumnarToRow (18) + : : : +- Scan parquet default.catalog_sales (17) + : : +- ReusedExchange (20) + : +- ReusedExchange (23) + +- BroadcastExchange (43) + +- * HashAggregate (42) + +- Exchange (41) + +- * HashAggregate (40) + +- * Project (39) + +- * BroadcastHashJoin Inner BuildRight (38) + :- * Project (36) + : +- * BroadcastHashJoin Inner BuildRight (35) + : :- * Filter (33) + : : +- * ColumnarToRow (32) + : : +- Scan parquet default.web_sales (31) + : +- ReusedExchange (34) + +- ReusedExchange (37) (1) Scan parquet default.store_sales @@ -322,7 +321,3 @@ Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#33] Results [1]: [count(1)#33 AS count(1)#34] -(55) CollectLimit -Input [1]: [count(1)#34] -Arguments: 100 - diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/simplified.txt index 10a2166ce761d..a5b57a4ac9450 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q38/simplified.txt @@ -1,81 +1,80 @@ -CollectLimit - WholeStageCodegen (13) - HashAggregate [count] [count(1),count(1),count] - InputAdapter - Exchange #1 - WholeStageCodegen (12) - HashAggregate [count,count] +WholeStageCodegen (13) + HashAggregate [count] [count(1),count(1),count] + InputAdapter + Exchange #1 + WholeStageCodegen (12) + HashAggregate [count,count] + HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] HashAggregate [c_last_name,c_first_name,d_date] - HashAggregate [c_last_name,c_first_name,d_date] - InputAdapter - Exchange [c_last_name,c_first_name,d_date] #2 - WholeStageCodegen (11) - HashAggregate [c_last_name,c_first_name,d_date] + InputAdapter + Exchange [c_last_name,c_first_name,d_date] #2 + WholeStageCodegen (11) + HashAggregate [c_last_name,c_first_name,d_date] + BroadcastHashJoin [c_last_name,c_first_name,d_date,c_last_name,c_first_name,d_date] BroadcastHashJoin [c_last_name,c_first_name,d_date,c_last_name,c_first_name,d_date] - BroadcastHashJoin [c_last_name,c_first_name,d_date,c_last_name,c_first_name,d_date] - Project [d_date,c_first_name,c_last_name] - BroadcastHashJoin [ss_customer_sk,c_customer_sk] - Project [ss_customer_sk,d_date] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_customer_sk] + Project [d_date,c_first_name,c_last_name] + BroadcastHashJoin [ss_customer_sk,c_customer_sk] + Project [ss_customer_sk,d_date] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + Project [d_date_sk,d_date] + Filter [d_month_seq,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date,d_month_seq] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (2) + Filter [c_customer_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk] - InputAdapter - BroadcastExchange #3 
- WholeStageCodegen (1) - Project [d_date_sk,d_date] - Filter [d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date,d_month_seq] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (2) - Filter [c_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (6) - HashAggregate [c_last_name,c_first_name,d_date] - InputAdapter - Exchange [c_last_name,c_first_name,d_date] #6 - WholeStageCodegen (5) - HashAggregate [c_last_name,c_first_name,d_date] - Project [c_last_name,c_first_name,d_date] - BroadcastHashJoin [cs_bill_customer_sk,c_customer_sk] - Project [cs_bill_customer_sk,d_date] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk,cs_bill_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk] - InputAdapter - ReusedExchange [d_date_sk,d_date] #3 - InputAdapter - ReusedExchange [c_customer_sk,c_first_name,c_last_name] #4 + Scan parquet default.customer [c_customer_sk,c_first_name,c_last_name] InputAdapter - BroadcastExchange #7 - WholeStageCodegen (10) + BroadcastExchange #5 + WholeStageCodegen (6) HashAggregate [c_last_name,c_first_name,d_date] InputAdapter - Exchange [c_last_name,c_first_name,d_date] #8 - WholeStageCodegen (9) + Exchange [c_last_name,c_first_name,d_date] #6 + WholeStageCodegen (5) HashAggregate [c_last_name,c_first_name,d_date] Project [c_last_name,c_first_name,d_date] - BroadcastHashJoin [ws_bill_customer_sk,c_customer_sk] - Project [ws_bill_customer_sk,d_date] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk,ws_bill_customer_sk] + BroadcastHashJoin [cs_bill_customer_sk,c_customer_sk] + Project [cs_bill_customer_sk,d_date] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk,cs_bill_customer_sk] ColumnarToRow InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_bill_customer_sk] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk] InputAdapter ReusedExchange [d_date_sk,d_date] #3 InputAdapter ReusedExchange [c_customer_sk,c_first_name,c_last_name] #4 + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (10) + HashAggregate [c_last_name,c_first_name,d_date] + InputAdapter + Exchange [c_last_name,c_first_name,d_date] #8 + WholeStageCodegen (9) + HashAggregate [c_last_name,c_first_name,d_date] + Project [c_last_name,c_first_name,d_date] + BroadcastHashJoin [ws_bill_customer_sk,c_customer_sk] + Project [ws_bill_customer_sk,d_date] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_sold_date_sk,ws_bill_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_bill_customer_sk] + InputAdapter + ReusedExchange [d_date_sk,d_date] #3 + InputAdapter + ReusedExchange [c_customer_sk,c_first_name,c_last_name] #4 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt index c5eb50e25d82c..13d73e61e1443 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/explain.txt @@ -73,19 +73,19 @@ Input [2]: [i_manufact#2, count#9] Keys [1]: [i_manufact#2] Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#11] -Results [3]: 
[count(1)#11 AS item_cnt#12, i_manufact#2 AS i_manufact#2#13, true AS alwaysTrue#14] +Results [2]: [count(1)#11 AS item_cnt#12, i_manufact#2 AS i_manufact#2#13] (12) Filter [codegen id : 2] -Input [3]: [item_cnt#12, i_manufact#2#13, alwaysTrue#14] -Condition : (if (isnull(alwaysTrue#14)) 0 else item_cnt#12 > 0) +Input [2]: [item_cnt#12, i_manufact#2#13] +Condition : (item_cnt#12 > 0) (13) Project [codegen id : 2] Output [1]: [i_manufact#2#13] -Input [3]: [item_cnt#12, i_manufact#2#13, alwaysTrue#14] +Input [2]: [item_cnt#12, i_manufact#2#13] (14) BroadcastExchange Input [1]: [i_manufact#2#13] -Arguments: HashedRelationBroadcastMode(List(input[0, string, true]),false), [id=#15] +Arguments: HashedRelationBroadcastMode(List(input[0, string, true]),false), [id=#14] (15) BroadcastHashJoin [codegen id : 3] Left keys [1]: [i_manufact#2] @@ -105,7 +105,7 @@ Results [1]: [i_product_name#3] (18) Exchange Input [1]: [i_product_name#3] -Arguments: hashpartitioning(i_product_name#3, 5), true, [id=#16] +Arguments: hashpartitioning(i_product_name#3, 5), true, [id=#15] (19) HashAggregate [codegen id : 4] Input [1]: [i_product_name#3] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/simplified.txt index 350aa9a3c572b..2d14d75ca9362 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41.sf100/simplified.txt @@ -16,8 +16,8 @@ TakeOrderedAndProject [i_product_name] BroadcastExchange #2 WholeStageCodegen (2) Project [i_manufact] - Filter [alwaysTrue,item_cnt] - HashAggregate [i_manufact,count] [count(1),item_cnt,i_manufact,alwaysTrue,count] + Filter [item_cnt] + HashAggregate [i_manufact,count] [count(1),item_cnt,i_manufact,count] InputAdapter Exchange [i_manufact] #3 WholeStageCodegen (1) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt index c5eb50e25d82c..13d73e61e1443 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/explain.txt @@ -73,19 +73,19 @@ Input [2]: [i_manufact#2, count#9] Keys [1]: [i_manufact#2] Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#11] -Results [3]: [count(1)#11 AS item_cnt#12, i_manufact#2 AS i_manufact#2#13, true AS alwaysTrue#14] +Results [2]: [count(1)#11 AS item_cnt#12, i_manufact#2 AS i_manufact#2#13] (12) Filter [codegen id : 2] -Input [3]: [item_cnt#12, i_manufact#2#13, alwaysTrue#14] -Condition : (if (isnull(alwaysTrue#14)) 0 else item_cnt#12 > 0) +Input [2]: [item_cnt#12, i_manufact#2#13] +Condition : (item_cnt#12 > 0) (13) Project [codegen id : 2] Output [1]: [i_manufact#2#13] -Input [3]: [item_cnt#12, i_manufact#2#13, alwaysTrue#14] +Input [2]: [item_cnt#12, i_manufact#2#13] (14) BroadcastExchange Input [1]: [i_manufact#2#13] -Arguments: HashedRelationBroadcastMode(List(input[0, string, true]),false), [id=#15] +Arguments: HashedRelationBroadcastMode(List(input[0, string, true]),false), [id=#14] (15) BroadcastHashJoin [codegen id : 3] Left keys [1]: [i_manufact#2] @@ -105,7 +105,7 @@ Results [1]: [i_product_name#3] (18) Exchange Input [1]: [i_product_name#3] -Arguments: hashpartitioning(i_product_name#3, 5), true, [id=#16] +Arguments: 
hashpartitioning(i_product_name#3, 5), true, [id=#15] (19) HashAggregate [codegen id : 4] Input [1]: [i_product_name#3] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/simplified.txt index 350aa9a3c572b..2d14d75ca9362 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q41/simplified.txt @@ -16,8 +16,8 @@ TakeOrderedAndProject [i_product_name] BroadcastExchange #2 WholeStageCodegen (2) Project [i_manufact] - Filter [alwaysTrue,item_cnt] - HashAggregate [i_manufact,count] [count(1),item_cnt,i_manufact,alwaysTrue,count] + Filter [item_cnt] + HashAggregate [i_manufact,count] [count(1),item_cnt,i_manufact,count] InputAdapter Exchange [i_manufact] #3 WholeStageCodegen (1) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt index 0232d56ab7481..54e117e6cac10 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/explain.txt @@ -34,12 +34,12 @@ TakeOrderedAndProject (46) : : +- Exchange (22) : : +- * Filter (21) : : +- * ColumnarToRow (20) - : : +- Scan parquet default.customer_address (19) + : : +- Scan parquet default.customer (19) : +- * Sort (28) : +- Exchange (27) : +- * Filter (26) : +- * ColumnarToRow (25) - : +- Scan parquet default.customer (24) + : +- Scan parquet default.customer_address (24) +- BroadcastExchange (39) +- * Project (38) +- * Filter (37) @@ -127,75 +127,75 @@ Arguments: hashpartitioning(ws_bill_customer_sk#4, 5), true, [id=#13] Input [3]: [ws_bill_customer_sk#4, ws_sales_price#5, i_item_id#11] Arguments: [ws_bill_customer_sk#4 ASC NULLS FIRST], false, 0 -(19) Scan parquet default.customer_address -Output [3]: [ca_address_sk#14, ca_city#15, ca_zip#16] +(19) Scan parquet default.customer +Output [2]: [c_customer_sk#14, c_current_addr_sk#15] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [IsNotNull(ca_address_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer] +PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] +ReadSchema: struct (20) ColumnarToRow [codegen id : 5] -Input [3]: [ca_address_sk#14, ca_city#15, ca_zip#16] +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] (21) Filter [codegen id : 5] -Input [3]: [ca_address_sk#14, ca_city#15, ca_zip#16] -Condition : isnotnull(ca_address_sk#14) +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] +Condition : (isnotnull(c_customer_sk#14) AND isnotnull(c_current_addr_sk#15)) (22) Exchange -Input [3]: [ca_address_sk#14, ca_city#15, ca_zip#16] -Arguments: hashpartitioning(ca_address_sk#14, 5), true, [id=#17] +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] +Arguments: hashpartitioning(c_current_addr_sk#15, 5), true, [id=#16] (23) Sort [codegen id : 6] -Input [3]: [ca_address_sk#14, ca_city#15, ca_zip#16] -Arguments: [ca_address_sk#14 ASC NULLS FIRST], false, 0 +Input [2]: [c_customer_sk#14, c_current_addr_sk#15] +Arguments: [c_current_addr_sk#15 ASC NULLS FIRST], false, 0 -(24) Scan parquet default.customer -Output [2]: [c_customer_sk#18, c_current_addr_sk#19] +(24) Scan parquet 
default.customer_address +Output [3]: [ca_address_sk#17, ca_city#18, ca_zip#19] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer] -PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [IsNotNull(ca_address_sk)] +ReadSchema: struct (25) ColumnarToRow [codegen id : 7] -Input [2]: [c_customer_sk#18, c_current_addr_sk#19] +Input [3]: [ca_address_sk#17, ca_city#18, ca_zip#19] (26) Filter [codegen id : 7] -Input [2]: [c_customer_sk#18, c_current_addr_sk#19] -Condition : (isnotnull(c_customer_sk#18) AND isnotnull(c_current_addr_sk#19)) +Input [3]: [ca_address_sk#17, ca_city#18, ca_zip#19] +Condition : isnotnull(ca_address_sk#17) (27) Exchange -Input [2]: [c_customer_sk#18, c_current_addr_sk#19] -Arguments: hashpartitioning(c_current_addr_sk#19, 5), true, [id=#20] +Input [3]: [ca_address_sk#17, ca_city#18, ca_zip#19] +Arguments: hashpartitioning(ca_address_sk#17, 5), true, [id=#20] (28) Sort [codegen id : 8] -Input [2]: [c_customer_sk#18, c_current_addr_sk#19] -Arguments: [c_current_addr_sk#19 ASC NULLS FIRST], false, 0 +Input [3]: [ca_address_sk#17, ca_city#18, ca_zip#19] +Arguments: [ca_address_sk#17 ASC NULLS FIRST], false, 0 (29) SortMergeJoin [codegen id : 9] -Left keys [1]: [ca_address_sk#14] -Right keys [1]: [c_current_addr_sk#19] +Left keys [1]: [c_current_addr_sk#15] +Right keys [1]: [ca_address_sk#17] Join condition: None (30) Project [codegen id : 9] -Output [3]: [ca_city#15, ca_zip#16, c_customer_sk#18] -Input [5]: [ca_address_sk#14, ca_city#15, ca_zip#16, c_customer_sk#18, c_current_addr_sk#19] +Output [3]: [c_customer_sk#14, ca_city#18, ca_zip#19] +Input [5]: [c_customer_sk#14, c_current_addr_sk#15, ca_address_sk#17, ca_city#18, ca_zip#19] (31) Exchange -Input [3]: [ca_city#15, ca_zip#16, c_customer_sk#18] -Arguments: hashpartitioning(c_customer_sk#18, 5), true, [id=#21] +Input [3]: [c_customer_sk#14, ca_city#18, ca_zip#19] +Arguments: hashpartitioning(c_customer_sk#14, 5), true, [id=#21] (32) Sort [codegen id : 10] -Input [3]: [ca_city#15, ca_zip#16, c_customer_sk#18] -Arguments: [c_customer_sk#18 ASC NULLS FIRST], false, 0 +Input [3]: [c_customer_sk#14, ca_city#18, ca_zip#19] +Arguments: [c_customer_sk#14 ASC NULLS FIRST], false, 0 (33) SortMergeJoin [codegen id : 12] Left keys [1]: [ws_bill_customer_sk#4] -Right keys [1]: [c_customer_sk#18] +Right keys [1]: [c_customer_sk#14] Join condition: None (34) Project [codegen id : 12] -Output [4]: [ws_sales_price#5, ca_city#15, ca_zip#16, i_item_id#11] -Input [6]: [ws_bill_customer_sk#4, ws_sales_price#5, i_item_id#11, ca_city#15, ca_zip#16, c_customer_sk#18] +Output [4]: [ws_sales_price#5, ca_city#18, ca_zip#19, i_item_id#11] +Input [6]: [ws_bill_customer_sk#4, ws_sales_price#5, i_item_id#11, c_customer_sk#14, ca_city#18, ca_zip#19] (35) Scan parquet default.item Output [2]: [i_item_sk#10, i_item_id#11] @@ -225,32 +225,32 @@ Right keys [1]: [i_item_id#11#22] Join condition: None (41) Filter [codegen id : 12] -Input [5]: [ws_sales_price#5, ca_city#15, ca_zip#16, i_item_id#11, exists#1] -Condition : (substr(ca_zip#16, 1, 5) IN (85669,86197,88274,83405,86475,85392,85460,80348,81792) OR exists#1) +Input [5]: [ws_sales_price#5, ca_city#18, ca_zip#19, i_item_id#11, exists#1] +Condition : (substr(ca_zip#19, 1, 5) IN (85669,86197,88274,83405,86475,85392,85460,80348,81792) OR exists#1) (42) Project [codegen id : 12] -Output [3]: [ws_sales_price#5, ca_city#15, ca_zip#16] -Input [5]: 
[ws_sales_price#5, ca_city#15, ca_zip#16, i_item_id#11, exists#1] +Output [3]: [ws_sales_price#5, ca_city#18, ca_zip#19] +Input [5]: [ws_sales_price#5, ca_city#18, ca_zip#19, i_item_id#11, exists#1] (43) HashAggregate [codegen id : 12] -Input [3]: [ws_sales_price#5, ca_city#15, ca_zip#16] -Keys [2]: [ca_zip#16, ca_city#15] +Input [3]: [ws_sales_price#5, ca_city#18, ca_zip#19] +Keys [2]: [ca_zip#19, ca_city#18] Functions [1]: [partial_sum(UnscaledValue(ws_sales_price#5))] Aggregate Attributes [1]: [sum#24] -Results [3]: [ca_zip#16, ca_city#15, sum#25] +Results [3]: [ca_zip#19, ca_city#18, sum#25] (44) Exchange -Input [3]: [ca_zip#16, ca_city#15, sum#25] -Arguments: hashpartitioning(ca_zip#16, ca_city#15, 5), true, [id=#26] +Input [3]: [ca_zip#19, ca_city#18, sum#25] +Arguments: hashpartitioning(ca_zip#19, ca_city#18, 5), true, [id=#26] (45) HashAggregate [codegen id : 13] -Input [3]: [ca_zip#16, ca_city#15, sum#25] -Keys [2]: [ca_zip#16, ca_city#15] +Input [3]: [ca_zip#19, ca_city#18, sum#25] +Keys [2]: [ca_zip#19, ca_city#18] Functions [1]: [sum(UnscaledValue(ws_sales_price#5))] Aggregate Attributes [1]: [sum(UnscaledValue(ws_sales_price#5))#27] -Results [3]: [ca_zip#16, ca_city#15, MakeDecimal(sum(UnscaledValue(ws_sales_price#5))#27,17,2) AS sum(ws_sales_price)#28] +Results [3]: [ca_zip#19, ca_city#18, MakeDecimal(sum(UnscaledValue(ws_sales_price#5))#27,17,2) AS sum(ws_sales_price)#28] (46) TakeOrderedAndProject -Input [3]: [ca_zip#16, ca_city#15, sum(ws_sales_price)#28] -Arguments: 100, [ca_zip#16 ASC NULLS FIRST, ca_city#15 ASC NULLS FIRST], [ca_zip#16, ca_city#15, sum(ws_sales_price)#28] +Input [3]: [ca_zip#19, ca_city#18, sum(ws_sales_price)#28] +Arguments: 100, [ca_zip#19 ASC NULLS FIRST, ca_city#18 ASC NULLS FIRST], [ca_zip#19, ca_city#18, sum(ws_sales_price)#28] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/simplified.txt index 1eab468e67bc0..0e9662bb6aca5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q45.sf100/simplified.txt @@ -45,28 +45,28 @@ TakeOrderedAndProject [ca_zip,ca_city,sum(ws_sales_price)] InputAdapter Exchange [c_customer_sk] #5 WholeStageCodegen (9) - Project [ca_city,ca_zip,c_customer_sk] - SortMergeJoin [ca_address_sk,c_current_addr_sk] + Project [c_customer_sk,ca_city,ca_zip] + SortMergeJoin [c_current_addr_sk,ca_address_sk] InputAdapter WholeStageCodegen (6) - Sort [ca_address_sk] + Sort [c_current_addr_sk] InputAdapter - Exchange [ca_address_sk] #6 + Exchange [c_current_addr_sk] #6 WholeStageCodegen (5) - Filter [ca_address_sk] + Filter [c_customer_sk,c_current_addr_sk] ColumnarToRow InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_city,ca_zip] + Scan parquet default.customer [c_customer_sk,c_current_addr_sk] InputAdapter WholeStageCodegen (8) - Sort [c_current_addr_sk] + Sort [ca_address_sk] InputAdapter - Exchange [c_current_addr_sk] #7 + Exchange [ca_address_sk] #7 WholeStageCodegen (7) - Filter [c_customer_sk,c_current_addr_sk] + Filter [ca_address_sk] ColumnarToRow InputAdapter - Scan parquet default.customer [c_customer_sk,c_current_addr_sk] + Scan parquet default.customer_address [ca_address_sk,ca_city,ca_zip] InputAdapter BroadcastExchange #8 WholeStageCodegen (11) diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt index 55bd25c501294..5a9c4715d4b05 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/explain.txt @@ -10,8 +10,8 @@ TakeOrderedAndProject (81) : +- * HashAggregate (23) : +- * Project (22) : +- * BroadcastHashJoin Inner BuildRight (21) - : :- * Project (16) - : : +- * BroadcastHashJoin Inner BuildRight (15) + : :- * Project (15) + : : +- * BroadcastHashJoin Inner BuildRight (14) : : :- Union (9) : : : :- * Project (4) : : : : +- * Filter (3) @@ -21,22 +21,22 @@ TakeOrderedAndProject (81) : : : +- * Filter (7) : : : +- * ColumnarToRow (6) : : : +- Scan parquet default.store_returns (5) - : : +- BroadcastExchange (14) - : : +- * Project (13) - : : +- * Filter (12) - : : +- * ColumnarToRow (11) - : : +- Scan parquet default.date_dim (10) + : : +- BroadcastExchange (13) + : : +- * Filter (12) + : : +- * ColumnarToRow (11) + : : +- Scan parquet default.store (10) : +- BroadcastExchange (20) - : +- * Filter (19) - : +- * ColumnarToRow (18) - : +- Scan parquet default.store (17) + : +- * Project (19) + : +- * Filter (18) + : +- * ColumnarToRow (17) + : +- Scan parquet default.date_dim (16) :- * HashAggregate (46) : +- Exchange (45) : +- * HashAggregate (44) : +- * Project (43) : +- * BroadcastHashJoin Inner BuildRight (42) - : :- * Project (37) - : : +- * BroadcastHashJoin Inner BuildRight (36) + : :- * Project (40) + : : +- * BroadcastHashJoin Inner BuildRight (39) : : :- Union (34) : : : :- * Project (29) : : : : +- * Filter (28) @@ -46,18 +46,18 @@ TakeOrderedAndProject (81) : : : +- * Filter (32) : : : +- * ColumnarToRow (31) : : : +- Scan parquet default.catalog_returns (30) - : : +- ReusedExchange (35) - : +- BroadcastExchange (41) - : +- * Filter (40) - : +- * ColumnarToRow (39) - : +- Scan parquet default.catalog_page (38) + : : +- BroadcastExchange (38) + : : +- * Filter (37) + : : +- * ColumnarToRow (36) + : : +- Scan parquet default.catalog_page (35) + : +- ReusedExchange (41) +- * HashAggregate (75) +- Exchange (74) +- * HashAggregate (73) +- * Project (72) +- * BroadcastHashJoin Inner BuildRight (71) - :- * Project (66) - : +- * BroadcastHashJoin Inner BuildRight (65) + :- * Project (69) + : +- * BroadcastHashJoin Inner BuildRight (68) : :- Union (63) : : :- * Project (50) : : : +- * Filter (49) @@ -75,11 +75,11 @@ TakeOrderedAndProject (81) : : +- * Filter (58) : : +- * ColumnarToRow (57) : : +- Scan parquet default.web_sales (56) - : +- ReusedExchange (64) - +- BroadcastExchange (70) - +- * Filter (69) - +- * ColumnarToRow (68) - +- Scan parquet default.web_site (67) + : +- BroadcastExchange (67) + : +- * Filter (66) + : +- * ColumnarToRow (65) + : +- Scan parquet default.web_site (64) + +- ReusedExchange (70) (1) Scan parquet default.store_sales @@ -119,81 +119,81 @@ Input [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_los (9) Union -(10) Scan parquet default.date_dim -Output [2]: [d_date_sk#21, d_date#22] +(10) Scan parquet default.store +Output [2]: [s_store_sk#21, s_store_id#22] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-06), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in 
comparison]/{warehouse_dir}/store] +PushedFilters: [IsNotNull(s_store_sk)] +ReadSchema: struct (11) ColumnarToRow [codegen id : 3] -Input [2]: [d_date_sk#21, d_date#22] +Input [2]: [s_store_sk#21, s_store_id#22] (12) Filter [codegen id : 3] -Input [2]: [d_date_sk#21, d_date#22] -Condition : (((isnotnull(d_date#22) AND (d_date#22 >= 11192)) AND (d_date#22 <= 11206)) AND isnotnull(d_date_sk#21)) +Input [2]: [s_store_sk#21, s_store_id#22] +Condition : isnotnull(s_store_sk#21) -(13) Project [codegen id : 3] -Output [1]: [d_date_sk#21] -Input [2]: [d_date_sk#21, d_date#22] +(13) BroadcastExchange +Input [2]: [s_store_sk#21, s_store_id#22] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#23] -(14) BroadcastExchange -Input [1]: [d_date_sk#21] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#23] - -(15) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [date_sk#6] -Right keys [1]: [cast(d_date_sk#21 as bigint)] +(14) BroadcastHashJoin [codegen id : 5] +Left keys [1]: [store_sk#5] +Right keys [1]: [cast(s_store_sk#21 as bigint)] Join condition: None -(16) Project [codegen id : 5] -Output [5]: [store_sk#5, sales_price#7, profit#8, return_amt#9, net_loss#10] -Input [7]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, d_date_sk#21] +(15) Project [codegen id : 5] +Output [6]: [date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#22] +Input [8]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_sk#21, s_store_id#22] -(17) Scan parquet default.store -Output [2]: [s_store_sk#24, s_store_id#25] +(16) Scan parquet default.date_dim +Output [2]: [d_date_sk#24, d_date#25] Batched: true -Location [not included in comparison]/{warehouse_dir}/store] -PushedFilters: [IsNotNull(s_store_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-06), IsNotNull(d_date_sk)] +ReadSchema: struct + +(17) ColumnarToRow [codegen id : 4] +Input [2]: [d_date_sk#24, d_date#25] -(18) ColumnarToRow [codegen id : 4] -Input [2]: [s_store_sk#24, s_store_id#25] +(18) Filter [codegen id : 4] +Input [2]: [d_date_sk#24, d_date#25] +Condition : (((isnotnull(d_date#25) AND (d_date#25 >= 11192)) AND (d_date#25 <= 11206)) AND isnotnull(d_date_sk#24)) -(19) Filter [codegen id : 4] -Input [2]: [s_store_sk#24, s_store_id#25] -Condition : isnotnull(s_store_sk#24) +(19) Project [codegen id : 4] +Output [1]: [d_date_sk#24] +Input [2]: [d_date_sk#24, d_date#25] (20) BroadcastExchange -Input [2]: [s_store_sk#24, s_store_id#25] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#26] +Input [1]: [d_date_sk#24] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#26] (21) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [store_sk#5] -Right keys [1]: [cast(s_store_sk#24 as bigint)] +Left keys [1]: [date_sk#6] +Right keys [1]: [cast(d_date_sk#24 as bigint)] Join condition: None (22) Project [codegen id : 5] -Output [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#25] -Input [7]: [store_sk#5, sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_sk#24, s_store_id#25] +Output [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#22] +Input [7]: [date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, 
s_store_id#22, d_date_sk#24] (23) HashAggregate [codegen id : 5] -Input [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#25] -Keys [1]: [s_store_id#25] +Input [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#22] +Keys [1]: [s_store_id#22] Functions [4]: [partial_sum(UnscaledValue(sales_price#7)), partial_sum(UnscaledValue(return_amt#9)), partial_sum(UnscaledValue(profit#8)), partial_sum(UnscaledValue(net_loss#10))] Aggregate Attributes [4]: [sum#27, sum#28, sum#29, sum#30] -Results [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] +Results [5]: [s_store_id#22, sum#31, sum#32, sum#33, sum#34] (24) Exchange -Input [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] -Arguments: hashpartitioning(s_store_id#25, 5), true, [id=#35] +Input [5]: [s_store_id#22, sum#31, sum#32, sum#33, sum#34] +Arguments: hashpartitioning(s_store_id#22, 5), ENSURE_REQUIREMENTS, [id=#35] (25) HashAggregate [codegen id : 6] -Input [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] -Keys [1]: [s_store_id#25] +Input [5]: [s_store_id#22, sum#31, sum#32, sum#33, sum#34] +Keys [1]: [s_store_id#22] Functions [4]: [sum(UnscaledValue(sales_price#7)), sum(UnscaledValue(return_amt#9)), sum(UnscaledValue(profit#8)), sum(UnscaledValue(net_loss#10))] Aggregate Attributes [4]: [sum(UnscaledValue(sales_price#7))#36, sum(UnscaledValue(return_amt#9))#37, sum(UnscaledValue(profit#8))#38, sum(UnscaledValue(net_loss#10))#39] -Results [5]: [MakeDecimal(sum(UnscaledValue(sales_price#7))#36,17,2) AS sales#40, MakeDecimal(sum(UnscaledValue(return_amt#9))#37,17,2) AS RETURNS#41, CheckOverflow((promote_precision(cast(MakeDecimal(sum(UnscaledValue(profit#8))#38,17,2) as decimal(18,2))) - promote_precision(cast(MakeDecimal(sum(UnscaledValue(net_loss#10))#39,17,2) as decimal(18,2)))), DecimalType(18,2), true) AS profit#42, store channel AS channel#43, concat(store, s_store_id#25) AS id#44] +Results [5]: [MakeDecimal(sum(UnscaledValue(sales_price#7))#36,17,2) AS sales#40, MakeDecimal(sum(UnscaledValue(return_amt#9))#37,17,2) AS RETURNS#41, CheckOverflow((promote_precision(cast(MakeDecimal(sum(UnscaledValue(profit#8))#38,17,2) as decimal(18,2))) - promote_precision(cast(MakeDecimal(sum(UnscaledValue(net_loss#10))#39,17,2) as decimal(18,2)))), DecimalType(18,2), true) AS profit#42, store channel AS channel#43, concat(store, s_store_id#22) AS id#44] (26) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs_net_profit#48] @@ -233,44 +233,44 @@ Input [4]: [cr_returned_date_sk#55, cr_catalog_page_sk#56, cr_return_amount#57, (34) Union -(35) ReusedExchange [Reuses operator id: 14] -Output [1]: [d_date_sk#21] - -(36) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [date_sk#50] -Right keys [1]: [d_date_sk#21] -Join condition: None - -(37) Project [codegen id : 11] -Output [5]: [page_sk#49, sales_price#51, profit#52, return_amt#53, net_loss#54] -Input [7]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, d_date_sk#21] - -(38) Scan parquet default.catalog_page +(35) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct -(39) ColumnarToRow [codegen id : 10] +(36) ColumnarToRow [codegen id : 9] Input [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] -(40) Filter [codegen id : 10] +(37) Filter [codegen id : 9] Input [2]: 
[cp_catalog_page_sk#65, cp_catalog_page_id#66] Condition : isnotnull(cp_catalog_page_sk#65) -(41) BroadcastExchange +(38) BroadcastExchange Input [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#67] -(42) BroadcastHashJoin [codegen id : 11] +(39) BroadcastHashJoin [codegen id : 11] Left keys [1]: [page_sk#49] Right keys [1]: [cp_catalog_page_sk#65] Join condition: None +(40) Project [codegen id : 11] +Output [6]: [date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66] +Input [8]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_sk#65, cp_catalog_page_id#66] + +(41) ReusedExchange [Reuses operator id: 20] +Output [1]: [d_date_sk#24] + +(42) BroadcastHashJoin [codegen id : 11] +Left keys [1]: [date_sk#50] +Right keys [1]: [d_date_sk#24] +Join condition: None + (43) Project [codegen id : 11] Output [5]: [sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66] -Input [7]: [page_sk#49, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_sk#65, cp_catalog_page_id#66] +Input [7]: [date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66, d_date_sk#24] (44) HashAggregate [codegen id : 11] Input [5]: [sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66] @@ -281,7 +281,7 @@ Results [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] (45) Exchange Input [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] -Arguments: hashpartitioning(cp_catalog_page_id#66, 5), true, [id=#76] +Arguments: hashpartitioning(cp_catalog_page_id#66, 5), ENSURE_REQUIREMENTS, [id=#76] (46) HashAggregate [codegen id : 12] Input [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] @@ -324,7 +324,7 @@ Condition : isnotnull(wr_returned_date_sk#96) (54) Exchange Input [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] -Arguments: hashpartitioning(wr_item_sk#97, wr_order_number#98, 5), true, [id=#101] +Arguments: hashpartitioning(wr_item_sk#97, wr_order_number#98, 5), ENSURE_REQUIREMENTS, [id=#101] (55) Sort [codegen id : 15] Input [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] @@ -346,7 +346,7 @@ Condition : ((isnotnull(ws_item_sk#102) AND isnotnull(ws_order_number#103)) AND (59) Exchange Input [3]: [ws_item_sk#102, ws_web_site_sk#87, ws_order_number#103] -Arguments: hashpartitioning(cast(ws_item_sk#102 as bigint), cast(ws_order_number#103 as bigint), 5), true, [id=#104] +Arguments: hashpartitioning(cast(ws_item_sk#102 as bigint), cast(ws_order_number#103 as bigint), 5), ENSURE_REQUIREMENTS, [id=#104] (60) Sort [codegen id : 17] Input [3]: [ws_item_sk#102, ws_web_site_sk#87, ws_order_number#103] @@ -363,44 +363,44 @@ Input [8]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return (63) Union -(64) ReusedExchange [Reuses operator id: 14] -Output [1]: [d_date_sk#21] - -(65) BroadcastHashJoin [codegen id : 21] -Left keys [1]: [date_sk#91] -Right keys [1]: [cast(d_date_sk#21 as bigint)] -Join condition: None - -(66) Project [codegen id : 21] -Output [5]: [wsr_web_site_sk#90, sales_price#92, profit#93, return_amt#94, net_loss#95] -Input [7]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, d_date_sk#21] - -(67) Scan parquet default.web_site +(64) Scan parquet default.web_site Output 
[2]: [web_site_sk#111, web_site_id#112] Batched: true Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct -(68) ColumnarToRow [codegen id : 20] +(65) ColumnarToRow [codegen id : 19] Input [2]: [web_site_sk#111, web_site_id#112] -(69) Filter [codegen id : 20] +(66) Filter [codegen id : 19] Input [2]: [web_site_sk#111, web_site_id#112] Condition : isnotnull(web_site_sk#111) -(70) BroadcastExchange +(67) BroadcastExchange Input [2]: [web_site_sk#111, web_site_id#112] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#113] -(71) BroadcastHashJoin [codegen id : 21] +(68) BroadcastHashJoin [codegen id : 21] Left keys [1]: [wsr_web_site_sk#90] Right keys [1]: [web_site_sk#111] Join condition: None +(69) Project [codegen id : 21] +Output [6]: [date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112] +Input [8]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_sk#111, web_site_id#112] + +(70) ReusedExchange [Reuses operator id: 20] +Output [1]: [d_date_sk#24] + +(71) BroadcastHashJoin [codegen id : 21] +Left keys [1]: [date_sk#91] +Right keys [1]: [cast(d_date_sk#24 as bigint)] +Join condition: None + (72) Project [codegen id : 21] Output [5]: [sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112] -Input [7]: [wsr_web_site_sk#90, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_sk#111, web_site_id#112] +Input [7]: [date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112, d_date_sk#24] (73) HashAggregate [codegen id : 21] Input [5]: [sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112] @@ -411,7 +411,7 @@ Results [5]: [web_site_id#112, sum#118, sum#119, sum#120, sum#121] (74) Exchange Input [5]: [web_site_id#112, sum#118, sum#119, sum#120, sum#121] -Arguments: hashpartitioning(web_site_id#112, 5), true, [id=#122] +Arguments: hashpartitioning(web_site_id#112, 5), ENSURE_REQUIREMENTS, [id=#122] (75) HashAggregate [codegen id : 22] Input [5]: [web_site_id#112, sum#118, sum#119, sum#120, sum#121] @@ -435,7 +435,7 @@ Results [9]: [channel#132, id#133, spark_grouping_id#134, sum#141, isEmpty#142, (79) Exchange Input [9]: [channel#132, id#133, spark_grouping_id#134, sum#141, isEmpty#142, sum#143, isEmpty#144, sum#145, isEmpty#146] -Arguments: hashpartitioning(channel#132, id#133, spark_grouping_id#134, 5), true, [id=#147] +Arguments: hashpartitioning(channel#132, id#133, spark_grouping_id#134, 5), ENSURE_REQUIREMENTS, [id=#147] (80) HashAggregate [codegen id : 24] Input [9]: [channel#132, id#133, spark_grouping_id#134, sum#141, isEmpty#142, sum#143, isEmpty#144, sum#145, isEmpty#146] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/simplified.txt index 80b07a3712d36..2db6cf767729d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q5.sf100/simplified.txt @@ -15,9 +15,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] WholeStageCodegen (5) HashAggregate [s_store_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] Project [sales_price,profit,return_amt,net_loss,s_store_id] - BroadcastHashJoin [store_sk,s_store_sk] - Project 
[store_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] + BroadcastHashJoin [date_sk,d_date_sk] + Project [date_sk,sales_price,profit,return_amt,net_loss,s_store_id] + BroadcastHashJoin [store_sk,s_store_sk] InputAdapter Union WholeStageCodegen (1) @@ -35,18 +35,18 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] InputAdapter BroadcastExchange #3 WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_store_id] InputAdapter BroadcastExchange #4 WholeStageCodegen (4) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_store_id] + Project [d_date_sk] + Filter [d_date,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date] WholeStageCodegen (12) HashAggregate [cp_catalog_page_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),sales,RETURNS,profit,channel,id,sum,sum,sum,sum] InputAdapter @@ -54,9 +54,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] WholeStageCodegen (11) HashAggregate [cp_catalog_page_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] Project [sales_price,profit,return_amt,net_loss,cp_catalog_page_id] - BroadcastHashJoin [page_sk,cp_catalog_page_sk] - Project [page_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] + BroadcastHashJoin [date_sk,d_date_sk] + Project [date_sk,sales_price,profit,return_amt,net_loss,cp_catalog_page_id] + BroadcastHashJoin [page_sk,cp_catalog_page_sk] InputAdapter Union WholeStageCodegen (7) @@ -72,14 +72,14 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] InputAdapter Scan parquet default.catalog_returns [cr_returned_date_sk,cr_catalog_page_sk,cr_return_amount,cr_net_loss] InputAdapter - ReusedExchange [d_date_sk] #3 + BroadcastExchange #6 + WholeStageCodegen (9) + Filter [cp_catalog_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] InputAdapter - BroadcastExchange #6 - WholeStageCodegen (10) - Filter [cp_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] + ReusedExchange [d_date_sk] #4 WholeStageCodegen (22) HashAggregate [web_site_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),sales,RETURNS,profit,channel,id,sum,sum,sum,sum] InputAdapter @@ -87,9 +87,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] WholeStageCodegen (21) HashAggregate [web_site_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] Project [sales_price,profit,return_amt,net_loss,web_site_id] - BroadcastHashJoin [wsr_web_site_sk,web_site_sk] - Project [wsr_web_site_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] + BroadcastHashJoin [date_sk,d_date_sk] + Project [date_sk,sales_price,profit,return_amt,net_loss,web_site_id] + BroadcastHashJoin [wsr_web_site_sk,web_site_sk] InputAdapter Union WholeStageCodegen (13) @@ -122,11 +122,11 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] InputAdapter Scan parquet default.web_sales [ws_item_sk,ws_web_site_sk,ws_order_number] InputAdapter - ReusedExchange 
[d_date_sk] #3 + BroadcastExchange #10 + WholeStageCodegen (19) + Filter [web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_site [web_site_sk,web_site_id] InputAdapter - BroadcastExchange #10 - WholeStageCodegen (20) - Filter [web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_site [web_site_sk,web_site_id] + ReusedExchange [d_date_sk] #4 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt index df1197d7c925e..69678ef86a0fc 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/explain.txt @@ -25,15 +25,15 @@ TakeOrderedAndProject (35) +- * Sort (29) +- Exchange (28) +- * Project (27) - +- * BroadcastHashJoin Inner BuildLeft (26) - :- BroadcastExchange (22) - : +- * Project (21) - : +- * Filter (20) - : +- * ColumnarToRow (19) - : +- Scan parquet default.date_dim (18) - +- * Filter (25) - +- * ColumnarToRow (24) - +- Scan parquet default.store_returns (23) + +- * BroadcastHashJoin Inner BuildRight (26) + :- * Filter (20) + : +- * ColumnarToRow (19) + : +- Scan parquet default.store_returns (18) + +- BroadcastExchange (25) + +- * Project (24) + +- * Filter (23) + +- * ColumnarToRow (22) + +- Scan parquet default.date_dim (21) (1) Scan parquet default.store_sales @@ -106,91 +106,91 @@ Input [16]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, s (16) Exchange Input [14]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] -Arguments: hashpartitioning(cast(ss_ticket_number#5 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_customer_sk#3 as bigint), 5), true, [id=#20] +Arguments: hashpartitioning(cast(ss_ticket_number#5 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_customer_sk#3 as bigint), 5), ENSURE_REQUIREMENTS, [id=#20] (17) Sort [codegen id : 4] Input [14]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] Arguments: [cast(ss_ticket_number#5 as bigint) ASC NULLS FIRST, cast(ss_item_sk#2 as bigint) ASC NULLS FIRST, cast(ss_customer_sk#3 as bigint) ASC NULLS FIRST], false, 0 -(18) Scan parquet default.date_dim -Output [3]: [d_date_sk#21, d_year#22, d_moy#23] +(18) Scan parquet default.store_returns +Output [4]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,8), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/store_returns] +PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk), IsNotNull(sr_customer_sk), IsNotNull(sr_returned_date_sk)] +ReadSchema: struct -(19) ColumnarToRow [codegen id : 5] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] +(19) ColumnarToRow [codegen id : 6] +Input [4]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24] -(20) Filter [codegen id : 5] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] 
-Condition : ((((isnotnull(d_year#22) AND isnotnull(d_moy#23)) AND (d_year#22 = 2001)) AND (d_moy#23 = 8)) AND isnotnull(d_date_sk#21)) +(20) Filter [codegen id : 6] +Input [4]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24] +Condition : (((isnotnull(sr_ticket_number#24) AND isnotnull(sr_item_sk#22)) AND isnotnull(sr_customer_sk#23)) AND isnotnull(sr_returned_date_sk#21)) -(21) Project [codegen id : 5] -Output [1]: [d_date_sk#21] -Input [3]: [d_date_sk#21, d_year#22, d_moy#23] +(21) Scan parquet default.date_dim +Output [3]: [d_date_sk#25, d_year#26, d_moy#27] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2001), EqualTo(d_moy,8), IsNotNull(d_date_sk)] +ReadSchema: struct -(22) BroadcastExchange -Input [1]: [d_date_sk#21] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#24] +(22) ColumnarToRow [codegen id : 5] +Input [3]: [d_date_sk#25, d_year#26, d_moy#27] -(23) Scan parquet default.store_returns -Output [4]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store_returns] -PushedFilters: [IsNotNull(sr_ticket_number), IsNotNull(sr_item_sk), IsNotNull(sr_customer_sk), IsNotNull(sr_returned_date_sk)] -ReadSchema: struct +(23) Filter [codegen id : 5] +Input [3]: [d_date_sk#25, d_year#26, d_moy#27] +Condition : ((((isnotnull(d_year#26) AND isnotnull(d_moy#27)) AND (d_year#26 = 2001)) AND (d_moy#27 = 8)) AND isnotnull(d_date_sk#25)) -(24) ColumnarToRow -Input [4]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] +(24) Project [codegen id : 5] +Output [1]: [d_date_sk#25] +Input [3]: [d_date_sk#25, d_year#26, d_moy#27] -(25) Filter -Input [4]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] -Condition : (((isnotnull(sr_ticket_number#28) AND isnotnull(sr_item_sk#26)) AND isnotnull(sr_customer_sk#27)) AND isnotnull(sr_returned_date_sk#25)) +(25) BroadcastExchange +Input [1]: [d_date_sk#25] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#28] (26) BroadcastHashJoin [codegen id : 6] -Left keys [1]: [cast(d_date_sk#21 as bigint)] -Right keys [1]: [sr_returned_date_sk#25] +Left keys [1]: [sr_returned_date_sk#21] +Right keys [1]: [cast(d_date_sk#25 as bigint)] Join condition: None (27) Project [codegen id : 6] -Output [4]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] -Input [5]: [d_date_sk#21, sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] +Output [4]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24] +Input [5]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24, d_date_sk#25] (28) Exchange -Input [4]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] -Arguments: hashpartitioning(sr_ticket_number#28, sr_item_sk#26, sr_customer_sk#27, 5), true, [id=#29] +Input [4]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24] +Arguments: hashpartitioning(sr_ticket_number#24, sr_item_sk#22, sr_customer_sk#23, 5), ENSURE_REQUIREMENTS, [id=#29] (29) Sort [codegen id : 7] -Input [4]: [sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] -Arguments: [sr_ticket_number#28 ASC NULLS FIRST, sr_item_sk#26 ASC NULLS FIRST, 
sr_customer_sk#27 ASC NULLS FIRST], false, 0 +Input [4]: [sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24] +Arguments: [sr_ticket_number#24 ASC NULLS FIRST, sr_item_sk#22 ASC NULLS FIRST, sr_customer_sk#23 ASC NULLS FIRST], false, 0 (30) SortMergeJoin [codegen id : 8] Left keys [3]: [cast(ss_ticket_number#5 as bigint), cast(ss_item_sk#2 as bigint), cast(ss_customer_sk#3 as bigint)] -Right keys [3]: [sr_ticket_number#28, sr_item_sk#26, sr_customer_sk#27] +Right keys [3]: [sr_ticket_number#24, sr_item_sk#22, sr_customer_sk#23] Join condition: None (31) Project [codegen id : 8] -Output [12]: [ss_sold_date_sk#1, sr_returned_date_sk#25, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] -Input [18]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, sr_returned_date_sk#25, sr_item_sk#26, sr_customer_sk#27, sr_ticket_number#28] +Output [12]: [ss_sold_date_sk#1, sr_returned_date_sk#21, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] +Input [18]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_ticket_number#5, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, sr_returned_date_sk#21, sr_item_sk#22, sr_customer_sk#23, sr_ticket_number#24] (32) HashAggregate [codegen id : 8] -Input [12]: [ss_sold_date_sk#1, sr_returned_date_sk#25, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] +Input [12]: [ss_sold_date_sk#1, sr_returned_date_sk#21, s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] Keys [10]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] -Functions [5]: [partial_sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))] +Functions [5]: [partial_sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((sr_returned_date_sk#21 - 
cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)] Aggregate Attributes [5]: [sum#30, sum#31, sum#32, sum#33, sum#34] Results [15]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, sum#35, sum#36, sum#37, sum#38, sum#39] (33) Exchange Input [15]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, sum#35, sum#36, sum#37, sum#38, sum#39] -Arguments: hashpartitioning(s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, 5), true, [id=#40] +Arguments: hashpartitioning(s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, 5), ENSURE_REQUIREMENTS, [id=#40] (34) HashAggregate [codegen id : 9] Input [15]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, sum#35, sum#36, sum#37, sum#38, sum#39] Keys [10]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18] -Functions [5]: [sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [5]: [sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint))#41, sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint))#42, sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint))#43, sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint))#44, sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))#45] -Results [15]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, 
s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint))#41 AS 30 days #46, sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint))#42 AS 31 - 60 days #47, sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint))#43 AS 61 - 90 days #48, sum(cast(CASE WHEN (((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint))#44 AS 91 - 120 days #49, sum(cast(CASE WHEN ((sr_returned_date_sk#25 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))#45 AS >120 days #50] +Functions [5]: [sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END), sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END), sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END), sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END), sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)] +Aggregate Attributes [5]: [sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END)#41, sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END)#42, sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END)#43, sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END)#44, sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)#45] +Results [15]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END)#41 AS 30 days #46, sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END)#42 AS 31 - 60 days #47, sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END)#43 AS 61 - 90 days #48, sum(CASE WHEN (((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END)#44 AS 91 - 120 days #49, sum(CASE WHEN ((sr_returned_date_sk#21 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)#45 AS >120 
days #50] (35) TakeOrderedAndProject Input [15]: [s_store_name#9, s_company_id#10, s_street_number#11, s_street_name#12, s_street_type#13, s_suite_number#14, s_city#15, s_county#16, s_state#17, s_zip#18, 30 days #46, 31 - 60 days #47, 61 - 90 days #48, 91 - 120 days #49, >120 days #50] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/simplified.txt index 5d6f38e882a5c..02ab8c946fd31 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50.sf100/simplified.txt @@ -1,6 +1,6 @@ TakeOrderedAndProject [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip,30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ] WholeStageCodegen (9) - HashAggregate [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip,sum,sum,sum,sum,sum] [sum(cast(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 30) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 30) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 60) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 90) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 120) THEN 1 ELSE 0 END as bigint)),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] + HashAggregate [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip,sum,sum,sum,sum,sum] [sum(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 30) THEN 1 ELSE 0 END),sum(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 30) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 60)) THEN 1 ELSE 0 END),sum(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 60) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 90)) THEN 1 ELSE 0 END),sum(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 90) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 120)) THEN 1 ELSE 0 END),sum(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 120) THEN 1 ELSE 0 END),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] InputAdapter Exchange [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip] #1 WholeStageCodegen (8) @@ -42,7 +42,11 @@ TakeOrderedAndProject [s_store_name,s_company_id,s_street_number,s_street_name,s Exchange [sr_ticket_number,sr_item_sk,sr_customer_sk] #5 WholeStageCodegen (6) Project [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number] - BroadcastHashJoin [d_date_sk,sr_returned_date_sk] + BroadcastHashJoin [sr_returned_date_sk,d_date_sk] + Filter [sr_ticket_number,sr_item_sk,sr_customer_sk,sr_returned_date_sk] + ColumnarToRow + InputAdapter 
+ Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number] InputAdapter BroadcastExchange #6 WholeStageCodegen (5) @@ -51,7 +55,3 @@ TakeOrderedAndProject [s_store_name,s_company_id,s_street_number,s_street_name,s ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - Filter [sr_ticket_number,sr_item_sk,sr_customer_sk,sr_returned_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_item_sk,sr_customer_sk,sr_ticket_number] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt index e083affa7261d..ecbd3ab5d3471 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/explain.txt @@ -162,20 +162,20 @@ Input [13]: [ss_sold_date_sk#1, sr_returned_date_sk#6, s_store_name#12, s_compan (29) HashAggregate [codegen id : 5] Input [12]: [ss_sold_date_sk#1, sr_returned_date_sk#6, s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21] Keys [10]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21] -Functions [5]: [partial_sum(cast(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))] +Functions [5]: [partial_sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)] Aggregate Attributes [5]: [sum#29, sum#30, sum#31, sum#32, sum#33] Results [15]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21, sum#34, sum#35, sum#36, sum#37, sum#38] (30) Exchange Input [15]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, 
s_county#19, s_state#20, s_zip#21, sum#34, sum#35, sum#36, sum#37, sum#38] -Arguments: hashpartitioning(s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21, 5), true, [id=#39] +Arguments: hashpartitioning(s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21, 5), ENSURE_REQUIREMENTS, [id=#39] (31) HashAggregate [codegen id : 6] Input [15]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21, sum#34, sum#35, sum#36, sum#37, sum#38] Keys [10]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21] -Functions [5]: [sum(cast(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [5]: [sum(cast(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint))#40, sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint))#41, sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint))#42, sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint))#43, sum(cast(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))#44] -Results [15]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21, sum(cast(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END as bigint))#40 AS 30 days #45, sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint))#41 AS 31 - 60 days #46, sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint))#42 AS 61 - 90 days #47, sum(cast(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint))#43 AS 91 - 120 days #48, sum(cast(CASE 
WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END as bigint))#44 AS >120 days #49] +Functions [5]: [sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END), sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END), sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END), sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END), sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)] +Aggregate Attributes [5]: [sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END)#40, sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END)#41, sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END)#42, sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END)#43, sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)#44] +Results [15]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21, sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 30) THEN 1 ELSE 0 END)#40 AS 30 days #45, sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 30) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 60)) THEN 1 ELSE 0 END)#41 AS 31 - 60 days #46, sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 60) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 90)) THEN 1 ELSE 0 END)#42 AS 61 - 90 days #47, sum(CASE WHEN (((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 90) AND ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) <= 120)) THEN 1 ELSE 0 END)#43 AS 91 - 120 days #48, sum(CASE WHEN ((sr_returned_date_sk#6 - cast(ss_sold_date_sk#1 as bigint)) > 120) THEN 1 ELSE 0 END)#44 AS >120 days #49] (32) TakeOrderedAndProject Input [15]: [s_store_name#12, s_company_id#13, s_street_number#14, s_street_name#15, s_street_type#16, s_suite_number#17, s_city#18, s_county#19, s_state#20, s_zip#21, 30 days #45, 31 - 60 days #46, 61 - 90 days #47, 91 - 120 days #48, >120 days #49] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/simplified.txt index 43e7773855595..4ab50bf6c135d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q50/simplified.txt @@ -1,6 +1,6 @@ TakeOrderedAndProject [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip,30 days ,31 - 60 days ,61 - 90 days ,91 - 120 
days ,>120 days ] WholeStageCodegen (6) - HashAggregate [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip,sum,sum,sum,sum,sum] [sum(cast(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 30) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 30) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 60)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 60) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 90)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 90) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 120)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 120) THEN 1 ELSE 0 END as bigint)),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] + HashAggregate [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip,sum,sum,sum,sum,sum] [sum(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 30) THEN 1 ELSE 0 END),sum(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 30) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 60)) THEN 1 ELSE 0 END),sum(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 60) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 90)) THEN 1 ELSE 0 END),sum(CASE WHEN (((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 90) AND ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) <= 120)) THEN 1 ELSE 0 END),sum(CASE WHEN ((sr_returned_date_skL - cast(ss_sold_date_sk as bigint)) > 120) THEN 1 ELSE 0 END),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] InputAdapter Exchange [s_store_name,s_company_id,s_street_number,s_street_name,s_street_type,s_suite_number,s_city,s_county,s_state,s_zip] #1 WholeStageCodegen (5) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt index d7a8c103285cb..6492918d3aa13 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/explain.txt @@ -6,117 +6,117 @@ TakeOrderedAndProject (21) +- * Project (17) +- * BroadcastHashJoin Inner BuildRight (16) :- * Project (10) - : +- * BroadcastHashJoin Inner BuildRight (9) - : :- * Filter (3) - : : +- * ColumnarToRow (2) - : : +- Scan parquet default.store_sales (1) - : +- BroadcastExchange (8) - : +- * Project (7) - : +- * Filter (6) - : +- * ColumnarToRow (5) - : +- Scan parquet default.item (4) + : +- * BroadcastHashJoin Inner BuildLeft (9) + : :- BroadcastExchange (5) + : : +- * Project (4) + : : +- * Filter (3) + : : +- * ColumnarToRow (2) + : : +- Scan parquet default.date_dim (1) + : +- * Filter (8) + : +- * ColumnarToRow (7) + : +- Scan parquet default.store_sales (6) +- BroadcastExchange (15) +- * Project (14) +- * Filter (13) +- * ColumnarToRow (12) - +- Scan parquet default.date_dim (11) + +- Scan parquet default.item (11) -(1) Scan parquet default.store_sales -Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] 
+(1) Scan parquet default.date_dim +Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2000), IsNotNull(d_date_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 3] -Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] +(2) ColumnarToRow [codegen id : 1] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] -(3) Filter [codegen id : 3] -Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] -Condition : (isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) +(3) Filter [codegen id : 1] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] +Condition : ((((isnotnull(d_moy#3) AND isnotnull(d_year#2)) AND (d_moy#3 = 11)) AND (d_year#2 = 2000)) AND isnotnull(d_date_sk#1)) -(4) Scan parquet default.item -Output [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] -Batched: true -Location [not included in comparison]/{warehouse_dir}/item] -PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] -ReadSchema: struct +(4) Project [codegen id : 1] +Output [2]: [d_date_sk#1, d_year#2] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] -(5) ColumnarToRow [codegen id : 1] -Input [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] +(5) BroadcastExchange +Input [2]: [d_date_sk#1, d_year#2] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#4] -(6) Filter [codegen id : 1] -Input [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] -Condition : ((isnotnull(i_manager_id#7) AND (i_manager_id#7 = 1)) AND isnotnull(i_item_sk#4)) +(6) Scan parquet default.store_sales +Output [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] +Batched: true +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] +ReadSchema: struct -(7) Project [codegen id : 1] -Output [3]: [i_item_sk#4, i_brand_id#5, i_brand#6] -Input [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] +(7) ColumnarToRow +Input [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] -(8) BroadcastExchange -Input [3]: [i_item_sk#4, i_brand_id#5, i_brand#6] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#8] +(8) Filter +Input [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] +Condition : (isnotnull(ss_sold_date_sk#5) AND isnotnull(ss_item_sk#6)) (9) BroadcastHashJoin [codegen id : 3] -Left keys [1]: [ss_item_sk#2] -Right keys [1]: [i_item_sk#4] +Left keys [1]: [d_date_sk#1] +Right keys [1]: [ss_sold_date_sk#5] Join condition: None (10) Project [codegen id : 3] -Output [4]: [ss_sold_date_sk#1, ss_ext_sales_price#3, i_brand_id#5, i_brand#6] -Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4, i_brand_id#5, i_brand#6] +Output [3]: [d_year#2, ss_item_sk#6, ss_ext_sales_price#7] +Input [5]: [d_date_sk#1, d_year#2, ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] -(11) Scan parquet default.date_dim -Output [3]: [d_date_sk#9, d_year#10, d_moy#11] +(11) Scan parquet default.item +Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_moy), 
IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,2000), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/item] +PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,1), IsNotNull(i_item_sk)] +ReadSchema: struct (12) ColumnarToRow [codegen id : 2] -Input [3]: [d_date_sk#9, d_year#10, d_moy#11] +Input [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] (13) Filter [codegen id : 2] -Input [3]: [d_date_sk#9, d_year#10, d_moy#11] -Condition : ((((isnotnull(d_moy#11) AND isnotnull(d_year#10)) AND (d_moy#11 = 11)) AND (d_year#10 = 2000)) AND isnotnull(d_date_sk#9)) +Input [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] +Condition : ((isnotnull(i_manager_id#11) AND (i_manager_id#11 = 1)) AND isnotnull(i_item_sk#8)) (14) Project [codegen id : 2] -Output [2]: [d_date_sk#9, d_year#10] -Input [3]: [d_date_sk#9, d_year#10, d_moy#11] +Output [3]: [i_item_sk#8, i_brand_id#9, i_brand#10] +Input [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] (15) BroadcastExchange -Input [2]: [d_date_sk#9, d_year#10] +Input [3]: [i_item_sk#8, i_brand_id#9, i_brand#10] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#12] (16) BroadcastHashJoin [codegen id : 3] -Left keys [1]: [ss_sold_date_sk#1] -Right keys [1]: [d_date_sk#9] +Left keys [1]: [ss_item_sk#6] +Right keys [1]: [i_item_sk#8] Join condition: None (17) Project [codegen id : 3] -Output [4]: [d_year#10, ss_ext_sales_price#3, i_brand_id#5, i_brand#6] -Input [6]: [ss_sold_date_sk#1, ss_ext_sales_price#3, i_brand_id#5, i_brand#6, d_date_sk#9, d_year#10] +Output [4]: [d_year#2, ss_ext_sales_price#7, i_brand_id#9, i_brand#10] +Input [6]: [d_year#2, ss_item_sk#6, ss_ext_sales_price#7, i_item_sk#8, i_brand_id#9, i_brand#10] (18) HashAggregate [codegen id : 3] -Input [4]: [d_year#10, ss_ext_sales_price#3, i_brand_id#5, i_brand#6] -Keys [3]: [d_year#10, i_brand#6, i_brand_id#5] -Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#3))] +Input [4]: [d_year#2, ss_ext_sales_price#7, i_brand_id#9, i_brand#10] +Keys [3]: [d_year#2, i_brand#10, i_brand_id#9] +Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#7))] Aggregate Attributes [1]: [sum#13] -Results [4]: [d_year#10, i_brand#6, i_brand_id#5, sum#14] +Results [4]: [d_year#2, i_brand#10, i_brand_id#9, sum#14] (19) Exchange -Input [4]: [d_year#10, i_brand#6, i_brand_id#5, sum#14] -Arguments: hashpartitioning(d_year#10, i_brand#6, i_brand_id#5, 5), true, [id=#15] +Input [4]: [d_year#2, i_brand#10, i_brand_id#9, sum#14] +Arguments: hashpartitioning(d_year#2, i_brand#10, i_brand_id#9, 5), ENSURE_REQUIREMENTS, [id=#15] (20) HashAggregate [codegen id : 4] -Input [4]: [d_year#10, i_brand#6, i_brand_id#5, sum#14] -Keys [3]: [d_year#10, i_brand#6, i_brand_id#5] -Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#3))] -Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#3))#16] -Results [4]: [d_year#10, i_brand_id#5 AS brand_id#17, i_brand#6 AS brand#18, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#3))#16,17,2) AS ext_price#19] +Input [4]: [d_year#2, i_brand#10, i_brand_id#9, sum#14] +Keys [3]: [d_year#2, i_brand#10, i_brand_id#9] +Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#7))] +Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#7))#16] +Results [4]: [d_year#2, i_brand_id#9 AS brand_id#17, i_brand#10 AS brand#18, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#7))#16,17,2) AS ext_price#19] (21) TakeOrderedAndProject -Input [4]: 
[d_year#10, brand_id#17, brand#18, ext_price#19] -Arguments: 100, [d_year#10 ASC NULLS FIRST, ext_price#19 DESC NULLS LAST, brand_id#17 ASC NULLS FIRST], [d_year#10, brand_id#17, brand#18, ext_price#19] +Input [4]: [d_year#2, brand_id#17, brand#18, ext_price#19] +Arguments: 100, [d_year#2 ASC NULLS FIRST, ext_price#19 DESC NULLS LAST, brand_id#17 ASC NULLS FIRST], [d_year#2, brand_id#17, brand#18, ext_price#19] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/simplified.txt index 8ed500d84390c..f4aaf3df75135 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q52.sf100/simplified.txt @@ -6,26 +6,26 @@ TakeOrderedAndProject [d_year,ext_price,brand_id,brand] WholeStageCodegen (3) HashAggregate [d_year,i_brand,i_brand_id,ss_ext_sales_price] [sum,sum] Project [d_year,ss_ext_sales_price,i_brand_id,i_brand] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_ext_sales_price,i_brand_id,i_brand] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_sold_date_sk,ss_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [d_year,ss_item_sk,ss_ext_sales_price] + BroadcastHashJoin [d_date_sk,ss_sold_date_sk] InputAdapter BroadcastExchange #2 WholeStageCodegen (1) - Project [i_item_sk,i_brand_id,i_brand] - Filter [i_manager_id,i_item_sk] + Project [d_date_sk,d_year] + Filter [d_moy,d_year,d_date_sk] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_brand,i_manager_id] + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + Filter [ss_sold_date_sk,ss_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ext_sales_price] InputAdapter BroadcastExchange #3 WholeStageCodegen (2) - Project [d_date_sk,d_year] - Filter [d_moy,d_year,d_date_sk] + Project [i_item_sk,i_brand_id,i_brand] + Filter [i_manager_id,i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + Scan parquet default.item [i_item_sk,i_brand_id,i_brand,i_manager_id] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt index d78565986bc0a..a504149b00b94 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/explain.txt @@ -1,494 +1,474 @@ == Physical Plan == -TakeOrderedAndProject (71) -+- * HashAggregate (70) - +- Exchange (69) - +- * HashAggregate (68) - +- * HashAggregate (67) - +- Exchange (66) - +- * HashAggregate (65) - +- * Project (64) - +- * BroadcastHashJoin Inner BuildRight (63) - :- * Project (57) - : +- * BroadcastHashJoin Inner BuildRight (56) - : :- * Project (51) - : : +- * SortMergeJoin Inner (50) - : : :- * Sort (44) - : : : +- Exchange (43) - : : : +- * Project (42) - : : : +- * SortMergeJoin Inner (41) - : : : :- * Sort (35) - : : : : +- * HashAggregate (34) - : : : : +- * HashAggregate (33) - : : : : +- * Project (32) - : : : : +- * SortMergeJoin Inner (31) - : : : : :- * Sort (25) - : : : : : +- Exchange (24) - : : : : : 
+- * Project (23) - : : : : : +- * BroadcastHashJoin Inner BuildRight (22) - : : : : : :- * Project (16) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : : : :- Union (9) - : : : : : : : :- * Project (4) - : : : : : : : : +- * Filter (3) - : : : : : : : : +- * ColumnarToRow (2) - : : : : : : : : +- Scan parquet default.catalog_sales (1) - : : : : : : : +- * Project (8) - : : : : : : : +- * Filter (7) - : : : : : : : +- * ColumnarToRow (6) - : : : : : : : +- Scan parquet default.web_sales (5) - : : : : : : +- BroadcastExchange (14) - : : : : : : +- * Project (13) - : : : : : : +- * Filter (12) - : : : : : : +- * ColumnarToRow (11) - : : : : : : +- Scan parquet default.item (10) - : : : : : +- BroadcastExchange (21) - : : : : : +- * Project (20) - : : : : : +- * Filter (19) - : : : : : +- * ColumnarToRow (18) - : : : : : +- Scan parquet default.date_dim (17) - : : : : +- * Sort (30) - : : : : +- Exchange (29) - : : : : +- * Filter (28) - : : : : +- * ColumnarToRow (27) - : : : : +- Scan parquet default.customer (26) - : : : +- * Sort (40) - : : : +- Exchange (39) - : : : +- * Filter (38) - : : : +- * ColumnarToRow (37) - : : : +- Scan parquet default.store_sales (36) - : : +- * Sort (49) - : : +- Exchange (48) - : : +- * Filter (47) - : : +- * ColumnarToRow (46) - : : +- Scan parquet default.customer_address (45) - : +- BroadcastExchange (55) - : +- * Filter (54) - : +- * ColumnarToRow (53) - : +- Scan parquet default.store (52) - +- BroadcastExchange (62) - +- * Project (61) - +- * Filter (60) - +- * ColumnarToRow (59) - +- Scan parquet default.date_dim (58) - - -(1) Scan parquet default.catalog_sales -Output [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3] +TakeOrderedAndProject (67) ++- * HashAggregate (66) + +- Exchange (65) + +- * HashAggregate (64) + +- * HashAggregate (63) + +- * HashAggregate (62) + +- * Project (61) + +- * SortMergeJoin Inner (60) + :- * Sort (47) + : +- * Project (46) + : +- * BroadcastHashJoin Inner BuildLeft (45) + : :- BroadcastExchange (10) + : : +- * Project (9) + : : +- * BroadcastHashJoin Inner BuildRight (8) + : : :- * Filter (3) + : : : +- * ColumnarToRow (2) + : : : +- Scan parquet default.customer_address (1) + : : +- BroadcastExchange (7) + : : +- * Filter (6) + : : +- * ColumnarToRow (5) + : : +- Scan parquet default.store (4) + : +- * HashAggregate (44) + : +- * HashAggregate (43) + : +- * Project (42) + : +- * SortMergeJoin Inner (41) + : :- * Sort (35) + : : +- Exchange (34) + : : +- * Project (33) + : : +- * BroadcastHashJoin Inner BuildRight (32) + : : :- * Project (26) + : : : +- * BroadcastHashJoin Inner BuildRight (25) + : : : :- Union (19) + : : : : :- * Project (14) + : : : : : +- * Filter (13) + : : : : : +- * ColumnarToRow (12) + : : : : : +- Scan parquet default.catalog_sales (11) + : : : : +- * Project (18) + : : : : +- * Filter (17) + : : : : +- * ColumnarToRow (16) + : : : : +- Scan parquet default.web_sales (15) + : : : +- BroadcastExchange (24) + : : : +- * Project (23) + : : : +- * Filter (22) + : : : +- * ColumnarToRow (21) + : : : +- Scan parquet default.date_dim (20) + : : +- BroadcastExchange (31) + : : +- * Project (30) + : : +- * Filter (29) + : : +- * ColumnarToRow (28) + : : +- Scan parquet default.item (27) + : +- * Sort (40) + : +- Exchange (39) + : +- * Filter (38) + : +- * ColumnarToRow (37) + : +- Scan parquet default.customer (36) + +- * Sort (59) + +- Exchange (58) + +- * Project (57) + +- * BroadcastHashJoin Inner BuildRight (56) + :- * Filter (50) + : +- * ColumnarToRow (49) + : +- 
Scan parquet default.store_sales (48) + +- BroadcastExchange (55) + +- * Project (54) + +- * Filter (53) + +- * ColumnarToRow (52) + +- Scan parquet default.date_dim (51) + + +(1) Scan parquet default.customer_address +Output [3]: [ca_address_sk#1, ca_county#2, ca_state#3] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_county), IsNotNull(ca_state)] +ReadSchema: struct + +(2) ColumnarToRow [codegen id : 2] +Input [3]: [ca_address_sk#1, ca_county#2, ca_state#3] + +(3) Filter [codegen id : 2] +Input [3]: [ca_address_sk#1, ca_county#2, ca_state#3] +Condition : ((isnotnull(ca_address_sk#1) AND isnotnull(ca_county#2)) AND isnotnull(ca_state#3)) + +(4) Scan parquet default.store +Output [2]: [s_county#4, s_state#5] +Batched: true +Location [not included in comparison]/{warehouse_dir}/store] +PushedFilters: [IsNotNull(s_county), IsNotNull(s_state)] +ReadSchema: struct + +(5) ColumnarToRow [codegen id : 1] +Input [2]: [s_county#4, s_state#5] + +(6) Filter [codegen id : 1] +Input [2]: [s_county#4, s_state#5] +Condition : (isnotnull(s_county#4) AND isnotnull(s_state#5)) + +(7) BroadcastExchange +Input [2]: [s_county#4, s_state#5] +Arguments: HashedRelationBroadcastMode(List(input[0, string, false], input[1, string, false]),false), [id=#6] + +(8) BroadcastHashJoin [codegen id : 2] +Left keys [2]: [ca_county#2, ca_state#3] +Right keys [2]: [s_county#4, s_state#5] +Join condition: None + +(9) Project [codegen id : 2] +Output [1]: [ca_address_sk#1] +Input [5]: [ca_address_sk#1, ca_county#2, ca_state#3, s_county#4, s_state#5] + +(10) BroadcastExchange +Input [1]: [ca_address_sk#1] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#7] + +(11) Scan parquet default.catalog_sales +Output [3]: [cs_sold_date_sk#8, cs_bill_customer_sk#9, cs_item_sk#10] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_bill_customer_sk)] ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3] +(12) ColumnarToRow [codegen id : 3] +Input [3]: [cs_sold_date_sk#8, cs_bill_customer_sk#9, cs_item_sk#10] -(3) Filter [codegen id : 1] -Input [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3] -Condition : ((isnotnull(cs_item_sk#3) AND isnotnull(cs_sold_date_sk#1)) AND isnotnull(cs_bill_customer_sk#2)) +(13) Filter [codegen id : 3] +Input [3]: [cs_sold_date_sk#8, cs_bill_customer_sk#9, cs_item_sk#10] +Condition : ((isnotnull(cs_item_sk#10) AND isnotnull(cs_sold_date_sk#8)) AND isnotnull(cs_bill_customer_sk#9)) -(4) Project [codegen id : 1] -Output [3]: [cs_sold_date_sk#1 AS sold_date_sk#4, cs_bill_customer_sk#2 AS customer_sk#5, cs_item_sk#3 AS item_sk#6] -Input [3]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_item_sk#3] +(14) Project [codegen id : 3] +Output [3]: [cs_sold_date_sk#8 AS sold_date_sk#11, cs_bill_customer_sk#9 AS customer_sk#12, cs_item_sk#10 AS item_sk#13] +Input [3]: [cs_sold_date_sk#8, cs_bill_customer_sk#9, cs_item_sk#10] -(5) Scan parquet default.web_sales -Output [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9] +(15) Scan parquet default.web_sales +Output [3]: [ws_sold_date_sk#14, ws_item_sk#15, ws_bill_customer_sk#16] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk), 
IsNotNull(ws_bill_customer_sk)] ReadSchema: struct -(6) ColumnarToRow [codegen id : 2] -Input [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9] +(16) ColumnarToRow [codegen id : 4] +Input [3]: [ws_sold_date_sk#14, ws_item_sk#15, ws_bill_customer_sk#16] -(7) Filter [codegen id : 2] -Input [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9] -Condition : ((isnotnull(ws_item_sk#8) AND isnotnull(ws_sold_date_sk#7)) AND isnotnull(ws_bill_customer_sk#9)) +(17) Filter [codegen id : 4] +Input [3]: [ws_sold_date_sk#14, ws_item_sk#15, ws_bill_customer_sk#16] +Condition : ((isnotnull(ws_item_sk#15) AND isnotnull(ws_sold_date_sk#14)) AND isnotnull(ws_bill_customer_sk#16)) -(8) Project [codegen id : 2] -Output [3]: [ws_sold_date_sk#7 AS sold_date_sk#10, ws_bill_customer_sk#9 AS customer_sk#11, ws_item_sk#8 AS item_sk#12] -Input [3]: [ws_sold_date_sk#7, ws_item_sk#8, ws_bill_customer_sk#9] +(18) Project [codegen id : 4] +Output [3]: [ws_sold_date_sk#14 AS sold_date_sk#17, ws_bill_customer_sk#16 AS customer_sk#18, ws_item_sk#15 AS item_sk#19] +Input [3]: [ws_sold_date_sk#14, ws_item_sk#15, ws_bill_customer_sk#16] -(9) Union +(19) Union -(10) Scan parquet default.item -Output [3]: [i_item_sk#13, i_class#14, i_category#15] +(20) Scan parquet default.date_dim +Output [3]: [d_date_sk#20, d_year#21, d_moy#22] Batched: true -Location [not included in comparison]/{warehouse_dir}/item] -PushedFilters: [IsNotNull(i_category), IsNotNull(i_class), EqualTo(i_category,Women), EqualTo(i_class,maternity), IsNotNull(i_item_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), IsNotNull(d_date_sk)] +ReadSchema: struct -(11) ColumnarToRow [codegen id : 3] -Input [3]: [i_item_sk#13, i_class#14, i_category#15] +(21) ColumnarToRow [codegen id : 5] +Input [3]: [d_date_sk#20, d_year#21, d_moy#22] -(12) Filter [codegen id : 3] -Input [3]: [i_item_sk#13, i_class#14, i_category#15] -Condition : ((((isnotnull(i_category#15) AND isnotnull(i_class#14)) AND (i_category#15 = Women)) AND (i_class#14 = maternity)) AND isnotnull(i_item_sk#13)) +(22) Filter [codegen id : 5] +Input [3]: [d_date_sk#20, d_year#21, d_moy#22] +Condition : ((((isnotnull(d_moy#22) AND isnotnull(d_year#21)) AND (d_moy#22 = 12)) AND (d_year#21 = 1998)) AND isnotnull(d_date_sk#20)) -(13) Project [codegen id : 3] -Output [1]: [i_item_sk#13] -Input [3]: [i_item_sk#13, i_class#14, i_category#15] +(23) Project [codegen id : 5] +Output [1]: [d_date_sk#20] +Input [3]: [d_date_sk#20, d_year#21, d_moy#22] -(14) BroadcastExchange -Input [1]: [i_item_sk#13] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] +(24) BroadcastExchange +Input [1]: [d_date_sk#20] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#23] -(15) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [item_sk#6] -Right keys [1]: [i_item_sk#13] +(25) BroadcastHashJoin [codegen id : 7] +Left keys [1]: [sold_date_sk#11] +Right keys [1]: [d_date_sk#20] Join condition: None -(16) Project [codegen id : 5] -Output [2]: [sold_date_sk#4, customer_sk#5] -Input [4]: [sold_date_sk#4, customer_sk#5, item_sk#6, i_item_sk#13] +(26) Project [codegen id : 7] +Output [2]: [customer_sk#12, item_sk#13] +Input [4]: [sold_date_sk#11, customer_sk#12, item_sk#13, d_date_sk#20] -(17) Scan parquet default.date_dim -Output [3]: [d_date_sk#17, d_year#18, d_moy#19] +(27) Scan parquet default.item 
+Output [3]: [i_item_sk#24, i_class#25, i_category#26] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,12), EqualTo(d_year,1998), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/item] +PushedFilters: [IsNotNull(i_category), IsNotNull(i_class), EqualTo(i_category,Women), EqualTo(i_class,maternity), IsNotNull(i_item_sk)] +ReadSchema: struct -(18) ColumnarToRow [codegen id : 4] -Input [3]: [d_date_sk#17, d_year#18, d_moy#19] +(28) ColumnarToRow [codegen id : 6] +Input [3]: [i_item_sk#24, i_class#25, i_category#26] -(19) Filter [codegen id : 4] -Input [3]: [d_date_sk#17, d_year#18, d_moy#19] -Condition : ((((isnotnull(d_moy#19) AND isnotnull(d_year#18)) AND (d_moy#19 = 12)) AND (d_year#18 = 1998)) AND isnotnull(d_date_sk#17)) +(29) Filter [codegen id : 6] +Input [3]: [i_item_sk#24, i_class#25, i_category#26] +Condition : ((((isnotnull(i_category#26) AND isnotnull(i_class#25)) AND (i_category#26 = Women)) AND (i_class#25 = maternity)) AND isnotnull(i_item_sk#24)) -(20) Project [codegen id : 4] -Output [1]: [d_date_sk#17] -Input [3]: [d_date_sk#17, d_year#18, d_moy#19] +(30) Project [codegen id : 6] +Output [1]: [i_item_sk#24] +Input [3]: [i_item_sk#24, i_class#25, i_category#26] -(21) BroadcastExchange -Input [1]: [d_date_sk#17] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#20] +(31) BroadcastExchange +Input [1]: [i_item_sk#24] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#27] -(22) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [sold_date_sk#4] -Right keys [1]: [d_date_sk#17] +(32) BroadcastHashJoin [codegen id : 7] +Left keys [1]: [item_sk#13] +Right keys [1]: [i_item_sk#24] Join condition: None -(23) Project [codegen id : 5] -Output [1]: [customer_sk#5] -Input [3]: [sold_date_sk#4, customer_sk#5, d_date_sk#17] +(33) Project [codegen id : 7] +Output [1]: [customer_sk#12] +Input [3]: [customer_sk#12, item_sk#13, i_item_sk#24] -(24) Exchange -Input [1]: [customer_sk#5] -Arguments: hashpartitioning(customer_sk#5, 5), true, [id=#21] +(34) Exchange +Input [1]: [customer_sk#12] +Arguments: hashpartitioning(customer_sk#12, 5), ENSURE_REQUIREMENTS, [id=#28] -(25) Sort [codegen id : 6] -Input [1]: [customer_sk#5] -Arguments: [customer_sk#5 ASC NULLS FIRST], false, 0 +(35) Sort [codegen id : 8] +Input [1]: [customer_sk#12] +Arguments: [customer_sk#12 ASC NULLS FIRST], false, 0 -(26) Scan parquet default.customer -Output [2]: [c_customer_sk#22, c_current_addr_sk#23] +(36) Scan parquet default.customer +Output [2]: [c_customer_sk#29, c_current_addr_sk#30] Batched: true Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct -(27) ColumnarToRow [codegen id : 7] -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] +(37) ColumnarToRow [codegen id : 9] +Input [2]: [c_customer_sk#29, c_current_addr_sk#30] -(28) Filter [codegen id : 7] -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] -Condition : (isnotnull(c_customer_sk#22) AND isnotnull(c_current_addr_sk#23)) +(38) Filter [codegen id : 9] +Input [2]: [c_customer_sk#29, c_current_addr_sk#30] +Condition : (isnotnull(c_customer_sk#29) AND isnotnull(c_current_addr_sk#30)) -(29) Exchange -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] -Arguments: hashpartitioning(c_customer_sk#22, 5), true, [id=#24] +(39) 
Exchange +Input [2]: [c_customer_sk#29, c_current_addr_sk#30] +Arguments: hashpartitioning(c_customer_sk#29, 5), ENSURE_REQUIREMENTS, [id=#31] -(30) Sort [codegen id : 8] -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] -Arguments: [c_customer_sk#22 ASC NULLS FIRST], false, 0 +(40) Sort [codegen id : 10] +Input [2]: [c_customer_sk#29, c_current_addr_sk#30] +Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 -(31) SortMergeJoin [codegen id : 9] -Left keys [1]: [customer_sk#5] -Right keys [1]: [c_customer_sk#22] +(41) SortMergeJoin +Left keys [1]: [customer_sk#12] +Right keys [1]: [c_customer_sk#29] Join condition: None -(32) Project [codegen id : 9] -Output [2]: [c_customer_sk#22, c_current_addr_sk#23] -Input [3]: [customer_sk#5, c_customer_sk#22, c_current_addr_sk#23] +(42) Project +Output [2]: [c_customer_sk#29, c_current_addr_sk#30] +Input [3]: [customer_sk#12, c_customer_sk#29, c_current_addr_sk#30] -(33) HashAggregate [codegen id : 9] -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] -Keys [2]: [c_customer_sk#22, c_current_addr_sk#23] +(43) HashAggregate +Input [2]: [c_customer_sk#29, c_current_addr_sk#30] +Keys [2]: [c_customer_sk#29, c_current_addr_sk#30] Functions: [] Aggregate Attributes: [] -Results [2]: [c_customer_sk#22, c_current_addr_sk#23] +Results [2]: [c_customer_sk#29, c_current_addr_sk#30] -(34) HashAggregate [codegen id : 9] -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] -Keys [2]: [c_customer_sk#22, c_current_addr_sk#23] +(44) HashAggregate +Input [2]: [c_customer_sk#29, c_current_addr_sk#30] +Keys [2]: [c_customer_sk#29, c_current_addr_sk#30] Functions: [] Aggregate Attributes: [] -Results [2]: [c_customer_sk#22, c_current_addr_sk#23] +Results [2]: [c_customer_sk#29, c_current_addr_sk#30] -(35) Sort [codegen id : 9] -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] -Arguments: [c_customer_sk#22 ASC NULLS FIRST], false, 0 +(45) BroadcastHashJoin [codegen id : 11] +Left keys [1]: [ca_address_sk#1] +Right keys [1]: [c_current_addr_sk#30] +Join condition: None -(36) Scan parquet default.store_sales -Output [3]: [ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27] +(46) Project [codegen id : 11] +Output [1]: [c_customer_sk#29] +Input [3]: [ca_address_sk#1, c_customer_sk#29, c_current_addr_sk#30] + +(47) Sort [codegen id : 11] +Input [1]: [c_customer_sk#29] +Arguments: [c_customer_sk#29 ASC NULLS FIRST], false, 0 + +(48) Scan parquet default.store_sales +Output [3]: [ss_sold_date_sk#32, ss_customer_sk#33, ss_ext_sales_price#34] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_customer_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(37) ColumnarToRow [codegen id : 10] -Input [3]: [ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27] - -(38) Filter [codegen id : 10] -Input [3]: [ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27] -Condition : (isnotnull(ss_customer_sk#26) AND isnotnull(ss_sold_date_sk#25)) - -(39) Exchange -Input [3]: [ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27] -Arguments: hashpartitioning(ss_customer_sk#26, 5), true, [id=#28] - -(40) Sort [codegen id : 11] -Input [3]: [ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27] -Arguments: [ss_customer_sk#26 ASC NULLS FIRST], false, 0 - -(41) SortMergeJoin [codegen id : 12] -Left keys [1]: [c_customer_sk#22] -Right keys [1]: [ss_customer_sk#26] -Join condition: None - -(42) Project [codegen id : 12] -Output [4]: [c_customer_sk#22, c_current_addr_sk#23, ss_sold_date_sk#25, 
ss_ext_sales_price#27] -Input [5]: [c_customer_sk#22, c_current_addr_sk#23, ss_sold_date_sk#25, ss_customer_sk#26, ss_ext_sales_price#27] +(49) ColumnarToRow [codegen id : 13] +Input [3]: [ss_sold_date_sk#32, ss_customer_sk#33, ss_ext_sales_price#34] -(43) Exchange -Input [4]: [c_customer_sk#22, c_current_addr_sk#23, ss_sold_date_sk#25, ss_ext_sales_price#27] -Arguments: hashpartitioning(c_current_addr_sk#23, 5), true, [id=#29] +(50) Filter [codegen id : 13] +Input [3]: [ss_sold_date_sk#32, ss_customer_sk#33, ss_ext_sales_price#34] +Condition : (isnotnull(ss_customer_sk#33) AND isnotnull(ss_sold_date_sk#32)) -(44) Sort [codegen id : 13] -Input [4]: [c_customer_sk#22, c_current_addr_sk#23, ss_sold_date_sk#25, ss_ext_sales_price#27] -Arguments: [c_current_addr_sk#23 ASC NULLS FIRST], false, 0 - -(45) Scan parquet default.customer_address -Output [3]: [ca_address_sk#30, ca_county#31, ca_state#32] +(51) Scan parquet default.date_dim +Output [2]: [d_date_sk#20, d_month_seq#35] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_county), IsNotNull(ca_state)] -ReadSchema: struct - -(46) ColumnarToRow [codegen id : 14] -Input [3]: [ca_address_sk#30, ca_county#31, ca_state#32] - -(47) Filter [codegen id : 14] -Input [3]: [ca_address_sk#30, ca_county#31, ca_state#32] -Condition : ((isnotnull(ca_address_sk#30) AND isnotnull(ca_county#31)) AND isnotnull(ca_state#32)) - -(48) Exchange -Input [3]: [ca_address_sk#30, ca_county#31, ca_state#32] -Arguments: hashpartitioning(ca_address_sk#30, 5), true, [id=#33] - -(49) Sort [codegen id : 15] -Input [3]: [ca_address_sk#30, ca_county#31, ca_state#32] -Arguments: [ca_address_sk#30 ASC NULLS FIRST], false, 0 - -(50) SortMergeJoin [codegen id : 18] -Left keys [1]: [c_current_addr_sk#23] -Right keys [1]: [ca_address_sk#30] -Join condition: None +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] +ReadSchema: struct -(51) Project [codegen id : 18] -Output [5]: [c_customer_sk#22, ss_sold_date_sk#25, ss_ext_sales_price#27, ca_county#31, ca_state#32] -Input [7]: [c_customer_sk#22, c_current_addr_sk#23, ss_sold_date_sk#25, ss_ext_sales_price#27, ca_address_sk#30, ca_county#31, ca_state#32] +(52) ColumnarToRow [codegen id : 12] +Input [2]: [d_date_sk#20, d_month_seq#35] -(52) Scan parquet default.store -Output [2]: [s_county#34, s_state#35] -Batched: true -Location [not included in comparison]/{warehouse_dir}/store] -PushedFilters: [IsNotNull(s_county), IsNotNull(s_state)] -ReadSchema: struct +(53) Filter [codegen id : 12] +Input [2]: [d_date_sk#20, d_month_seq#35] +Condition : (((isnotnull(d_month_seq#35) AND (d_month_seq#35 >= Subquery scalar-subquery#36, [id=#37])) AND (d_month_seq#35 <= Subquery scalar-subquery#38, [id=#39])) AND isnotnull(d_date_sk#20)) -(53) ColumnarToRow [codegen id : 16] -Input [2]: [s_county#34, s_state#35] - -(54) Filter [codegen id : 16] -Input [2]: [s_county#34, s_state#35] -Condition : (isnotnull(s_county#34) AND isnotnull(s_state#35)) +(54) Project [codegen id : 12] +Output [1]: [d_date_sk#20] +Input [2]: [d_date_sk#20, d_month_seq#35] (55) BroadcastExchange -Input [2]: [s_county#34, s_state#35] -Arguments: HashedRelationBroadcastMode(List(input[0, string, false], input[1, string, false]),false), [id=#36] +Input [1]: [d_date_sk#20] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#40] -(56) BroadcastHashJoin [codegen id : 18] 
-Left keys [2]: [ca_county#31, ca_state#32] -Right keys [2]: [s_county#34, s_state#35] +(56) BroadcastHashJoin [codegen id : 13] +Left keys [1]: [ss_sold_date_sk#32] +Right keys [1]: [d_date_sk#20] Join condition: None -(57) Project [codegen id : 18] -Output [3]: [c_customer_sk#22, ss_sold_date_sk#25, ss_ext_sales_price#27] -Input [7]: [c_customer_sk#22, ss_sold_date_sk#25, ss_ext_sales_price#27, ca_county#31, ca_state#32, s_county#34, s_state#35] - -(58) Scan parquet default.date_dim -Output [2]: [d_date_sk#17, d_month_seq#37] -Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] -ReadSchema: struct - -(59) ColumnarToRow [codegen id : 17] -Input [2]: [d_date_sk#17, d_month_seq#37] - -(60) Filter [codegen id : 17] -Input [2]: [d_date_sk#17, d_month_seq#37] -Condition : (((isnotnull(d_month_seq#37) AND (d_month_seq#37 >= Subquery scalar-subquery#38, [id=#39])) AND (d_month_seq#37 <= Subquery scalar-subquery#40, [id=#41])) AND isnotnull(d_date_sk#17)) +(57) Project [codegen id : 13] +Output [2]: [ss_customer_sk#33, ss_ext_sales_price#34] +Input [4]: [ss_sold_date_sk#32, ss_customer_sk#33, ss_ext_sales_price#34, d_date_sk#20] -(61) Project [codegen id : 17] -Output [1]: [d_date_sk#17] -Input [2]: [d_date_sk#17, d_month_seq#37] +(58) Exchange +Input [2]: [ss_customer_sk#33, ss_ext_sales_price#34] +Arguments: hashpartitioning(ss_customer_sk#33, 5), ENSURE_REQUIREMENTS, [id=#41] -(62) BroadcastExchange -Input [1]: [d_date_sk#17] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#42] +(59) Sort [codegen id : 14] +Input [2]: [ss_customer_sk#33, ss_ext_sales_price#34] +Arguments: [ss_customer_sk#33 ASC NULLS FIRST], false, 0 -(63) BroadcastHashJoin [codegen id : 18] -Left keys [1]: [ss_sold_date_sk#25] -Right keys [1]: [d_date_sk#17] +(60) SortMergeJoin [codegen id : 15] +Left keys [1]: [c_customer_sk#29] +Right keys [1]: [ss_customer_sk#33] Join condition: None -(64) Project [codegen id : 18] -Output [2]: [c_customer_sk#22, ss_ext_sales_price#27] -Input [4]: [c_customer_sk#22, ss_sold_date_sk#25, ss_ext_sales_price#27, d_date_sk#17] - -(65) HashAggregate [codegen id : 18] -Input [2]: [c_customer_sk#22, ss_ext_sales_price#27] -Keys [1]: [c_customer_sk#22] -Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#27))] -Aggregate Attributes [1]: [sum#43] -Results [2]: [c_customer_sk#22, sum#44] - -(66) Exchange -Input [2]: [c_customer_sk#22, sum#44] -Arguments: hashpartitioning(c_customer_sk#22, 5), true, [id=#45] - -(67) HashAggregate [codegen id : 19] -Input [2]: [c_customer_sk#22, sum#44] -Keys [1]: [c_customer_sk#22] -Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#27))] -Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#27))#46] -Results [1]: [cast(CheckOverflow((promote_precision(MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#27))#46,17,2)) / 50.00), DecimalType(21,6), true) as int) AS segment#47] - -(68) HashAggregate [codegen id : 19] -Input [1]: [segment#47] -Keys [1]: [segment#47] +(61) Project [codegen id : 15] +Output [2]: [c_customer_sk#29, ss_ext_sales_price#34] +Input [3]: [c_customer_sk#29, ss_customer_sk#33, ss_ext_sales_price#34] + +(62) HashAggregate [codegen id : 15] +Input [2]: [c_customer_sk#29, ss_ext_sales_price#34] +Keys [1]: [c_customer_sk#29] +Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#34))] +Aggregate Attributes [1]: [sum#42] +Results [2]: [c_customer_sk#29, sum#43] + +(63) HashAggregate [codegen id 
: 15] +Input [2]: [c_customer_sk#29, sum#43] +Keys [1]: [c_customer_sk#29] +Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#34))] +Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#34))#44] +Results [1]: [cast(CheckOverflow((promote_precision(MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#34))#44,17,2)) / 50.00), DecimalType(21,6), true) as int) AS segment#45] + +(64) HashAggregate [codegen id : 15] +Input [1]: [segment#45] +Keys [1]: [segment#45] Functions [1]: [partial_count(1)] -Aggregate Attributes [1]: [count#48] -Results [2]: [segment#47, count#49] +Aggregate Attributes [1]: [count#46] +Results [2]: [segment#45, count#47] -(69) Exchange -Input [2]: [segment#47, count#49] -Arguments: hashpartitioning(segment#47, 5), true, [id=#50] +(65) Exchange +Input [2]: [segment#45, count#47] +Arguments: hashpartitioning(segment#45, 5), ENSURE_REQUIREMENTS, [id=#48] -(70) HashAggregate [codegen id : 20] -Input [2]: [segment#47, count#49] -Keys [1]: [segment#47] +(66) HashAggregate [codegen id : 16] +Input [2]: [segment#45, count#47] +Keys [1]: [segment#45] Functions [1]: [count(1)] -Aggregate Attributes [1]: [count(1)#51] -Results [3]: [segment#47, count(1)#51 AS num_customers#52, (segment#47 * 50) AS segment_base#53] +Aggregate Attributes [1]: [count(1)#49] +Results [3]: [segment#45, count(1)#49 AS num_customers#50, (segment#45 * 50) AS segment_base#51] -(71) TakeOrderedAndProject -Input [3]: [segment#47, num_customers#52, segment_base#53] -Arguments: 100, [segment#47 ASC NULLS FIRST, num_customers#52 ASC NULLS FIRST], [segment#47, num_customers#52, segment_base#53] +(67) TakeOrderedAndProject +Input [3]: [segment#45, num_customers#50, segment_base#51] +Arguments: 100, [segment#45 ASC NULLS FIRST, num_customers#50 ASC NULLS FIRST], [segment#45, num_customers#50, segment_base#51] ===== Subqueries ===== -Subquery:1 Hosting operator id = 60 Hosting Expression = Subquery scalar-subquery#38, [id=#39] -* HashAggregate (78) -+- Exchange (77) - +- * HashAggregate (76) - +- * Project (75) - +- * Filter (74) - +- * ColumnarToRow (73) - +- Scan parquet default.date_dim (72) +Subquery:1 Hosting operator id = 53 Hosting Expression = Subquery scalar-subquery#36, [id=#37] +* HashAggregate (74) ++- Exchange (73) + +- * HashAggregate (72) + +- * Project (71) + +- * Filter (70) + +- * ColumnarToRow (69) + +- Scan parquet default.date_dim (68) -(72) Scan parquet default.date_dim -Output [3]: [d_month_seq#37, d_year#18, d_moy#19] +(68) Scan parquet default.date_dim +Output [3]: [d_month_seq#35, d_year#21, d_moy#22] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,12)] ReadSchema: struct -(73) ColumnarToRow [codegen id : 1] -Input [3]: [d_month_seq#37, d_year#18, d_moy#19] +(69) ColumnarToRow [codegen id : 1] +Input [3]: [d_month_seq#35, d_year#21, d_moy#22] -(74) Filter [codegen id : 1] -Input [3]: [d_month_seq#37, d_year#18, d_moy#19] -Condition : (((isnotnull(d_year#18) AND isnotnull(d_moy#19)) AND (d_year#18 = 1998)) AND (d_moy#19 = 12)) +(70) Filter [codegen id : 1] +Input [3]: [d_month_seq#35, d_year#21, d_moy#22] +Condition : (((isnotnull(d_year#21) AND isnotnull(d_moy#22)) AND (d_year#21 = 1998)) AND (d_moy#22 = 12)) -(75) Project [codegen id : 1] -Output [1]: [(d_month_seq#37 + 1) AS (d_month_seq + 1)#54] -Input [3]: [d_month_seq#37, d_year#18, d_moy#19] +(71) Project [codegen id : 1] +Output [1]: [(d_month_seq#35 + 1) AS (d_month_seq + 1)#52] +Input [3]: [d_month_seq#35, 
d_year#21, d_moy#22] -(76) HashAggregate [codegen id : 1] -Input [1]: [(d_month_seq + 1)#54] -Keys [1]: [(d_month_seq + 1)#54] +(72) HashAggregate [codegen id : 1] +Input [1]: [(d_month_seq + 1)#52] +Keys [1]: [(d_month_seq + 1)#52] Functions: [] Aggregate Attributes: [] -Results [1]: [(d_month_seq + 1)#54] +Results [1]: [(d_month_seq + 1)#52] -(77) Exchange -Input [1]: [(d_month_seq + 1)#54] -Arguments: hashpartitioning((d_month_seq + 1)#54, 5), true, [id=#55] +(73) Exchange +Input [1]: [(d_month_seq + 1)#52] +Arguments: hashpartitioning((d_month_seq + 1)#52, 5), ENSURE_REQUIREMENTS, [id=#53] -(78) HashAggregate [codegen id : 2] -Input [1]: [(d_month_seq + 1)#54] -Keys [1]: [(d_month_seq + 1)#54] +(74) HashAggregate [codegen id : 2] +Input [1]: [(d_month_seq + 1)#52] +Keys [1]: [(d_month_seq + 1)#52] Functions: [] Aggregate Attributes: [] -Results [1]: [(d_month_seq + 1)#54] +Results [1]: [(d_month_seq + 1)#52] -Subquery:2 Hosting operator id = 60 Hosting Expression = Subquery scalar-subquery#40, [id=#41] -* HashAggregate (85) -+- Exchange (84) - +- * HashAggregate (83) - +- * Project (82) - +- * Filter (81) - +- * ColumnarToRow (80) - +- Scan parquet default.date_dim (79) +Subquery:2 Hosting operator id = 53 Hosting Expression = Subquery scalar-subquery#38, [id=#39] +* HashAggregate (81) ++- Exchange (80) + +- * HashAggregate (79) + +- * Project (78) + +- * Filter (77) + +- * ColumnarToRow (76) + +- Scan parquet default.date_dim (75) -(79) Scan parquet default.date_dim -Output [3]: [d_month_seq#37, d_year#18, d_moy#19] +(75) Scan parquet default.date_dim +Output [3]: [d_month_seq#35, d_year#21, d_moy#22] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,12)] ReadSchema: struct -(80) ColumnarToRow [codegen id : 1] -Input [3]: [d_month_seq#37, d_year#18, d_moy#19] +(76) ColumnarToRow [codegen id : 1] +Input [3]: [d_month_seq#35, d_year#21, d_moy#22] -(81) Filter [codegen id : 1] -Input [3]: [d_month_seq#37, d_year#18, d_moy#19] -Condition : (((isnotnull(d_year#18) AND isnotnull(d_moy#19)) AND (d_year#18 = 1998)) AND (d_moy#19 = 12)) +(77) Filter [codegen id : 1] +Input [3]: [d_month_seq#35, d_year#21, d_moy#22] +Condition : (((isnotnull(d_year#21) AND isnotnull(d_moy#22)) AND (d_year#21 = 1998)) AND (d_moy#22 = 12)) -(82) Project [codegen id : 1] -Output [1]: [(d_month_seq#37 + 3) AS (d_month_seq + 3)#56] -Input [3]: [d_month_seq#37, d_year#18, d_moy#19] +(78) Project [codegen id : 1] +Output [1]: [(d_month_seq#35 + 3) AS (d_month_seq + 3)#54] +Input [3]: [d_month_seq#35, d_year#21, d_moy#22] -(83) HashAggregate [codegen id : 1] -Input [1]: [(d_month_seq + 3)#56] -Keys [1]: [(d_month_seq + 3)#56] +(79) HashAggregate [codegen id : 1] +Input [1]: [(d_month_seq + 3)#54] +Keys [1]: [(d_month_seq + 3)#54] Functions: [] Aggregate Attributes: [] -Results [1]: [(d_month_seq + 3)#56] +Results [1]: [(d_month_seq + 3)#54] -(84) Exchange -Input [1]: [(d_month_seq + 3)#56] -Arguments: hashpartitioning((d_month_seq + 3)#56, 5), true, [id=#57] +(80) Exchange +Input [1]: [(d_month_seq + 3)#54] +Arguments: hashpartitioning((d_month_seq + 3)#54, 5), ENSURE_REQUIREMENTS, [id=#55] -(85) HashAggregate [codegen id : 2] -Input [1]: [(d_month_seq + 3)#56] -Keys [1]: [(d_month_seq + 3)#56] +(81) HashAggregate [codegen id : 2] +Input [1]: [(d_month_seq + 3)#54] +Keys [1]: [(d_month_seq + 3)#54] Functions: [] Aggregate Attributes: [] -Results [1]: [(d_month_seq + 3)#56] +Results [1]: [(d_month_seq + 
3)#54] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/simplified.txt index cb7130f53c9a9..3b0622cbf9264 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q54.sf100/simplified.txt @@ -1,142 +1,130 @@ TakeOrderedAndProject [segment,num_customers,segment_base] - WholeStageCodegen (20) + WholeStageCodegen (16) HashAggregate [segment,count] [count(1),num_customers,segment_base,count] InputAdapter Exchange [segment] #1 - WholeStageCodegen (19) + WholeStageCodegen (15) HashAggregate [segment] [count,count] HashAggregate [c_customer_sk,sum] [sum(UnscaledValue(ss_ext_sales_price)),segment,sum] - InputAdapter - Exchange [c_customer_sk] #2 - WholeStageCodegen (18) - HashAggregate [c_customer_sk,ss_ext_sales_price] [sum,sum] - Project [c_customer_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [c_customer_sk,ss_sold_date_sk,ss_ext_sales_price] - BroadcastHashJoin [ca_county,ca_state,s_county,s_state] - Project [c_customer_sk,ss_sold_date_sk,ss_ext_sales_price,ca_county,ca_state] - SortMergeJoin [c_current_addr_sk,ca_address_sk] - InputAdapter - WholeStageCodegen (13) - Sort [c_current_addr_sk] + HashAggregate [c_customer_sk,ss_ext_sales_price] [sum,sum] + Project [c_customer_sk,ss_ext_sales_price] + SortMergeJoin [c_customer_sk,ss_customer_sk] + InputAdapter + WholeStageCodegen (11) + Sort [c_customer_sk] + Project [c_customer_sk] + BroadcastHashJoin [ca_address_sk,c_current_addr_sk] + InputAdapter + BroadcastExchange #2 + WholeStageCodegen (2) + Project [ca_address_sk] + BroadcastHashJoin [ca_county,ca_state,s_county,s_state] + Filter [ca_address_sk,ca_county,ca_state] + ColumnarToRow + InputAdapter + Scan parquet default.customer_address [ca_address_sk,ca_county,ca_state] InputAdapter - Exchange [c_current_addr_sk] #3 - WholeStageCodegen (12) - Project [c_customer_sk,c_current_addr_sk,ss_sold_date_sk,ss_ext_sales_price] - SortMergeJoin [c_customer_sk,ss_customer_sk] + BroadcastExchange #3 + WholeStageCodegen (1) + Filter [s_county,s_state] + ColumnarToRow InputAdapter - WholeStageCodegen (9) - Sort [c_customer_sk] - HashAggregate [c_customer_sk,c_current_addr_sk] - HashAggregate [c_customer_sk,c_current_addr_sk] - Project [c_customer_sk,c_current_addr_sk] - SortMergeJoin [customer_sk,c_customer_sk] - InputAdapter - WholeStageCodegen (6) - Sort [customer_sk] + Scan parquet default.store [s_county,s_state] + HashAggregate [c_customer_sk,c_current_addr_sk] + HashAggregate [c_customer_sk,c_current_addr_sk] + Project [c_customer_sk,c_current_addr_sk] + SortMergeJoin [customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (8) + Sort [customer_sk] + InputAdapter + Exchange [customer_sk] #4 + WholeStageCodegen (7) + Project [customer_sk] + BroadcastHashJoin [item_sk,i_item_sk] + Project [customer_sk,item_sk] + BroadcastHashJoin [sold_date_sk,d_date_sk] + InputAdapter + Union + WholeStageCodegen (3) + Project [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] + Filter [cs_item_sk,cs_sold_date_sk,cs_bill_customer_sk] + ColumnarToRow InputAdapter - Exchange [customer_sk] #4 - WholeStageCodegen (5) - Project [customer_sk] - BroadcastHashJoin [sold_date_sk,d_date_sk] - Project [sold_date_sk,customer_sk] - BroadcastHashJoin [item_sk,i_item_sk] - InputAdapter - Union - WholeStageCodegen (1) - Project 
[cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] - Filter [cs_item_sk,cs_sold_date_sk,cs_bill_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] - WholeStageCodegen (2) - Project [ws_sold_date_sk,ws_bill_customer_sk,ws_item_sk] - Filter [ws_item_sk,ws_sold_date_sk,ws_bill_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_bill_customer_sk] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (3) - Project [i_item_sk] - Filter [i_category,i_class,i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_class,i_category] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (4) - Project [d_date_sk] - Filter [d_moy,d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - InputAdapter - WholeStageCodegen (8) - Sort [c_customer_sk] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] + WholeStageCodegen (4) + Project [ws_sold_date_sk,ws_bill_customer_sk,ws_item_sk] + Filter [ws_item_sk,ws_sold_date_sk,ws_bill_customer_sk] + ColumnarToRow InputAdapter - Exchange [c_customer_sk] #7 - WholeStageCodegen (7) - Filter [c_customer_sk,c_current_addr_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_current_addr_sk] - InputAdapter - WholeStageCodegen (11) - Sort [ss_customer_sk] - InputAdapter - Exchange [ss_customer_sk] #8 - WholeStageCodegen (10) - Filter [ss_customer_sk,ss_sold_date_sk] + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_bill_customer_sk] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (5) + Project [d_date_sk] + Filter [d_moy,d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (6) + Project [i_item_sk] + Filter [i_category,i_class,i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk,ss_ext_sales_price] - InputAdapter - WholeStageCodegen (15) - Sort [ca_address_sk] - InputAdapter - Exchange [ca_address_sk] #9 - WholeStageCodegen (14) - Filter [ca_address_sk,ca_county,ca_state] - ColumnarToRow - InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_county,ca_state] - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (16) - Filter [s_county,s_state] + Scan parquet default.item [i_item_sk,i_class,i_category] + InputAdapter + WholeStageCodegen (10) + Sort [c_customer_sk] + InputAdapter + Exchange [c_customer_sk] #7 + WholeStageCodegen (9) + Filter [c_customer_sk,c_current_addr_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer [c_customer_sk,c_current_addr_sk] + InputAdapter + WholeStageCodegen (14) + Sort [ss_customer_sk] + InputAdapter + Exchange [ss_customer_sk] #8 + WholeStageCodegen (13) + Project [ss_customer_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_customer_sk,ss_sold_date_sk] ColumnarToRow InputAdapter - Scan parquet default.store [s_county,s_state] - InputAdapter - BroadcastExchange #11 - WholeStageCodegen (17) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - Subquery #1 - WholeStageCodegen (2) - HashAggregate [(d_month_seq + 1)] - InputAdapter - Exchange [(d_month_seq + 1)] #12 - WholeStageCodegen (1) - HashAggregate [(d_month_seq + 1)] - Project [d_month_seq] - Filter [d_year,d_moy] - ColumnarToRow - InputAdapter - Scan 
parquet default.date_dim [d_month_seq,d_year,d_moy] - Subquery #2 - WholeStageCodegen (2) - HashAggregate [(d_month_seq + 3)] - InputAdapter - Exchange [(d_month_seq + 3)] #13 - WholeStageCodegen (1) - HashAggregate [(d_month_seq + 3)] - Project [d_month_seq] - Filter [d_year,d_moy] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_month_seq,d_year,d_moy] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] + Scan parquet default.store_sales [ss_sold_date_sk,ss_customer_sk,ss_ext_sales_price] + InputAdapter + BroadcastExchange #9 + WholeStageCodegen (12) + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + Subquery #1 + WholeStageCodegen (2) + HashAggregate [(d_month_seq + 1)] + InputAdapter + Exchange [(d_month_seq + 1)] #10 + WholeStageCodegen (1) + HashAggregate [(d_month_seq + 1)] + Project [d_month_seq] + Filter [d_year,d_moy] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_month_seq,d_year,d_moy] + Subquery #2 + WholeStageCodegen (2) + HashAggregate [(d_month_seq + 3)] + InputAdapter + Exchange [(d_month_seq + 3)] #11 + WholeStageCodegen (1) + HashAggregate [(d_month_seq + 3)] + Project [d_month_seq] + Filter [d_year,d_moy] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_month_seq,d_year,d_moy] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_month_seq] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt index a1257cd292e48..b8d8aa358d532 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/explain.txt @@ -6,115 +6,115 @@ TakeOrderedAndProject (21) +- * Project (17) +- * BroadcastHashJoin Inner BuildRight (16) :- * Project (10) - : +- * BroadcastHashJoin Inner BuildRight (9) - : :- * Filter (3) - : : +- * ColumnarToRow (2) - : : +- Scan parquet default.store_sales (1) - : +- BroadcastExchange (8) - : +- * Project (7) - : +- * Filter (6) - : +- * ColumnarToRow (5) - : +- Scan parquet default.item (4) + : +- * BroadcastHashJoin Inner BuildLeft (9) + : :- BroadcastExchange (5) + : : +- * Project (4) + : : +- * Filter (3) + : : +- * ColumnarToRow (2) + : : +- Scan parquet default.date_dim (1) + : +- * Filter (8) + : +- * ColumnarToRow (7) + : +- Scan parquet default.store_sales (6) +- BroadcastExchange (15) +- * Project (14) +- * Filter (13) +- * ColumnarToRow (12) - +- Scan parquet default.date_dim (11) + +- Scan parquet default.item (11) -(1) Scan parquet default.store_sales -Output [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] +(1) Scan parquet default.date_dim +Output [3]: [d_date_sk#1, d_year#2, d_moy#3] Batched: true -Location [not included in comparison]/{warehouse_dir}/store_sales] -PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), IsNotNull(d_date_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 3] -Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] +(2) ColumnarToRow [codegen id : 1] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] -(3) Filter [codegen id : 3] -Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3] -Condition : 
(isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_item_sk#2)) +(3) Filter [codegen id : 1] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] +Condition : ((((isnotnull(d_moy#3) AND isnotnull(d_year#2)) AND (d_moy#3 = 11)) AND (d_year#2 = 1999)) AND isnotnull(d_date_sk#1)) -(4) Scan parquet default.item -Output [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] -Batched: true -Location [not included in comparison]/{warehouse_dir}/item] -PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,28), IsNotNull(i_item_sk)] -ReadSchema: struct +(4) Project [codegen id : 1] +Output [1]: [d_date_sk#1] +Input [3]: [d_date_sk#1, d_year#2, d_moy#3] -(5) ColumnarToRow [codegen id : 1] -Input [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] +(5) BroadcastExchange +Input [1]: [d_date_sk#1] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#4] -(6) Filter [codegen id : 1] -Input [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] -Condition : ((isnotnull(i_manager_id#7) AND (i_manager_id#7 = 28)) AND isnotnull(i_item_sk#4)) +(6) Scan parquet default.store_sales +Output [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] +Batched: true +Location [not included in comparison]/{warehouse_dir}/store_sales] +PushedFilters: [IsNotNull(ss_sold_date_sk), IsNotNull(ss_item_sk)] +ReadSchema: struct -(7) Project [codegen id : 1] -Output [3]: [i_item_sk#4, i_brand_id#5, i_brand#6] -Input [4]: [i_item_sk#4, i_brand_id#5, i_brand#6, i_manager_id#7] +(7) ColumnarToRow +Input [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] -(8) BroadcastExchange -Input [3]: [i_item_sk#4, i_brand_id#5, i_brand#6] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#8] +(8) Filter +Input [3]: [ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] +Condition : (isnotnull(ss_sold_date_sk#5) AND isnotnull(ss_item_sk#6)) (9) BroadcastHashJoin [codegen id : 3] -Left keys [1]: [ss_item_sk#2] -Right keys [1]: [i_item_sk#4] +Left keys [1]: [d_date_sk#1] +Right keys [1]: [ss_sold_date_sk#5] Join condition: None (10) Project [codegen id : 3] -Output [4]: [ss_sold_date_sk#1, ss_ext_sales_price#3, i_brand_id#5, i_brand#6] -Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_ext_sales_price#3, i_item_sk#4, i_brand_id#5, i_brand#6] +Output [2]: [ss_item_sk#6, ss_ext_sales_price#7] +Input [4]: [d_date_sk#1, ss_sold_date_sk#5, ss_item_sk#6, ss_ext_sales_price#7] -(11) Scan parquet default.date_dim -Output [3]: [d_date_sk#9, d_year#10, d_moy#11] +(11) Scan parquet default.item +Output [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_moy), IsNotNull(d_year), EqualTo(d_moy,11), EqualTo(d_year,1999), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/item] +PushedFilters: [IsNotNull(i_manager_id), EqualTo(i_manager_id,28), IsNotNull(i_item_sk)] +ReadSchema: struct (12) ColumnarToRow [codegen id : 2] -Input [3]: [d_date_sk#9, d_year#10, d_moy#11] +Input [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] (13) Filter [codegen id : 2] -Input [3]: [d_date_sk#9, d_year#10, d_moy#11] -Condition : ((((isnotnull(d_moy#11) AND isnotnull(d_year#10)) AND (d_moy#11 = 11)) AND (d_year#10 = 1999)) AND isnotnull(d_date_sk#9)) +Input [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] +Condition : ((isnotnull(i_manager_id#11) AND (i_manager_id#11 = 28)) AND 
isnotnull(i_item_sk#8)) (14) Project [codegen id : 2] -Output [1]: [d_date_sk#9] -Input [3]: [d_date_sk#9, d_year#10, d_moy#11] +Output [3]: [i_item_sk#8, i_brand_id#9, i_brand#10] +Input [4]: [i_item_sk#8, i_brand_id#9, i_brand#10, i_manager_id#11] (15) BroadcastExchange -Input [1]: [d_date_sk#9] +Input [3]: [i_item_sk#8, i_brand_id#9, i_brand#10] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#12] (16) BroadcastHashJoin [codegen id : 3] -Left keys [1]: [ss_sold_date_sk#1] -Right keys [1]: [d_date_sk#9] +Left keys [1]: [ss_item_sk#6] +Right keys [1]: [i_item_sk#8] Join condition: None (17) Project [codegen id : 3] -Output [3]: [ss_ext_sales_price#3, i_brand_id#5, i_brand#6] -Input [5]: [ss_sold_date_sk#1, ss_ext_sales_price#3, i_brand_id#5, i_brand#6, d_date_sk#9] +Output [3]: [ss_ext_sales_price#7, i_brand_id#9, i_brand#10] +Input [5]: [ss_item_sk#6, ss_ext_sales_price#7, i_item_sk#8, i_brand_id#9, i_brand#10] (18) HashAggregate [codegen id : 3] -Input [3]: [ss_ext_sales_price#3, i_brand_id#5, i_brand#6] -Keys [2]: [i_brand#6, i_brand_id#5] -Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#3))] +Input [3]: [ss_ext_sales_price#7, i_brand_id#9, i_brand#10] +Keys [2]: [i_brand#10, i_brand_id#9] +Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#7))] Aggregate Attributes [1]: [sum#13] -Results [3]: [i_brand#6, i_brand_id#5, sum#14] +Results [3]: [i_brand#10, i_brand_id#9, sum#14] (19) Exchange -Input [3]: [i_brand#6, i_brand_id#5, sum#14] -Arguments: hashpartitioning(i_brand#6, i_brand_id#5, 5), true, [id=#15] +Input [3]: [i_brand#10, i_brand_id#9, sum#14] +Arguments: hashpartitioning(i_brand#10, i_brand_id#9, 5), ENSURE_REQUIREMENTS, [id=#15] (20) HashAggregate [codegen id : 4] -Input [3]: [i_brand#6, i_brand_id#5, sum#14] -Keys [2]: [i_brand#6, i_brand_id#5] -Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#3))] -Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#3))#16] -Results [3]: [i_brand_id#5 AS brand_id#17, i_brand#6 AS brand#18, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#3))#16,17,2) AS ext_price#19] +Input [3]: [i_brand#10, i_brand_id#9, sum#14] +Keys [2]: [i_brand#10, i_brand_id#9] +Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#7))] +Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#7))#16] +Results [3]: [i_brand_id#9 AS brand_id#17, i_brand#10 AS brand#18, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#7))#16,17,2) AS ext_price#19] (21) TakeOrderedAndProject Input [3]: [brand_id#17, brand#18, ext_price#19] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/simplified.txt index b0d0e0d809441..4f375c80678e8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q55.sf100/simplified.txt @@ -6,26 +6,26 @@ TakeOrderedAndProject [ext_price,brand_id,brand] WholeStageCodegen (3) HashAggregate [i_brand,i_brand_id,ss_ext_sales_price] [sum,sum] Project [ss_ext_sales_price,i_brand_id,i_brand] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_ext_sales_price,i_brand_id,i_brand] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_sold_date_sk,ss_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ext_sales_price] + BroadcastHashJoin 
[ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ext_sales_price] + BroadcastHashJoin [d_date_sk,ss_sold_date_sk] InputAdapter BroadcastExchange #2 WholeStageCodegen (1) - Project [i_item_sk,i_brand_id,i_brand] - Filter [i_manager_id,i_item_sk] + Project [d_date_sk] + Filter [d_moy,d_year,d_date_sk] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_brand,i_manager_id] + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + Filter [ss_sold_date_sk,ss_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ext_sales_price] InputAdapter BroadcastExchange #3 WholeStageCodegen (2) - Project [d_date_sk] - Filter [d_moy,d_year,d_date_sk] + Project [i_item_sk,i_brand_id,i_brand] + Filter [i_manager_id,i_item_sk] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + Scan parquet default.item [i_item_sk,i_brand_id,i_brand,i_manager_id] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt index 511e1b46cd7a7..675cff99ad729 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/explain.txt @@ -11,30 +11,30 @@ TakeOrderedAndProject (50) : +- Exchange (27) : +- * Project (26) : +- * BroadcastHashJoin Inner BuildRight (25) - : :- * Project (10) - : : +- * BroadcastHashJoin Inner BuildRight (9) + : :- * Project (19) + : : +- * BroadcastHashJoin Inner BuildRight (18) : : :- * Filter (3) : : : +- * ColumnarToRow (2) : : : +- Scan parquet default.store_sales (1) - : : +- BroadcastExchange (8) - : : +- * Project (7) - : : +- * Filter (6) - : : +- * ColumnarToRow (5) - : : +- Scan parquet default.date_dim (4) + : : +- BroadcastExchange (17) + : : +- * Project (16) + : : +- * Filter (15) + : : +- * BroadcastHashJoin LeftOuter BuildRight (14) + : : :- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.item (4) + : : +- BroadcastExchange (13) + : : +- * HashAggregate (12) + : : +- Exchange (11) + : : +- * HashAggregate (10) + : : +- * Filter (9) + : : +- * ColumnarToRow (8) + : : +- Scan parquet default.item (7) : +- BroadcastExchange (24) : +- * Project (23) : +- * Filter (22) - : +- * BroadcastHashJoin LeftOuter BuildRight (21) - : :- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.item (11) - : +- BroadcastExchange (20) - : +- * HashAggregate (19) - : +- Exchange (18) - : +- * HashAggregate (17) - : +- * Filter (16) - : +- * ColumnarToRow (15) - : +- Scan parquet default.item (14) + : +- * ColumnarToRow (21) + : +- Scan parquet default.date_dim (20) +- * Sort (42) +- Exchange (41) +- * Project (40) @@ -65,112 +65,112 @@ Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3] Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3] Condition : ((isnotnull(ss_customer_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND isnotnull(ss_item_sk#2)) -(4) Scan parquet default.date_dim -Output [2]: [d_date_sk#4, d_month_seq#5] -Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] -ReadSchema: struct - -(5) ColumnarToRow [codegen id : 1] -Input [2]: [d_date_sk#4, d_month_seq#5] - -(6) Filter [codegen id : 1] -Input [2]: [d_date_sk#4, d_month_seq#5] -Condition : ((isnotnull(d_month_seq#5) AND 
(d_month_seq#5 = Subquery scalar-subquery#6, [id=#7])) AND isnotnull(d_date_sk#4)) - -(7) Project [codegen id : 1] -Output [1]: [d_date_sk#4] -Input [2]: [d_date_sk#4, d_month_seq#5] - -(8) BroadcastExchange -Input [1]: [d_date_sk#4] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#8] - -(9) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_sold_date_sk#1] -Right keys [1]: [d_date_sk#4] -Join condition: None - -(10) Project [codegen id : 5] -Output [2]: [ss_item_sk#2, ss_customer_sk#3] -Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, d_date_sk#4] - -(11) Scan parquet default.item -Output [3]: [i_item_sk#9, i_current_price#10, i_category#11] +(4) Scan parquet default.item +Output [3]: [i_item_sk#4, i_current_price#5, i_category#6] Batched: true Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), IsNotNull(i_item_sk)] ReadSchema: struct -(12) ColumnarToRow [codegen id : 4] -Input [3]: [i_item_sk#9, i_current_price#10, i_category#11] +(5) ColumnarToRow [codegen id : 3] +Input [3]: [i_item_sk#4, i_current_price#5, i_category#6] -(13) Filter [codegen id : 4] -Input [3]: [i_item_sk#9, i_current_price#10, i_category#11] -Condition : (isnotnull(i_current_price#10) AND isnotnull(i_item_sk#9)) +(6) Filter [codegen id : 3] +Input [3]: [i_item_sk#4, i_current_price#5, i_category#6] +Condition : (isnotnull(i_current_price#5) AND isnotnull(i_item_sk#4)) -(14) Scan parquet default.item -Output [2]: [i_current_price#10, i_category#11] +(7) Scan parquet default.item +Output [2]: [i_current_price#5, i_category#6] Batched: true Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category)] ReadSchema: struct -(15) ColumnarToRow [codegen id : 2] -Input [2]: [i_current_price#10, i_category#11] - -(16) Filter [codegen id : 2] -Input [2]: [i_current_price#10, i_category#11] -Condition : isnotnull(i_category#11) - -(17) HashAggregate [codegen id : 2] -Input [2]: [i_current_price#10, i_category#11] -Keys [1]: [i_category#11] -Functions [1]: [partial_avg(UnscaledValue(i_current_price#10))] -Aggregate Attributes [2]: [sum#12, count#13] -Results [3]: [i_category#11, sum#14, count#15] - -(18) Exchange -Input [3]: [i_category#11, sum#14, count#15] -Arguments: hashpartitioning(i_category#11, 5), true, [id=#16] - -(19) HashAggregate [codegen id : 3] -Input [3]: [i_category#11, sum#14, count#15] -Keys [1]: [i_category#11] -Functions [1]: [avg(UnscaledValue(i_current_price#10))] -Aggregate Attributes [1]: [avg(UnscaledValue(i_current_price#10))#17] -Results [2]: [cast((avg(UnscaledValue(i_current_price#10))#17 / 100.0) as decimal(11,6)) AS avg(i_current_price)#18, i_category#11 AS i_category#11#19] - -(20) BroadcastExchange -Input [2]: [avg(i_current_price)#18, i_category#11#19] -Arguments: HashedRelationBroadcastMode(List(input[1, string, true]),false), [id=#20] - -(21) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [i_category#11] -Right keys [1]: [i_category#11#19] +(8) ColumnarToRow [codegen id : 1] +Input [2]: [i_current_price#5, i_category#6] + +(9) Filter [codegen id : 1] +Input [2]: [i_current_price#5, i_category#6] +Condition : isnotnull(i_category#6) + +(10) HashAggregate [codegen id : 1] +Input [2]: [i_current_price#5, i_category#6] +Keys [1]: [i_category#6] +Functions [1]: [partial_avg(UnscaledValue(i_current_price#5))] +Aggregate Attributes [2]: [sum#7, count#8] +Results [3]: [i_category#6, sum#9, count#10] + +(11) Exchange +Input [3]: [i_category#6, 
sum#9, count#10] +Arguments: hashpartitioning(i_category#6, 5), true, [id=#11] + +(12) HashAggregate [codegen id : 2] +Input [3]: [i_category#6, sum#9, count#10] +Keys [1]: [i_category#6] +Functions [1]: [avg(UnscaledValue(i_current_price#5))] +Aggregate Attributes [1]: [avg(UnscaledValue(i_current_price#5))#12] +Results [2]: [cast((avg(UnscaledValue(i_current_price#5))#12 / 100.0) as decimal(11,6)) AS avg(i_current_price)#13, i_category#6 AS i_category#6#14] + +(13) BroadcastExchange +Input [2]: [avg(i_current_price)#13, i_category#6#14] +Arguments: HashedRelationBroadcastMode(List(input[1, string, true]),false), [id=#15] + +(14) BroadcastHashJoin [codegen id : 3] +Left keys [1]: [i_category#6] +Right keys [1]: [i_category#6#14] +Join condition: None + +(15) Filter [codegen id : 3] +Input [5]: [i_item_sk#4, i_current_price#5, i_category#6, avg(i_current_price)#13, i_category#6#14] +Condition : (cast(i_current_price#5 as decimal(14,7)) > CheckOverflow((1.200000 * promote_precision(avg(i_current_price)#13)), DecimalType(14,7), true)) + +(16) Project [codegen id : 3] +Output [1]: [i_item_sk#4] +Input [5]: [i_item_sk#4, i_current_price#5, i_category#6, avg(i_current_price)#13, i_category#6#14] + +(17) BroadcastExchange +Input [1]: [i_item_sk#4] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] + +(18) BroadcastHashJoin [codegen id : 5] +Left keys [1]: [ss_item_sk#2] +Right keys [1]: [i_item_sk#4] Join condition: None +(19) Project [codegen id : 5] +Output [2]: [ss_sold_date_sk#1, ss_customer_sk#3] +Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, i_item_sk#4] + +(20) Scan parquet default.date_dim +Output [2]: [d_date_sk#17, d_month_seq#18] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] +ReadSchema: struct + +(21) ColumnarToRow [codegen id : 4] +Input [2]: [d_date_sk#17, d_month_seq#18] + (22) Filter [codegen id : 4] -Input [5]: [i_item_sk#9, i_current_price#10, i_category#11, avg(i_current_price)#18, i_category#11#19] -Condition : (cast(i_current_price#10 as decimal(14,7)) > CheckOverflow((1.200000 * promote_precision(avg(i_current_price)#18)), DecimalType(14,7), true)) +Input [2]: [d_date_sk#17, d_month_seq#18] +Condition : ((isnotnull(d_month_seq#18) AND (d_month_seq#18 = Subquery scalar-subquery#19, [id=#20])) AND isnotnull(d_date_sk#17)) (23) Project [codegen id : 4] -Output [1]: [i_item_sk#9] -Input [5]: [i_item_sk#9, i_current_price#10, i_category#11, avg(i_current_price)#18, i_category#11#19] +Output [1]: [d_date_sk#17] +Input [2]: [d_date_sk#17, d_month_seq#18] (24) BroadcastExchange -Input [1]: [i_item_sk#9] +Input [1]: [d_date_sk#17] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#21] (25) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_item_sk#2] -Right keys [1]: [i_item_sk#9] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#17] Join condition: None (26) Project [codegen id : 5] Output [1]: [ss_customer_sk#3] -Input [3]: [ss_item_sk#2, ss_customer_sk#3, i_item_sk#9] +Input [3]: [ss_sold_date_sk#1, ss_customer_sk#3, d_date_sk#17] (27) Exchange Input [1]: [ss_customer_sk#3] @@ -282,7 +282,7 @@ Arguments: 100, [cnt#35 ASC NULLS FIRST], [state#34, cnt#35] ===== Subqueries ===== -Subquery:1 Hosting operator id = 6 Hosting Expression = Subquery scalar-subquery#6, [id=#7] +Subquery:1 Hosting operator id = 22 Hosting Expression = Subquery scalar-subquery#19, [id=#20] 
* HashAggregate (57) +- Exchange (56) +- * HashAggregate (55) @@ -293,39 +293,39 @@ Subquery:1 Hosting operator id = 6 Hosting Expression = Subquery scalar-subquery (51) Scan parquet default.date_dim -Output [3]: [d_month_seq#5, d_year#37, d_moy#38] +Output [3]: [d_month_seq#18, d_year#37, d_moy#38] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,1)] ReadSchema: struct (52) ColumnarToRow [codegen id : 1] -Input [3]: [d_month_seq#5, d_year#37, d_moy#38] +Input [3]: [d_month_seq#18, d_year#37, d_moy#38] (53) Filter [codegen id : 1] -Input [3]: [d_month_seq#5, d_year#37, d_moy#38] +Input [3]: [d_month_seq#18, d_year#37, d_moy#38] Condition : (((isnotnull(d_year#37) AND isnotnull(d_moy#38)) AND (d_year#37 = 2000)) AND (d_moy#38 = 1)) (54) Project [codegen id : 1] -Output [1]: [d_month_seq#5] -Input [3]: [d_month_seq#5, d_year#37, d_moy#38] +Output [1]: [d_month_seq#18] +Input [3]: [d_month_seq#18, d_year#37, d_moy#38] (55) HashAggregate [codegen id : 1] -Input [1]: [d_month_seq#5] -Keys [1]: [d_month_seq#5] +Input [1]: [d_month_seq#18] +Keys [1]: [d_month_seq#18] Functions: [] Aggregate Attributes: [] -Results [1]: [d_month_seq#5] +Results [1]: [d_month_seq#18] (56) Exchange -Input [1]: [d_month_seq#5] -Arguments: hashpartitioning(d_month_seq#5, 5), true, [id=#39] +Input [1]: [d_month_seq#18] +Arguments: hashpartitioning(d_month_seq#18, 5), true, [id=#39] (57) HashAggregate [codegen id : 2] -Input [1]: [d_month_seq#5] -Keys [1]: [d_month_seq#5] +Input [1]: [d_month_seq#18] +Keys [1]: [d_month_seq#18] Functions: [] Aggregate Attributes: [] -Results [1]: [d_month_seq#5] +Results [1]: [d_month_seq#18] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/simplified.txt index dcebba331afb3..73d42163240f0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q6.sf100/simplified.txt @@ -16,55 +16,55 @@ TakeOrderedAndProject [cnt,state] Exchange [ss_customer_sk] #2 WholeStageCodegen (5) Project [ss_customer_sk] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_customer_sk] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_customer_sk] + BroadcastHashJoin [ss_item_sk,i_item_sk] Filter [ss_customer_sk,ss_sold_date_sk,ss_item_sk] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk] InputAdapter BroadcastExchange #3 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - Subquery #1 - WholeStageCodegen (2) - HashAggregate [d_month_seq] + WholeStageCodegen (3) + Project [i_item_sk] + Filter [i_current_price,avg(i_current_price)] + BroadcastHashJoin [i_category,i_category] + Filter [i_current_price,i_item_sk] + ColumnarToRow InputAdapter - Exchange [d_month_seq] #4 - WholeStageCodegen (1) - HashAggregate [d_month_seq] - Project [d_month_seq] - Filter [d_year,d_moy] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_month_seq,d_year,d_moy] - ColumnarToRow + Scan parquet default.item [i_item_sk,i_current_price,i_category] InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] + BroadcastExchange #4 + WholeStageCodegen (2) + HashAggregate 
[i_category,sum,count] [avg(UnscaledValue(i_current_price)),avg(i_current_price),i_category,sum,count] + InputAdapter + Exchange [i_category] #5 + WholeStageCodegen (1) + HashAggregate [i_category,i_current_price] [sum,count,sum,count] + Filter [i_category] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_current_price,i_category] InputAdapter - BroadcastExchange #5 + BroadcastExchange #6 WholeStageCodegen (4) - Project [i_item_sk] - Filter [i_current_price,avg(i_current_price)] - BroadcastHashJoin [i_category,i_category] - Filter [i_current_price,i_item_sk] - ColumnarToRow + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + Subquery #1 + WholeStageCodegen (2) + HashAggregate [d_month_seq] InputAdapter - Scan parquet default.item [i_item_sk,i_current_price,i_category] + Exchange [d_month_seq] #7 + WholeStageCodegen (1) + HashAggregate [d_month_seq] + Project [d_month_seq] + Filter [d_year,d_moy] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_month_seq,d_year,d_moy] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (3) - HashAggregate [i_category,sum,count] [avg(UnscaledValue(i_current_price)),avg(i_current_price),i_category,sum,count] - InputAdapter - Exchange [i_category] #7 - WholeStageCodegen (2) - HashAggregate [i_category,i_current_price] [sum,count,sum,count] - Filter [i_category] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_current_price,i_category] + Scan parquet default.date_dim [d_date_sk,d_month_seq] InputAdapter WholeStageCodegen (12) Sort [c_customer_sk] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt index e616934bbd073..5574e5b16c578 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/explain.txt @@ -1,79 +1,74 @@ == Physical Plan == -TakeOrderedAndProject (75) -+- * Project (74) - +- BroadcastNestedLoopJoin Inner BuildRight (73) - :- * HashAggregate (47) - : +- Exchange (46) - : +- * HashAggregate (45) - : +- * Project (44) - : +- * BroadcastHashJoin Inner BuildRight (43) - : :- * Project (31) - : : +- * BroadcastHashJoin Inner BuildRight (30) - : : :- * Project (24) - : : : +- * BroadcastHashJoin Inner BuildRight (23) - : : : :- * Project (17) - : : : : +- * BroadcastHashJoin Inner BuildRight (16) - : : : : :- * Project (10) - : : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : : :- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.store_sales (1) - : : : : : +- BroadcastExchange (8) - : : : : : +- * Project (7) - : : : : : +- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.date_dim (4) - : : : : +- BroadcastExchange (15) - : : : : +- * Project (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.item (11) - : : : +- BroadcastExchange (22) - : : : +- * Project (21) - : : : +- * Filter (20) - : : : +- * ColumnarToRow (19) - : : : +- Scan parquet default.promotion (18) - : : +- BroadcastExchange (29) - : : +- * Project (28) - : : +- * Filter (27) - : : +- * ColumnarToRow (26) - : : +- Scan parquet default.store (25) - : +- BroadcastExchange (42) - : +- * Project (41) - : +- * BroadcastHashJoin Inner BuildRight (40) - : :- * Filter (34) - : : +- * ColumnarToRow (33) - : : +- Scan parquet 
default.customer (32) - : +- BroadcastExchange (39) - : +- * Project (38) - : +- * Filter (37) - : +- * ColumnarToRow (36) - : +- Scan parquet default.customer_address (35) - +- BroadcastExchange (72) - +- * HashAggregate (71) - +- Exchange (70) - +- * HashAggregate (69) - +- * Project (68) - +- * BroadcastHashJoin Inner BuildRight (67) - :- * Project (59) - : +- * BroadcastHashJoin Inner BuildRight (58) - : :- * Project (56) - : : +- * BroadcastHashJoin Inner BuildRight (55) - : : :- * Project (53) - : : : +- * BroadcastHashJoin Inner BuildLeft (52) - : : : :- ReusedExchange (48) - : : : +- * Filter (51) - : : : +- * ColumnarToRow (50) - : : : +- Scan parquet default.store_sales (49) - : : +- ReusedExchange (54) - : +- ReusedExchange (57) - +- BroadcastExchange (66) - +- * Project (65) - +- * BroadcastHashJoin Inner BuildLeft (64) - :- ReusedExchange (60) - +- * Filter (63) - +- * ColumnarToRow (62) - +- Scan parquet default.customer (61) +* Sort (70) ++- Exchange (69) + +- * Project (68) + +- BroadcastNestedLoopJoin Inner BuildRight (67) + :- * HashAggregate (47) + : +- Exchange (46) + : +- * HashAggregate (45) + : +- * Project (44) + : +- * BroadcastHashJoin Inner BuildRight (43) + : :- * Project (31) + : : +- * BroadcastHashJoin Inner BuildRight (30) + : : :- * Project (24) + : : : +- * BroadcastHashJoin Inner BuildRight (23) + : : : :- * Project (17) + : : : : +- * BroadcastHashJoin Inner BuildRight (16) + : : : : :- * Project (10) + : : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : : :- * Filter (3) + : : : : : : +- * ColumnarToRow (2) + : : : : : : +- Scan parquet default.store_sales (1) + : : : : : +- BroadcastExchange (8) + : : : : : +- * Project (7) + : : : : : +- * Filter (6) + : : : : : +- * ColumnarToRow (5) + : : : : : +- Scan parquet default.date_dim (4) + : : : : +- BroadcastExchange (15) + : : : : +- * Project (14) + : : : : +- * Filter (13) + : : : : +- * ColumnarToRow (12) + : : : : +- Scan parquet default.item (11) + : : : +- BroadcastExchange (22) + : : : +- * Project (21) + : : : +- * Filter (20) + : : : +- * ColumnarToRow (19) + : : : +- Scan parquet default.promotion (18) + : : +- BroadcastExchange (29) + : : +- * Project (28) + : : +- * Filter (27) + : : +- * ColumnarToRow (26) + : : +- Scan parquet default.store (25) + : +- BroadcastExchange (42) + : +- * Project (41) + : +- * BroadcastHashJoin Inner BuildRight (40) + : :- * Filter (34) + : : +- * ColumnarToRow (33) + : : +- Scan parquet default.customer (32) + : +- BroadcastExchange (39) + : +- * Project (38) + : +- * Filter (37) + : +- * ColumnarToRow (36) + : +- Scan parquet default.customer_address (35) + +- BroadcastExchange (66) + +- * HashAggregate (65) + +- Exchange (64) + +- * HashAggregate (63) + +- * Project (62) + +- * BroadcastHashJoin Inner BuildRight (61) + :- * Project (59) + : +- * BroadcastHashJoin Inner BuildRight (58) + : :- * Project (56) + : : +- * BroadcastHashJoin Inner BuildRight (55) + : : :- * Project (53) + : : : +- * BroadcastHashJoin Inner BuildRight (52) + : : : :- * Filter (50) + : : : : +- * ColumnarToRow (49) + : : : : +- Scan parquet default.store_sales (48) + : : : +- ReusedExchange (51) + : : +- ReusedExchange (54) + : +- ReusedExchange (57) + +- ReusedExchange (60) (1) Scan parquet default.store_sales @@ -290,31 +285,31 @@ Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#6))] Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#6))#31] Results [1]: [MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#6))#31,17,2) AS promotions#32] -(48) ReusedExchange 
[Reuses operator id: 8] -Output [1]: [d_date_sk#7] - -(49) Scan parquet default.store_sales +(48) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_store_sk), IsNotNull(ss_sold_date_sk), IsNotNull(ss_customer_sk), IsNotNull(ss_item_sk)] ReadSchema: struct -(50) ColumnarToRow +(49) ColumnarToRow [codegen id : 14] Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6] -(51) Filter +(50) Filter [codegen id : 14] Input [5]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6] Condition : (((isnotnull(ss_store_sk#4) AND isnotnull(ss_sold_date_sk#1)) AND isnotnull(ss_customer_sk#3)) AND isnotnull(ss_item_sk#2)) +(51) ReusedExchange [Reuses operator id: 8] +Output [1]: [d_date_sk#7] + (52) BroadcastHashJoin [codegen id : 14] -Left keys [1]: [d_date_sk#7] -Right keys [1]: [ss_sold_date_sk#1] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#7] Join condition: None (53) Project [codegen id : 14] Output [4]: [ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6] -Input [6]: [d_date_sk#7, ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6] +Input [6]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6, d_date_sk#7] (54) ReusedExchange [Reuses operator id: 15] Output [1]: [i_item_sk#11] @@ -340,75 +335,52 @@ Join condition: None Output [2]: [ss_customer_sk#3, ss_ext_sales_price#6] Input [4]: [ss_customer_sk#3, ss_store_sk#4, ss_ext_sales_price#6, s_store_sk#19] -(60) ReusedExchange [Reuses operator id: 39] -Output [1]: [ca_address_sk#24] - -(61) Scan parquet default.customer -Output [2]: [c_customer_sk#22, c_current_addr_sk#23] -Batched: true -Location [not included in comparison]/{warehouse_dir}/customer] -PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] -ReadSchema: struct - -(62) ColumnarToRow -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] - -(63) Filter -Input [2]: [c_customer_sk#22, c_current_addr_sk#23] -Condition : (isnotnull(c_customer_sk#22) AND isnotnull(c_current_addr_sk#23)) - -(64) BroadcastHashJoin [codegen id : 13] -Left keys [1]: [ca_address_sk#24] -Right keys [1]: [c_current_addr_sk#23] -Join condition: None - -(65) Project [codegen id : 13] +(60) ReusedExchange [Reuses operator id: 42] Output [1]: [c_customer_sk#22] -Input [3]: [ca_address_sk#24, c_customer_sk#22, c_current_addr_sk#23] - -(66) BroadcastExchange -Input [1]: [c_customer_sk#22] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#33] -(67) BroadcastHashJoin [codegen id : 14] +(61) BroadcastHashJoin [codegen id : 14] Left keys [1]: [ss_customer_sk#3] Right keys [1]: [c_customer_sk#22] Join condition: None -(68) Project [codegen id : 14] +(62) Project [codegen id : 14] Output [1]: [ss_ext_sales_price#6] Input [3]: [ss_customer_sk#3, ss_ext_sales_price#6, c_customer_sk#22] -(69) HashAggregate [codegen id : 14] +(63) HashAggregate [codegen id : 14] Input [1]: [ss_ext_sales_price#6] Keys: [] Functions [1]: [partial_sum(UnscaledValue(ss_ext_sales_price#6))] -Aggregate Attributes [1]: [sum#34] -Results [1]: [sum#35] +Aggregate Attributes [1]: [sum#33] +Results [1]: [sum#34] -(70) Exchange -Input [1]: [sum#35] -Arguments: SinglePartition, true, [id=#36] +(64) Exchange +Input [1]: [sum#34] 
+Arguments: SinglePartition, true, [id=#35] -(71) HashAggregate [codegen id : 15] -Input [1]: [sum#35] +(65) HashAggregate [codegen id : 15] +Input [1]: [sum#34] Keys: [] Functions [1]: [sum(UnscaledValue(ss_ext_sales_price#6))] -Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#6))#37] -Results [1]: [MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#6))#37,17,2) AS total#38] +Aggregate Attributes [1]: [sum(UnscaledValue(ss_ext_sales_price#6))#36] +Results [1]: [MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#6))#36,17,2) AS total#37] -(72) BroadcastExchange -Input [1]: [total#38] -Arguments: IdentityBroadcastMode, [id=#39] +(66) BroadcastExchange +Input [1]: [total#37] +Arguments: IdentityBroadcastMode, [id=#38] -(73) BroadcastNestedLoopJoin +(67) BroadcastNestedLoopJoin Join condition: None -(74) Project [codegen id : 16] -Output [3]: [promotions#32, total#38, CheckOverflow((promote_precision(CheckOverflow((promote_precision(cast(promotions#32 as decimal(15,4))) / promote_precision(cast(total#38 as decimal(15,4)))), DecimalType(35,20), true)) * 100.00000000000000000000), DecimalType(38,19), true) AS (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#40] -Input [2]: [promotions#32, total#38] +(68) Project [codegen id : 16] +Output [3]: [promotions#32, total#37, CheckOverflow((promote_precision(CheckOverflow((promote_precision(cast(promotions#32 as decimal(15,4))) / promote_precision(cast(total#37 as decimal(15,4)))), DecimalType(35,20), true)) * 100.00000000000000000000), DecimalType(38,19), true) AS (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#39] +Input [2]: [promotions#32, total#37] + +(69) Exchange +Input [3]: [promotions#32, total#37, (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#39] +Arguments: rangepartitioning(promotions#32 ASC NULLS FIRST, total#37 ASC NULLS FIRST, 5), true, [id=#40] -(75) TakeOrderedAndProject -Input [3]: [promotions#32, total#38, (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#40] -Arguments: 100, [promotions#32 ASC NULLS FIRST, total#38 ASC NULLS FIRST], [promotions#32, total#38, (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#40] +(70) Sort [codegen id : 17] +Input [3]: [promotions#32, total#37, (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#39] +Arguments: [promotions#32 ASC NULLS FIRST, total#37 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/simplified.txt index 039ccb1aa18cf..1ebad2d825be6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/simplified.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61.sf100/simplified.txt @@ -1,110 +1,104 @@ -TakeOrderedAndProject [promotions,total,(CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))] - WholeStageCodegen (16) - Project [promotions,total] - InputAdapter - BroadcastNestedLoopJoin - WholeStageCodegen (8) - HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),promotions,sum] - InputAdapter - Exchange #1 - WholeStageCodegen (7) - HashAggregate [ss_ext_sales_price] [sum,sum] - Project [ss_ext_sales_price] - BroadcastHashJoin [ss_customer_sk,c_customer_sk] - Project [ss_customer_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_customer_sk,ss_store_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_promo_sk,p_promo_sk] - Project [ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_store_sk,ss_promo_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] +WholeStageCodegen (17) + Sort [promotions,total] + InputAdapter + Exchange [promotions,total] #1 + WholeStageCodegen (16) + Project [promotions,total] + InputAdapter + BroadcastNestedLoopJoin + WholeStageCodegen (8) + HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),promotions,sum] + InputAdapter + Exchange #2 + WholeStageCodegen (7) + HashAggregate [ss_ext_sales_price] [sum,sum] + Project [ss_ext_sales_price] + BroadcastHashJoin [ss_customer_sk,c_customer_sk] + Project [ss_customer_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_customer_sk,ss_store_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_promo_sk,p_promo_sk] + Project [ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_store_sk,ss_promo_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_year,d_moy,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (2) + Project [i_item_sk] + Filter [i_category,i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_category] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (3) + Project [p_promo_sk] + Filter [p_channel_dmail,p_channel_email,p_channel_tv,p_promo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.promotion [p_promo_sk,p_channel_dmail,p_channel_email,p_channel_tv] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (4) + Project [s_store_sk] + Filter [s_gmt_offset,s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_gmt_offset] + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (6) + Project [c_customer_sk] + BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + Filter [c_customer_sk,c_current_addr_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales 
[ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] + Scan parquet default.customer [c_customer_sk,c_current_addr_sk] InputAdapter - BroadcastExchange #2 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_year,d_moy,d_date_sk] + BroadcastExchange #8 + WholeStageCodegen (5) + Project [ca_address_sk] + Filter [ca_gmt_offset,ca_address_sk] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - InputAdapter - BroadcastExchange #3 - WholeStageCodegen (2) - Project [i_item_sk] - Filter [i_category,i_item_sk] + Scan parquet default.customer_address [ca_address_sk,ca_gmt_offset] + BroadcastExchange #9 + WholeStageCodegen (15) + HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),total,sum] + InputAdapter + Exchange #10 + WholeStageCodegen (14) + HashAggregate [ss_ext_sales_price] [sum,sum] + Project [ss_ext_sales_price] + BroadcastHashJoin [ss_customer_sk,c_customer_sk] + Project [ss_customer_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_customer_sk,ss_store_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_store_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] ColumnarToRow InputAdapter - Scan parquet default.item [i_item_sk,i_category] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (3) - Project [p_promo_sk] - Filter [p_channel_dmail,p_channel_email,p_channel_tv,p_promo_sk] - ColumnarToRow + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] InputAdapter - Scan parquet default.promotion [p_promo_sk,p_channel_dmail,p_channel_email,p_channel_tv] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (4) - Project [s_store_sk] - Filter [s_gmt_offset,s_store_sk] - ColumnarToRow + ReusedExchange [d_date_sk] #3 InputAdapter - Scan parquet default.store [s_store_sk,s_gmt_offset] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (6) - Project [c_customer_sk] - BroadcastHashJoin [c_current_addr_sk,ca_address_sk] - Filter [c_customer_sk,c_current_addr_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_current_addr_sk] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (5) - Project [ca_address_sk] - Filter [ca_gmt_offset,ca_address_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_gmt_offset] - BroadcastExchange #8 - WholeStageCodegen (15) - HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),total,sum] - InputAdapter - Exchange #9 - WholeStageCodegen (14) - HashAggregate [ss_ext_sales_price] [sum,sum] - Project [ss_ext_sales_price] - BroadcastHashJoin [ss_customer_sk,c_customer_sk] - Project [ss_customer_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_customer_sk,ss_store_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] - BroadcastHashJoin [d_date_sk,ss_sold_date_sk] - InputAdapter - ReusedExchange [d_date_sk] #2 - Filter [ss_store_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk] #3 - InputAdapter - ReusedExchange [s_store_sk] #5 - InputAdapter - BroadcastExchange #10 - 
WholeStageCodegen (13) - Project [c_customer_sk] - BroadcastHashJoin [ca_address_sk,c_current_addr_sk] + ReusedExchange [i_item_sk] #4 InputAdapter - ReusedExchange [ca_address_sk] #7 - Filter [c_customer_sk,c_current_addr_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_current_addr_sk] + ReusedExchange [s_store_sk] #6 + InputAdapter + ReusedExchange [c_customer_sk] #7 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt index f56f48726c4ad..8025461181031 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/explain.txt @@ -1,76 +1,77 @@ == Physical Plan == -TakeOrderedAndProject (72) -+- * Project (71) - +- BroadcastNestedLoopJoin Inner BuildRight (70) - :- * HashAggregate (47) - : +- Exchange (46) - : +- * HashAggregate (45) - : +- * Project (44) - : +- * BroadcastHashJoin Inner BuildRight (43) - : :- * Project (37) - : : +- * BroadcastHashJoin Inner BuildRight (36) - : : :- * Project (30) - : : : +- * BroadcastHashJoin Inner BuildRight (29) - : : : :- * Project (24) - : : : : +- * BroadcastHashJoin Inner BuildRight (23) - : : : : :- * Project (17) - : : : : : +- * BroadcastHashJoin Inner BuildRight (16) - : : : : : :- * Project (10) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : : : :- * Filter (3) - : : : : : : : +- * ColumnarToRow (2) - : : : : : : : +- Scan parquet default.store_sales (1) - : : : : : : +- BroadcastExchange (8) - : : : : : : +- * Project (7) - : : : : : : +- * Filter (6) - : : : : : : +- * ColumnarToRow (5) - : : : : : : +- Scan parquet default.store (4) - : : : : : +- BroadcastExchange (15) - : : : : : +- * Project (14) - : : : : : +- * Filter (13) - : : : : : +- * ColumnarToRow (12) - : : : : : +- Scan parquet default.promotion (11) - : : : : +- BroadcastExchange (22) - : : : : +- * Project (21) - : : : : +- * Filter (20) - : : : : +- * ColumnarToRow (19) - : : : : +- Scan parquet default.date_dim (18) - : : : +- BroadcastExchange (28) - : : : +- * Filter (27) - : : : +- * ColumnarToRow (26) - : : : +- Scan parquet default.customer (25) - : : +- BroadcastExchange (35) - : : +- * Project (34) - : : +- * Filter (33) - : : +- * ColumnarToRow (32) - : : +- Scan parquet default.customer_address (31) - : +- BroadcastExchange (42) - : +- * Project (41) - : +- * Filter (40) - : +- * ColumnarToRow (39) - : +- Scan parquet default.item (38) - +- BroadcastExchange (69) - +- * HashAggregate (68) - +- Exchange (67) - +- * HashAggregate (66) - +- * Project (65) - +- * BroadcastHashJoin Inner BuildRight (64) - :- * Project (62) - : +- * BroadcastHashJoin Inner BuildRight (61) - : :- * Project (59) - : : +- * BroadcastHashJoin Inner BuildRight (58) - : : :- * Project (56) - : : : +- * BroadcastHashJoin Inner BuildRight (55) - : : : :- * Project (53) - : : : : +- * BroadcastHashJoin Inner BuildRight (52) - : : : : :- * Filter (50) - : : : : : +- * ColumnarToRow (49) - : : : : : +- Scan parquet default.store_sales (48) - : : : : +- ReusedExchange (51) - : : : +- ReusedExchange (54) - : : +- ReusedExchange (57) - : +- ReusedExchange (60) - +- ReusedExchange (63) +* Sort (73) ++- Exchange (72) + +- * Project (71) + +- BroadcastNestedLoopJoin Inner BuildRight (70) + :- * HashAggregate (47) + : +- Exchange (46) + : +- * HashAggregate (45) + : +- * Project (44) + : +- * 
BroadcastHashJoin Inner BuildRight (43) + : :- * Project (37) + : : +- * BroadcastHashJoin Inner BuildRight (36) + : : :- * Project (30) + : : : +- * BroadcastHashJoin Inner BuildRight (29) + : : : :- * Project (24) + : : : : +- * BroadcastHashJoin Inner BuildRight (23) + : : : : :- * Project (17) + : : : : : +- * BroadcastHashJoin Inner BuildRight (16) + : : : : : :- * Project (10) + : : : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : : : :- * Filter (3) + : : : : : : : +- * ColumnarToRow (2) + : : : : : : : +- Scan parquet default.store_sales (1) + : : : : : : +- BroadcastExchange (8) + : : : : : : +- * Project (7) + : : : : : : +- * Filter (6) + : : : : : : +- * ColumnarToRow (5) + : : : : : : +- Scan parquet default.store (4) + : : : : : +- BroadcastExchange (15) + : : : : : +- * Project (14) + : : : : : +- * Filter (13) + : : : : : +- * ColumnarToRow (12) + : : : : : +- Scan parquet default.promotion (11) + : : : : +- BroadcastExchange (22) + : : : : +- * Project (21) + : : : : +- * Filter (20) + : : : : +- * ColumnarToRow (19) + : : : : +- Scan parquet default.date_dim (18) + : : : +- BroadcastExchange (28) + : : : +- * Filter (27) + : : : +- * ColumnarToRow (26) + : : : +- Scan parquet default.customer (25) + : : +- BroadcastExchange (35) + : : +- * Project (34) + : : +- * Filter (33) + : : +- * ColumnarToRow (32) + : : +- Scan parquet default.customer_address (31) + : +- BroadcastExchange (42) + : +- * Project (41) + : +- * Filter (40) + : +- * ColumnarToRow (39) + : +- Scan parquet default.item (38) + +- BroadcastExchange (69) + +- * HashAggregate (68) + +- Exchange (67) + +- * HashAggregate (66) + +- * Project (65) + +- * BroadcastHashJoin Inner BuildRight (64) + :- * Project (62) + : +- * BroadcastHashJoin Inner BuildRight (61) + : :- * Project (59) + : : +- * BroadcastHashJoin Inner BuildRight (58) + : : :- * Project (56) + : : : +- * BroadcastHashJoin Inner BuildRight (55) + : : : :- * Project (53) + : : : : +- * BroadcastHashJoin Inner BuildRight (52) + : : : : :- * Filter (50) + : : : : : +- * ColumnarToRow (49) + : : : : : +- Scan parquet default.store_sales (48) + : : : : +- ReusedExchange (51) + : : : +- ReusedExchange (54) + : : +- ReusedExchange (57) + : +- ReusedExchange (60) + +- ReusedExchange (63) (1) Scan parquet default.store_sales @@ -390,7 +391,11 @@ Join condition: None Output [3]: [promotions#32, total#37, CheckOverflow((promote_precision(CheckOverflow((promote_precision(cast(promotions#32 as decimal(15,4))) / promote_precision(cast(total#37 as decimal(15,4)))), DecimalType(35,20), true)) * 100.00000000000000000000), DecimalType(38,19), true) AS (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#39] Input [2]: [promotions#32, total#37] -(72) TakeOrderedAndProject +(72) Exchange Input [3]: [promotions#32, total#37, (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#39] -Arguments: 100, [promotions#32 ASC NULLS FIRST, total#37 ASC NULLS FIRST], [promotions#32, total#37, (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#39] +Arguments: rangepartitioning(promotions#32 ASC NULLS FIRST, total#37 ASC NULLS FIRST, 5), true, [id=#40] + +(73) 
Sort [codegen id : 17] +Input [3]: [promotions#32, total#37, (CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))#39] +Arguments: [promotions#32 ASC NULLS FIRST, total#37 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/simplified.txt index da75651673cfe..3b476544403e0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q61/simplified.txt @@ -1,105 +1,108 @@ -TakeOrderedAndProject [promotions,total,(CAST((CAST(CAST(promotions AS DECIMAL(15,4)) AS DECIMAL(15,4)) / CAST(CAST(total AS DECIMAL(15,4)) AS DECIMAL(15,4))) AS DECIMAL(35,20)) * CAST(CAST(100 AS DECIMAL(3,0)) AS DECIMAL(35,20)))] - WholeStageCodegen (16) - Project [promotions,total] - InputAdapter - BroadcastNestedLoopJoin - WholeStageCodegen (8) - HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),promotions,sum] - InputAdapter - Exchange #1 - WholeStageCodegen (7) - HashAggregate [ss_ext_sales_price] [sum,sum] - Project [ss_ext_sales_price] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_ext_sales_price] - BroadcastHashJoin [c_current_addr_sk,ca_address_sk] - Project [ss_item_sk,ss_ext_sales_price,c_current_addr_sk] - BroadcastHashJoin [ss_customer_sk,c_customer_sk] - Project [ss_item_sk,ss_customer_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_promo_sk,p_promo_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_promo_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Filter [ss_store_sk,ss_promo_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] +WholeStageCodegen (17) + Sort [promotions,total] + InputAdapter + Exchange [promotions,total] #1 + WholeStageCodegen (16) + Project [promotions,total] + InputAdapter + BroadcastNestedLoopJoin + WholeStageCodegen (8) + HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),promotions,sum] + InputAdapter + Exchange #2 + WholeStageCodegen (7) + HashAggregate [ss_ext_sales_price] [sum,sum] + Project [ss_ext_sales_price] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ext_sales_price] + BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + Project [ss_item_sk,ss_ext_sales_price,c_current_addr_sk] + BroadcastHashJoin [ss_customer_sk,c_customer_sk] + Project [ss_item_sk,ss_customer_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_promo_sk,p_promo_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_promo_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Filter [ss_store_sk,ss_promo_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + Project [s_store_sk] + Filter [s_gmt_offset,s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_gmt_offset] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (2) + Project 
[p_promo_sk] + Filter [p_channel_dmail,p_channel_email,p_channel_tv,p_promo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.promotion [p_promo_sk,p_channel_dmail,p_channel_email,p_channel_tv] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (3) + Project [d_date_sk] + Filter [d_year,d_moy,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (4) + Filter [c_customer_sk,c_current_addr_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price] - InputAdapter - BroadcastExchange #2 - WholeStageCodegen (1) - Project [s_store_sk] - Filter [s_gmt_offset,s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_gmt_offset] - InputAdapter - BroadcastExchange #3 - WholeStageCodegen (2) - Project [p_promo_sk] - Filter [p_channel_dmail,p_channel_email,p_channel_tv,p_promo_sk] + Scan parquet default.customer [c_customer_sk,c_current_addr_sk] + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (5) + Project [ca_address_sk] + Filter [ca_gmt_offset,ca_address_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_address [ca_address_sk,ca_gmt_offset] + InputAdapter + BroadcastExchange #8 + WholeStageCodegen (6) + Project [i_item_sk] + Filter [i_category,i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_category] + BroadcastExchange #9 + WholeStageCodegen (15) + HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),total,sum] + InputAdapter + Exchange #10 + WholeStageCodegen (14) + HashAggregate [ss_ext_sales_price] [sum,sum] + Project [ss_ext_sales_price] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ext_sales_price] + BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + Project [ss_item_sk,ss_ext_sales_price,c_current_addr_sk] + BroadcastHashJoin [ss_customer_sk,c_customer_sk] + Project [ss_item_sk,ss_customer_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_ext_sales_price] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Filter [ss_store_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] ColumnarToRow InputAdapter - Scan parquet default.promotion [p_promo_sk,p_channel_dmail,p_channel_email,p_channel_tv] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_year,d_moy,d_date_sk] - ColumnarToRow + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (4) - Filter [c_customer_sk,c_current_addr_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_current_addr_sk] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (5) - Project [ca_address_sk] - Filter [ca_gmt_offset,ca_address_sk] - ColumnarToRow + ReusedExchange [s_store_sk] #3 + InputAdapter + ReusedExchange [d_date_sk] #5 InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_gmt_offset] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (6) - Project [i_item_sk] - Filter [i_category,i_item_sk] - ColumnarToRow + ReusedExchange [c_customer_sk,c_current_addr_sk] #6 InputAdapter - Scan parquet default.item [i_item_sk,i_category] - BroadcastExchange #8 - WholeStageCodegen (15) - 
HashAggregate [sum] [sum(UnscaledValue(ss_ext_sales_price)),total,sum] - InputAdapter - Exchange #9 - WholeStageCodegen (14) - HashAggregate [ss_ext_sales_price] [sum,sum] - Project [ss_ext_sales_price] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_ext_sales_price] - BroadcastHashJoin [c_current_addr_sk,ca_address_sk] - Project [ss_item_sk,ss_ext_sales_price,c_current_addr_sk] - BroadcastHashJoin [ss_customer_sk,c_customer_sk] - Project [ss_item_sk,ss_customer_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_ext_sales_price] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Filter [ss_store_sk,ss_sold_date_sk,ss_customer_sk,ss_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk,ss_store_sk,ss_ext_sales_price] - InputAdapter - ReusedExchange [s_store_sk] #2 - InputAdapter - ReusedExchange [d_date_sk] #4 - InputAdapter - ReusedExchange [c_customer_sk,c_current_addr_sk] #5 - InputAdapter - ReusedExchange [ca_address_sk] #6 - InputAdapter - ReusedExchange [i_item_sk] #7 + ReusedExchange [ca_address_sk] #7 + InputAdapter + ReusedExchange [i_item_sk] #8 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt index e9a2b7a375b01..90e48794201c4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/explain.txt @@ -10,15 +10,15 @@ TakeOrderedAndProject (32) : :- * Project (16) : : +- * BroadcastHashJoin Inner BuildRight (15) : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : :- BroadcastExchange (5) - : : : : +- * Project (4) - : : : : +- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.date_dim (1) - : : : +- * Filter (8) - : : : +- * ColumnarToRow (7) - : : : +- Scan parquet default.web_sales (6) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.web_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) : : +- BroadcastExchange (14) : : +- * Filter (13) : : +- * ColumnarToRow (12) @@ -33,50 +33,50 @@ TakeOrderedAndProject (32) +- Scan parquet default.warehouse (23) -(1) Scan parquet default.date_dim -Output [2]: [d_date_sk#1, d_month_seq#2] +(1) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode_sk#4, ws_warehouse_sk#5] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/web_sales] +PushedFilters: [IsNotNull(ws_warehouse_sk), IsNotNull(ws_ship_mode_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_ship_date_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [2]: [d_date_sk#1, d_month_seq#2] +(2) ColumnarToRow [codegen id : 5] +Input [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode_sk#4, ws_warehouse_sk#5] -(3) Filter [codegen id : 1] -Input [2]: [d_date_sk#1, 
d_month_seq#2] -Condition : (((isnotnull(d_month_seq#2) AND (d_month_seq#2 >= 1200)) AND (d_month_seq#2 <= 1211)) AND isnotnull(d_date_sk#1)) +(3) Filter [codegen id : 5] +Input [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode_sk#4, ws_warehouse_sk#5] +Condition : (((isnotnull(ws_warehouse_sk#5) AND isnotnull(ws_ship_mode_sk#4)) AND isnotnull(ws_web_site_sk#3)) AND isnotnull(ws_ship_date_sk#2)) -(4) Project [codegen id : 1] -Output [1]: [d_date_sk#1] -Input [2]: [d_date_sk#1, d_month_seq#2] +(4) Scan parquet default.date_dim +Output [2]: [d_date_sk#6, d_month_seq#7] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] +ReadSchema: struct -(5) BroadcastExchange -Input [1]: [d_date_sk#1] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] +(5) ColumnarToRow [codegen id : 1] +Input [2]: [d_date_sk#6, d_month_seq#7] -(6) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, ws_ship_mode_sk#7, ws_warehouse_sk#8] -Batched: true -Location [not included in comparison]/{warehouse_dir}/web_sales] -PushedFilters: [IsNotNull(ws_warehouse_sk), IsNotNull(ws_ship_mode_sk), IsNotNull(ws_web_site_sk), IsNotNull(ws_ship_date_sk)] -ReadSchema: struct +(6) Filter [codegen id : 1] +Input [2]: [d_date_sk#6, d_month_seq#7] +Condition : (((isnotnull(d_month_seq#7) AND (d_month_seq#7 >= 1200)) AND (d_month_seq#7 <= 1211)) AND isnotnull(d_date_sk#6)) -(7) ColumnarToRow -Input [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, ws_ship_mode_sk#7, ws_warehouse_sk#8] +(7) Project [codegen id : 1] +Output [1]: [d_date_sk#6] +Input [2]: [d_date_sk#6, d_month_seq#7] -(8) Filter -Input [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, ws_ship_mode_sk#7, ws_warehouse_sk#8] -Condition : (((isnotnull(ws_warehouse_sk#8) AND isnotnull(ws_ship_mode_sk#7)) AND isnotnull(ws_web_site_sk#6)) AND isnotnull(ws_ship_date_sk#5)) +(8) BroadcastExchange +Input [1]: [d_date_sk#6] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#8] (9) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [d_date_sk#1] -Right keys [1]: [ws_ship_date_sk#5] +Left keys [1]: [ws_ship_date_sk#2] +Right keys [1]: [d_date_sk#6] Join condition: None (10) Project [codegen id : 5] -Output [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, ws_ship_mode_sk#7, ws_warehouse_sk#8] -Input [6]: [d_date_sk#1, ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, ws_ship_mode_sk#7, ws_warehouse_sk#8] +Output [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode_sk#4, ws_warehouse_sk#5] +Input [6]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode_sk#4, ws_warehouse_sk#5, d_date_sk#6] (11) Scan parquet default.web_site Output [2]: [web_site_sk#9, web_name#10] @@ -97,13 +97,13 @@ Input [2]: [web_site_sk#9, web_name#10] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#11] (15) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ws_web_site_sk#6] +Left keys [1]: [ws_web_site_sk#3] Right keys [1]: [web_site_sk#9] Join condition: None (16) Project [codegen id : 5] -Output [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_ship_mode_sk#7, ws_warehouse_sk#8, web_name#10] -Input [7]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_web_site_sk#6, 
ws_ship_mode_sk#7, ws_warehouse_sk#8, web_site_sk#9, web_name#10] +Output [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_ship_mode_sk#4, ws_warehouse_sk#5, web_name#10] +Input [7]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_web_site_sk#3, ws_ship_mode_sk#4, ws_warehouse_sk#5, web_site_sk#9, web_name#10] (17) Scan parquet default.ship_mode Output [2]: [sm_ship_mode_sk#12, sm_type#13] @@ -124,13 +124,13 @@ Input [2]: [sm_ship_mode_sk#12, sm_type#13] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#14] (21) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ws_ship_mode_sk#7] +Left keys [1]: [ws_ship_mode_sk#4] Right keys [1]: [sm_ship_mode_sk#12] Join condition: None (22) Project [codegen id : 5] -Output [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_warehouse_sk#8, web_name#10, sm_type#13] -Input [7]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_ship_mode_sk#7, ws_warehouse_sk#8, web_name#10, sm_ship_mode_sk#12, sm_type#13] +Output [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_warehouse_sk#5, web_name#10, sm_type#13] +Input [7]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_ship_mode_sk#4, ws_warehouse_sk#5, web_name#10, sm_ship_mode_sk#12, sm_type#13] (23) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#15, w_warehouse_name#16] @@ -151,31 +151,31 @@ Input [2]: [w_warehouse_sk#15, w_warehouse_name#16] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#17] (27) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ws_warehouse_sk#8] +Left keys [1]: [ws_warehouse_sk#5] Right keys [1]: [w_warehouse_sk#15] Join condition: None (28) Project [codegen id : 5] -Output [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, w_warehouse_name#16, sm_type#13, web_name#10] -Input [7]: [ws_sold_date_sk#4, ws_ship_date_sk#5, ws_warehouse_sk#8, web_name#10, sm_type#13, w_warehouse_sk#15, w_warehouse_name#16] +Output [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, w_warehouse_name#16, sm_type#13, web_name#10] +Input [7]: [ws_sold_date_sk#1, ws_ship_date_sk#2, ws_warehouse_sk#5, web_name#10, sm_type#13, w_warehouse_sk#15, w_warehouse_name#16] (29) HashAggregate [codegen id : 5] -Input [5]: [ws_sold_date_sk#4, ws_ship_date_sk#5, w_warehouse_name#16, sm_type#13, web_name#10] +Input [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, w_warehouse_name#16, sm_type#13, web_name#10] Keys [3]: [substr(w_warehouse_name#16, 1, 20) AS substr(w_warehouse_name#16, 1, 20)#18, sm_type#13, web_name#10] -Functions [5]: [partial_sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 30) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 60) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 90) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))] +Functions [5]: [partial_sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 
90)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] Aggregate Attributes [5]: [sum#19, sum#20, sum#21, sum#22, sum#23] Results [8]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#13, web_name#10, sum#24, sum#25, sum#26, sum#27, sum#28] (30) Exchange Input [8]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#13, web_name#10, sum#24, sum#25, sum#26, sum#27, sum#28] -Arguments: hashpartitioning(substr(w_warehouse_name#16, 1, 20)#18, sm_type#13, web_name#10, 5), true, [id=#29] +Arguments: hashpartitioning(substr(w_warehouse_name#16, 1, 20)#18, sm_type#13, web_name#10, 5), ENSURE_REQUIREMENTS, [id=#29] (31) HashAggregate [codegen id : 6] Input [8]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#13, web_name#10, sum#24, sum#25, sum#26, sum#27, sum#28] Keys [3]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#13, web_name#10] -Functions [5]: [sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 30) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 60) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 90) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [5]: [sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint))#30, sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 30) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint))#31, sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 60) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint))#32, sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 90) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint))#33, sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))#34] -Results [8]: [substr(w_warehouse_name#16, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#13, web_name#10, sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint))#30 AS 30 days #36, sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 30) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint))#31 AS 31 - 60 days #37, sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 60) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint))#32 AS 61 - 90 days #38, sum(cast(CASE WHEN (((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 90) AND ((ws_ship_date_sk#5 - ws_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint))#33 AS 91 - 120 days #39, sum(cast(CASE WHEN ((ws_ship_date_sk#5 - ws_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))#34 AS >120 days #40] +Functions [5]: [sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END), sum(CASE WHEN (((ws_ship_date_sk#2 - 
ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END), sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] +Aggregate Attributes [5]: [sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END)#33, sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34] +Results [8]: [substr(w_warehouse_name#16, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#13, web_name#10, sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30 AS 30 days #36, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31 AS 31 - 60 days #37, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32 AS 61 - 90 days #38, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END)#33 AS 91 - 120 days #39, sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34 AS >120 days #40] (32) TakeOrderedAndProject Input [8]: [substr(w_warehouse_name, 1, 20)#35, sm_type#13, web_name#10, 30 days #36, 31 - 60 days #37, 61 - 90 days #38, 91 - 120 days #39, >120 days #40] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/simplified.txt index 59cfc4b7b249a..a2e1d28e1b911 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62.sf100/simplified.txt @@ -1,6 +1,6 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,web_name,30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ] WholeStageCodegen (6) - HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,web_name,sum,sum,sum,sum,sum] [sum(cast(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) <= 30) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 30) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 60)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 60) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 90)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 90) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 120)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) > 120) THEN 1 ELSE 0 END as bigint)),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] + HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,web_name,sum,sum,sum,sum,sum] [sum(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) <= 30) THEN 1 ELSE 0 END),sum(CASE WHEN (((ws_ship_date_sk - 
ws_sold_date_sk) > 30) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 60)) THEN 1 ELSE 0 END),sum(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 60) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 90)) THEN 1 ELSE 0 END),sum(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 90) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 120)) THEN 1 ELSE 0 END),sum(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) > 120) THEN 1 ELSE 0 END),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] InputAdapter Exchange [substr(w_warehouse_name, 1, 20),sm_type,web_name] #1 WholeStageCodegen (5) @@ -12,7 +12,11 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,web_name,30 days Project [ws_sold_date_sk,ws_ship_date_sk,ws_ship_mode_sk,ws_warehouse_sk,web_name] BroadcastHashJoin [ws_web_site_sk,web_site_sk] Project [ws_sold_date_sk,ws_ship_date_sk,ws_web_site_sk,ws_ship_mode_sk,ws_warehouse_sk] - BroadcastHashJoin [d_date_sk,ws_ship_date_sk] + BroadcastHashJoin [ws_ship_date_sk,d_date_sk] + Filter [ws_warehouse_sk,ws_ship_mode_sk,ws_web_site_sk,ws_ship_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_ship_date_sk,ws_web_site_sk,ws_ship_mode_sk,ws_warehouse_sk] InputAdapter BroadcastExchange #2 WholeStageCodegen (1) @@ -21,10 +25,6 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,web_name,30 days ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_month_seq] - Filter [ws_warehouse_sk,ws_ship_mode_sk,ws_web_site_sk,ws_ship_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_ship_date_sk,ws_web_site_sk,ws_ship_mode_sk,ws_warehouse_sk] InputAdapter BroadcastExchange #3 WholeStageCodegen (2) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt index 05ce467c349a3..b6c467d0e9863 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/explain.txt @@ -162,20 +162,20 @@ Input [6]: [ws_sold_date_sk#1, ws_ship_date_sk#2, w_warehouse_name#7, sm_type#10 (29) HashAggregate [codegen id : 5] Input [5]: [ws_sold_date_sk#1, ws_ship_date_sk#2, w_warehouse_name#7, sm_type#10, web_name#13] Keys [3]: [substr(w_warehouse_name#7, 1, 20) AS substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, web_name#13] -Functions [5]: [partial_sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))] +Functions [5]: [partial_sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) 
> 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] Aggregate Attributes [5]: [sum#19, sum#20, sum#21, sum#22, sum#23] Results [8]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, web_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] (30) Exchange Input [8]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, web_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] -Arguments: hashpartitioning(substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, web_name#13, 5), true, [id=#29] +Arguments: hashpartitioning(substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, web_name#13, 5), ENSURE_REQUIREMENTS, [id=#29] (31) HashAggregate [codegen id : 6] Input [8]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, web_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] Keys [3]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, web_name#13] -Functions [5]: [sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [5]: [sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint))#30, sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint))#31, sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint))#32, sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint))#33, sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))#34] -Results [8]: [substr(w_warehouse_name#7, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#10, web_name#13, sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint))#30 AS 30 days #36, sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint))#31 AS 31 - 60 days #37, sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint))#32 AS 61 - 90 days #38, sum(cast(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint))#33 AS 91 - 120 days #39, sum(cast(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))#34 AS >120 days #40] +Functions [5]: [sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 
END), sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END), sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] +Aggregate Attributes [5]: [sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END)#33, sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34] +Results [8]: [substr(w_warehouse_name#7, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#10, web_name#13, sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30 AS 30 days #36, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 30) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31 AS 31 - 60 days #37, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 60) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32 AS 61 - 90 days #38, sum(CASE WHEN (((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 90) AND ((ws_ship_date_sk#2 - ws_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END)#33 AS 91 - 120 days #39, sum(CASE WHEN ((ws_ship_date_sk#2 - ws_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34 AS >120 days #40] (32) TakeOrderedAndProject Input [8]: [substr(w_warehouse_name, 1, 20)#35, sm_type#10, web_name#13, 30 days #36, 31 - 60 days #37, 61 - 90 days #38, 91 - 120 days #39, >120 days #40] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/simplified.txt index 803326b2afd30..017ba3adcefe9 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q62/simplified.txt @@ -1,6 +1,6 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,web_name,30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ] WholeStageCodegen (6) - HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,web_name,sum,sum,sum,sum,sum] [sum(cast(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) <= 30) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 30) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 60)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 60) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 90)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 90) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 120)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) > 120) THEN 1 ELSE 0 END as bigint)),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] + HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,web_name,sum,sum,sum,sum,sum] [sum(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) <= 30) THEN 1 ELSE 0 END),sum(CASE WHEN 
(((ws_ship_date_sk - ws_sold_date_sk) > 30) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 60)) THEN 1 ELSE 0 END),sum(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 60) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 90)) THEN 1 ELSE 0 END),sum(CASE WHEN (((ws_ship_date_sk - ws_sold_date_sk) > 90) AND ((ws_ship_date_sk - ws_sold_date_sk) <= 120)) THEN 1 ELSE 0 END),sum(CASE WHEN ((ws_ship_date_sk - ws_sold_date_sk) > 120) THEN 1 ELSE 0 END),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] InputAdapter Exchange [substr(w_warehouse_name, 1, 20),sm_type,web_name] #1 WholeStageCodegen (5) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt index 4b863587b08d9..5db04537d6371 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/explain.txt @@ -14,15 +14,15 @@ TakeOrderedAndProject (55) : : :- * Project (17) : : : +- * BroadcastHashJoin Inner BuildRight (16) : : : :- * Project (10) - : : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : : :- BroadcastExchange (5) - : : : : : +- * Project (4) - : : : : : +- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.ship_mode (1) - : : : : +- * Filter (8) - : : : : +- * ColumnarToRow (7) - : : : : +- Scan parquet default.web_sales (6) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.web_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.ship_mode (4) : : : +- BroadcastExchange (15) : : : +- * Project (14) : : : +- * Filter (13) @@ -46,60 +46,60 @@ TakeOrderedAndProject (55) : :- * Project (41) : : +- * BroadcastHashJoin Inner BuildRight (40) : : :- * Project (38) - : : : +- * BroadcastHashJoin Inner BuildLeft (37) - : : : :- ReusedExchange (33) - : : : +- * Filter (36) - : : : +- * ColumnarToRow (35) - : : : +- Scan parquet default.catalog_sales (34) + : : : +- * BroadcastHashJoin Inner BuildRight (37) + : : : :- * Filter (35) + : : : : +- * ColumnarToRow (34) + : : : : +- Scan parquet default.catalog_sales (33) + : : : +- ReusedExchange (36) : : +- ReusedExchange (39) : +- ReusedExchange (42) +- ReusedExchange (45) -(1) Scan parquet default.ship_mode -Output [2]: [sm_ship_mode_sk#1, sm_carrier#2] +(1) Scan parquet default.web_sales +Output [7]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7] Batched: true -Location [not included in comparison]/{warehouse_dir}/ship_mode] -PushedFilters: [In(sm_carrier, [DHL,BARIAN]), IsNotNull(sm_ship_mode_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/web_sales] +PushedFilters: [IsNotNull(ws_warehouse_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_ship_mode_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [2]: [sm_ship_mode_sk#1, sm_carrier#2] +(2) ColumnarToRow [codegen id : 5] +Input [7]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7] -(3) Filter [codegen id : 1] -Input [2]: [sm_ship_mode_sk#1, 
sm_carrier#2] -Condition : (sm_carrier#2 IN (DHL,BARIAN) AND isnotnull(sm_ship_mode_sk#1)) +(3) Filter [codegen id : 5] +Input [7]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7] +Condition : (((isnotnull(ws_warehouse_sk#4) AND isnotnull(ws_sold_date_sk#1)) AND isnotnull(ws_sold_time_sk#2)) AND isnotnull(ws_ship_mode_sk#3)) -(4) Project [codegen id : 1] -Output [1]: [sm_ship_mode_sk#1] -Input [2]: [sm_ship_mode_sk#1, sm_carrier#2] +(4) Scan parquet default.ship_mode +Output [2]: [sm_ship_mode_sk#8, sm_carrier#9] +Batched: true +Location [not included in comparison]/{warehouse_dir}/ship_mode] +PushedFilters: [In(sm_carrier, [DHL,BARIAN]), IsNotNull(sm_ship_mode_sk)] +ReadSchema: struct -(5) BroadcastExchange -Input [1]: [sm_ship_mode_sk#1] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] +(5) ColumnarToRow [codegen id : 1] +Input [2]: [sm_ship_mode_sk#8, sm_carrier#9] -(6) Scan parquet default.web_sales -Output [7]: [ws_sold_date_sk#4, ws_sold_time_sk#5, ws_ship_mode_sk#6, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10] -Batched: true -Location [not included in comparison]/{warehouse_dir}/web_sales] -PushedFilters: [IsNotNull(ws_warehouse_sk), IsNotNull(ws_sold_date_sk), IsNotNull(ws_sold_time_sk), IsNotNull(ws_ship_mode_sk)] -ReadSchema: struct +(6) Filter [codegen id : 1] +Input [2]: [sm_ship_mode_sk#8, sm_carrier#9] +Condition : (sm_carrier#9 IN (DHL,BARIAN) AND isnotnull(sm_ship_mode_sk#8)) -(7) ColumnarToRow -Input [7]: [ws_sold_date_sk#4, ws_sold_time_sk#5, ws_ship_mode_sk#6, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10] +(7) Project [codegen id : 1] +Output [1]: [sm_ship_mode_sk#8] +Input [2]: [sm_ship_mode_sk#8, sm_carrier#9] -(8) Filter -Input [7]: [ws_sold_date_sk#4, ws_sold_time_sk#5, ws_ship_mode_sk#6, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10] -Condition : (((isnotnull(ws_warehouse_sk#7) AND isnotnull(ws_sold_date_sk#4)) AND isnotnull(ws_sold_time_sk#5)) AND isnotnull(ws_ship_mode_sk#6)) +(8) BroadcastExchange +Input [1]: [sm_ship_mode_sk#8] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#10] (9) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [sm_ship_mode_sk#1] -Right keys [1]: [ws_ship_mode_sk#6] +Left keys [1]: [ws_ship_mode_sk#3] +Right keys [1]: [sm_ship_mode_sk#8] Join condition: None (10) Project [codegen id : 5] -Output [6]: [ws_sold_date_sk#4, ws_sold_time_sk#5, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10] -Input [8]: [sm_ship_mode_sk#1, ws_sold_date_sk#4, ws_sold_time_sk#5, ws_ship_mode_sk#6, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10] +Output [6]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7] +Input [8]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_ship_mode_sk#3, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7, sm_ship_mode_sk#8] (11) Scan parquet default.time_dim Output [2]: [t_time_sk#11, t_time#12] @@ -124,13 +124,13 @@ Input [1]: [t_time_sk#11] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#13] (16) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ws_sold_time_sk#5] +Left keys [1]: [ws_sold_time_sk#2] Right keys [1]: [t_time_sk#11] Join condition: None (17) Project [codegen id : 5] -Output [5]: [ws_sold_date_sk#4, 
ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10] -Input [7]: [ws_sold_date_sk#4, ws_sold_time_sk#5, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10, t_time_sk#11] +Output [5]: [ws_sold_date_sk#1, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7] +Input [7]: [ws_sold_date_sk#1, ws_sold_time_sk#2, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7, t_time_sk#11] (18) Scan parquet default.date_dim Output [3]: [d_date_sk#14, d_year#15, d_moy#16] @@ -151,13 +151,13 @@ Input [3]: [d_date_sk#14, d_year#15, d_moy#16] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#17] (22) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ws_sold_date_sk#4] +Left keys [1]: [ws_sold_date_sk#1] Right keys [1]: [d_date_sk#14] Join condition: None (23) Project [codegen id : 5] -Output [6]: [ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10, d_year#15, d_moy#16] -Input [8]: [ws_sold_date_sk#4, ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10, d_date_sk#14, d_year#15, d_moy#16] +Output [6]: [ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7, d_year#15, d_moy#16] +Input [8]: [ws_sold_date_sk#1, ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7, d_date_sk#14, d_year#15, d_moy#16] (24) Scan parquet default.warehouse Output [7]: [w_warehouse_sk#18, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24] @@ -178,18 +178,18 @@ Input [7]: [w_warehouse_sk#18, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#25] (28) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ws_warehouse_sk#7] +Left keys [1]: [ws_warehouse_sk#4] Right keys [1]: [w_warehouse_sk#18] Join condition: None (29) Project [codegen id : 5] -Output [11]: [ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15, d_moy#16] -Input [13]: [ws_warehouse_sk#7, ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10, d_year#15, d_moy#16, w_warehouse_sk#18, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24] +Output [11]: [ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15, d_moy#16] +Input [13]: [ws_warehouse_sk#4, ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7, d_year#15, d_moy#16, w_warehouse_sk#18, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24] (30) HashAggregate [codegen id : 5] -Input [11]: [ws_quantity#8, ws_ext_sales_price#9, ws_net_paid#10, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15, d_moy#16] +Input [11]: [ws_quantity#5, ws_ext_sales_price#6, ws_net_paid#7, w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15, d_moy#16] Keys [7]: [w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15] -Functions [24]: [partial_sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 
2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as 
decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)] +Functions [24]: [partial_sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as 
decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 10) THEN 
CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), partial_sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)] Aggregate Attributes [48]: [sum#26, isEmpty#27, sum#28, isEmpty#29, sum#30, isEmpty#31, sum#32, isEmpty#33, sum#34, isEmpty#35, sum#36, isEmpty#37, sum#38, isEmpty#39, sum#40, isEmpty#41, sum#42, isEmpty#43, sum#44, isEmpty#45, sum#46, isEmpty#47, sum#48, isEmpty#49, sum#50, isEmpty#51, sum#52, isEmpty#53, sum#54, isEmpty#55, sum#56, isEmpty#57, sum#58, isEmpty#59, sum#60, isEmpty#61, sum#62, isEmpty#63, sum#64, isEmpty#65, sum#66, isEmpty#67, sum#68, isEmpty#69, sum#70, isEmpty#71, sum#72, isEmpty#73] Results [55]: [w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15, sum#74, isEmpty#75, sum#76, isEmpty#77, sum#78, isEmpty#79, sum#80, isEmpty#81, sum#82, isEmpty#83, sum#84, isEmpty#85, sum#86, isEmpty#87, sum#88, isEmpty#89, sum#90, isEmpty#91, sum#92, isEmpty#93, sum#94, isEmpty#95, sum#96, isEmpty#97, sum#98, isEmpty#99, sum#100, isEmpty#101, sum#102, isEmpty#103, sum#104, isEmpty#105, sum#106, isEmpty#107, sum#108, isEmpty#109, sum#110, isEmpty#111, sum#112, isEmpty#113, sum#114, isEmpty#115, sum#116, isEmpty#117, sum#118, isEmpty#119, sum#120, isEmpty#121] @@ -200,35 +200,35 @@ Arguments: hashpartitioning(w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21 (32) HashAggregate [codegen id : 6] Input [55]: [w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15, sum#74, isEmpty#75, sum#76, isEmpty#77, sum#78, isEmpty#79, sum#80, isEmpty#81, sum#82, isEmpty#83, sum#84, isEmpty#85, sum#86, isEmpty#87, sum#88, isEmpty#89, sum#90, isEmpty#91, sum#92, isEmpty#93, sum#94, isEmpty#95, sum#96, isEmpty#97, sum#98, isEmpty#99, sum#100, isEmpty#101, sum#102, isEmpty#103, sum#104, isEmpty#105, sum#106, isEmpty#107, sum#108, isEmpty#109, sum#110, isEmpty#111, sum#112, isEmpty#113, sum#114, isEmpty#115, sum#116, isEmpty#117, sum#118, isEmpty#119, sum#120, isEmpty#121] Keys [7]: [w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, d_year#15] -Functions [24]: [sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 
as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), 
DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)] -Aggregate Attributes [24]: [sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#123, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#124, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#125, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#126, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#127, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#128, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#129, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#130, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#131, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as 
decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#132, sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#133, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#134, sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#135, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#136, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#137, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#138, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#139, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#140, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#141, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#142, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#143, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#144, sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#145, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#146] -Results [32]: [w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, DHL,BARIAN AS ship_carriers#147, d_year#15 AS year#148, sum(CASE WHEN 
(d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#123 AS jan_sales#149, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#124 AS feb_sales#150, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#125 AS mar_sales#151, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#126 AS apr_sales#152, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#127 AS may_sales#153, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#128 AS jun_sales#154, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#129 AS jul_sales#155, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#130 AS aug_sales#156, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#131 AS sep_sales#157, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#132 AS oct_sales#158, sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#133 AS nov_sales#159, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#9 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#134 AS dec_sales#160, sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#135 AS jan_net#161, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#136 
AS feb_net#162, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#137 AS mar_net#163, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#138 AS apr_net#164, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#139 AS may_net#165, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#140 AS jun_net#166, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#141 AS jul_net#167, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#142 AS aug_net#168, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#143 AS sep_net#169, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#144 AS oct_net#170, sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#145 AS nov_net#171, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#10 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#8 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#146 AS dec_net#172] +Functions [24]: [sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 5) THEN 
CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as 
decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END), sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)] +Aggregate Attributes [24]: [sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#123, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#124, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#125, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#126, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#127, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#128, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#129, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#130, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#131, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#132, sum(CASE WHEN (d_moy#16 = 11) THEN 
CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#133, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#134, sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#135, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#136, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#137, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#138, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#139, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#140, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#141, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#142, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#143, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#144, sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#145, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#146] +Results [32]: [w_warehouse_name#19, w_warehouse_sq_ft#20, w_city#21, w_county#22, w_state#23, w_country#24, DHL,BARIAN AS ship_carriers#147, d_year#15 AS year#148, sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * 
promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#123 AS jan_sales#149, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#124 AS feb_sales#150, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#125 AS mar_sales#151, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#126 AS apr_sales#152, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#127 AS may_sales#153, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#128 AS jun_sales#154, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#129 AS jul_sales#155, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#130 AS aug_sales#156, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#131 AS sep_sales#157, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#132 AS oct_sales#158, sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#133 AS nov_sales#159, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_ext_sales_price#6 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#134 AS dec_sales#160, sum(CASE WHEN (d_moy#16 = 1) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#135 AS jan_net#161, sum(CASE WHEN (d_moy#16 = 2) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#136 AS feb_net#162, sum(CASE WHEN (d_moy#16 = 3) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 
as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#137 AS mar_net#163, sum(CASE WHEN (d_moy#16 = 4) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#138 AS apr_net#164, sum(CASE WHEN (d_moy#16 = 5) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#139 AS may_net#165, sum(CASE WHEN (d_moy#16 = 6) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#140 AS jun_net#166, sum(CASE WHEN (d_moy#16 = 7) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#141 AS jul_net#167, sum(CASE WHEN (d_moy#16 = 8) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#142 AS aug_net#168, sum(CASE WHEN (d_moy#16 = 9) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#143 AS sep_net#169, sum(CASE WHEN (d_moy#16 = 10) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#144 AS oct_net#170, sum(CASE WHEN (d_moy#16 = 11) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#145 AS nov_net#171, sum(CASE WHEN (d_moy#16 = 12) THEN CheckOverflow((promote_precision(cast(ws_net_paid#7 as decimal(12,2))) * promote_precision(cast(cast(ws_quantity#5 as decimal(10,0)) as decimal(12,2)))), DecimalType(18,2), true) ELSE 0.00 END)#146 AS dec_net#172] -(33) ReusedExchange [Reuses operator id: 5] -Output [1]: [sm_ship_mode_sk#1] - -(34) Scan parquet default.catalog_sales +(33) Scan parquet default.catalog_sales Output [7]: [cs_sold_date_sk#173, cs_sold_time_sk#174, cs_ship_mode_sk#175, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_sold_time_sk), IsNotNull(cs_ship_mode_sk)] ReadSchema: struct -(35) ColumnarToRow +(34) ColumnarToRow [codegen id : 11] Input [7]: [cs_sold_date_sk#173, cs_sold_time_sk#174, cs_ship_mode_sk#175, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179] -(36) Filter +(35) Filter [codegen id : 11] Input [7]: [cs_sold_date_sk#173, cs_sold_time_sk#174, cs_ship_mode_sk#175, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179] Condition : (((isnotnull(cs_warehouse_sk#176) AND isnotnull(cs_sold_date_sk#173)) AND isnotnull(cs_sold_time_sk#174)) AND isnotnull(cs_ship_mode_sk#175)) +(36) ReusedExchange [Reuses 
operator id: 8] +Output [1]: [sm_ship_mode_sk#8] + (37) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [sm_ship_mode_sk#1] -Right keys [1]: [cs_ship_mode_sk#175] +Left keys [1]: [cs_ship_mode_sk#175] +Right keys [1]: [sm_ship_mode_sk#8] Join condition: None (38) Project [codegen id : 11] Output [6]: [cs_sold_date_sk#173, cs_sold_time_sk#174, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179] -Input [8]: [sm_ship_mode_sk#1, cs_sold_date_sk#173, cs_sold_time_sk#174, cs_ship_mode_sk#175, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179] +Input [8]: [cs_sold_date_sk#173, cs_sold_time_sk#174, cs_ship_mode_sk#175, cs_warehouse_sk#176, cs_quantity#177, cs_sales_price#178, cs_net_paid_inc_tax#179, sm_ship_mode_sk#8] (39) ReusedExchange [Reuses operator id: 15] Output [1]: [t_time_sk#11] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/simplified.txt index 465d269a847c3..ddfb04d8df5e3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q66.sf100/simplified.txt @@ -20,7 +20,11 @@ TakeOrderedAndProject [w_warehouse_name,w_warehouse_sq_ft,w_city,w_county,w_stat Project [ws_sold_date_sk,ws_warehouse_sk,ws_quantity,ws_ext_sales_price,ws_net_paid] BroadcastHashJoin [ws_sold_time_sk,t_time_sk] Project [ws_sold_date_sk,ws_sold_time_sk,ws_warehouse_sk,ws_quantity,ws_ext_sales_price,ws_net_paid] - BroadcastHashJoin [sm_ship_mode_sk,ws_ship_mode_sk] + BroadcastHashJoin [ws_ship_mode_sk,sm_ship_mode_sk] + Filter [ws_warehouse_sk,ws_sold_date_sk,ws_sold_time_sk,ws_ship_mode_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_sold_time_sk,ws_ship_mode_sk,ws_warehouse_sk,ws_quantity,ws_ext_sales_price,ws_net_paid] InputAdapter BroadcastExchange #3 WholeStageCodegen (1) @@ -29,10 +33,6 @@ TakeOrderedAndProject [w_warehouse_name,w_warehouse_sq_ft,w_city,w_county,w_stat ColumnarToRow InputAdapter Scan parquet default.ship_mode [sm_ship_mode_sk,sm_carrier] - Filter [ws_warehouse_sk,ws_sold_date_sk,ws_sold_time_sk,ws_ship_mode_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_sold_time_sk,ws_ship_mode_sk,ws_warehouse_sk,ws_quantity,ws_ext_sales_price,ws_net_paid] InputAdapter BroadcastExchange #4 WholeStageCodegen (2) @@ -68,13 +68,13 @@ TakeOrderedAndProject [w_warehouse_name,w_warehouse_sq_ft,w_city,w_county,w_stat Project [cs_sold_date_sk,cs_warehouse_sk,cs_quantity,cs_sales_price,cs_net_paid_inc_tax] BroadcastHashJoin [cs_sold_time_sk,t_time_sk] Project [cs_sold_date_sk,cs_sold_time_sk,cs_warehouse_sk,cs_quantity,cs_sales_price,cs_net_paid_inc_tax] - BroadcastHashJoin [sm_ship_mode_sk,cs_ship_mode_sk] - InputAdapter - ReusedExchange [sm_ship_mode_sk] #3 + BroadcastHashJoin [cs_ship_mode_sk,sm_ship_mode_sk] Filter [cs_warehouse_sk,cs_sold_date_sk,cs_sold_time_sk,cs_ship_mode_sk] ColumnarToRow InputAdapter Scan parquet default.catalog_sales [cs_sold_date_sk,cs_sold_time_sk,cs_ship_mode_sk,cs_warehouse_sk,cs_quantity,cs_sales_price,cs_net_paid_inc_tax] + InputAdapter + ReusedExchange [sm_ship_mode_sk] #3 InputAdapter ReusedExchange [t_time_sk] #4 InputAdapter diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt index a100b6659f162..3007b11a1a860 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/explain.txt @@ -11,64 +11,64 @@ TakeOrderedAndProject (79) : +- * BroadcastHashJoin LeftOuter BuildRight (65) : :- * Project (60) : : +- * SortMergeJoin Inner (59) - : : :- * Sort (47) - : : : +- Exchange (46) - : : : +- * Project (45) - : : : +- * BroadcastHashJoin Inner BuildRight (44) - : : : :- * Project (32) - : : : : +- * SortMergeJoin Inner (31) - : : : : :- * Sort (25) - : : : : : +- Exchange (24) - : : : : : +- * Project (23) - : : : : : +- * BroadcastHashJoin Inner BuildRight (22) - : : : : : :- * Project (17) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (16) - : : : : : : :- * Project (10) - : : : : : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : : : : : :- BroadcastExchange (5) - : : : : : : : : +- * Project (4) - : : : : : : : : +- * Filter (3) - : : : : : : : : +- * ColumnarToRow (2) - : : : : : : : : +- Scan parquet default.household_demographics (1) - : : : : : : : +- * Filter (8) - : : : : : : : +- * ColumnarToRow (7) - : : : : : : : +- Scan parquet default.catalog_sales (6) - : : : : : : +- BroadcastExchange (15) - : : : : : : +- * Project (14) - : : : : : : +- * Filter (13) - : : : : : : +- * ColumnarToRow (12) - : : : : : : +- Scan parquet default.customer_demographics (11) - : : : : : +- BroadcastExchange (21) - : : : : : +- * Filter (20) - : : : : : +- * ColumnarToRow (19) - : : : : : +- Scan parquet default.date_dim (18) - : : : : +- * Sort (30) - : : : : +- Exchange (29) - : : : : +- * Filter (28) - : : : : +- * ColumnarToRow (27) - : : : : +- Scan parquet default.item (26) - : : : +- BroadcastExchange (43) - : : : +- * Project (42) - : : : +- * BroadcastHashJoin Inner BuildRight (41) - : : : :- * Filter (35) - : : : : +- * ColumnarToRow (34) - : : : : +- Scan parquet default.date_dim (33) - : : : +- BroadcastExchange (40) - : : : +- * Project (39) - : : : +- * Filter (38) - : : : +- * ColumnarToRow (37) - : : : +- Scan parquet default.date_dim (36) + : : :- * Sort (34) + : : : +- Exchange (33) + : : : +- * Project (32) + : : : +- * SortMergeJoin Inner (31) + : : : :- * Sort (25) + : : : : +- Exchange (24) + : : : : +- * Project (23) + : : : : +- * BroadcastHashJoin Inner BuildRight (22) + : : : : :- * Project (17) + : : : : : +- * BroadcastHashJoin Inner BuildRight (16) + : : : : : :- * Project (10) + : : : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : : : :- * Filter (3) + : : : : : : : +- * ColumnarToRow (2) + : : : : : : : +- Scan parquet default.catalog_sales (1) + : : : : : : +- BroadcastExchange (8) + : : : : : : +- * Project (7) + : : : : : : +- * Filter (6) + : : : : : : +- * ColumnarToRow (5) + : : : : : : +- Scan parquet default.household_demographics (4) + : : : : : +- BroadcastExchange (15) + : : : : : +- * Project (14) + : : : : : +- * Filter (13) + : : : : : +- * ColumnarToRow (12) + : : : : : +- Scan parquet default.customer_demographics (11) + : : : : +- BroadcastExchange (21) + : : : : +- * Filter (20) + : : : : +- * ColumnarToRow (19) + : : : : +- Scan parquet default.date_dim (18) + : : : +- * Sort (30) + : : : +- Exchange (29) + : : : +- * Filter (28) + : : : +- * ColumnarToRow (27) + : : : +- Scan parquet default.item (26) : : +- * Sort (58) : : +- Exchange (57) : : +- * Project (56) - : : +- * 
BroadcastHashJoin Inner BuildLeft (55) - : : :- BroadcastExchange (51) - : : : +- * Filter (50) - : : : +- * ColumnarToRow (49) - : : : +- Scan parquet default.warehouse (48) - : : +- * Filter (54) - : : +- * ColumnarToRow (53) - : : +- Scan parquet default.inventory (52) + : : +- * BroadcastHashJoin Inner BuildRight (55) + : : :- * Project (50) + : : : +- * BroadcastHashJoin Inner BuildLeft (49) + : : : :- BroadcastExchange (45) + : : : : +- * Project (44) + : : : : +- * BroadcastHashJoin Inner BuildLeft (43) + : : : : :- BroadcastExchange (39) + : : : : : +- * Project (38) + : : : : : +- * Filter (37) + : : : : : +- * ColumnarToRow (36) + : : : : : +- Scan parquet default.date_dim (35) + : : : : +- * Filter (42) + : : : : +- * ColumnarToRow (41) + : : : : +- Scan parquet default.date_dim (40) + : : : +- * Filter (48) + : : : +- * ColumnarToRow (47) + : : : +- Scan parquet default.inventory (46) + : : +- BroadcastExchange (54) + : : +- * Filter (53) + : : +- * ColumnarToRow (52) + : : +- Scan parquet default.warehouse (51) : +- BroadcastExchange (64) : +- * Filter (63) : +- * ColumnarToRow (62) @@ -80,50 +80,50 @@ TakeOrderedAndProject (79) +- Scan parquet default.catalog_returns (69) -(1) Scan parquet default.household_demographics -Output [2]: [hd_demo_sk#1, hd_buy_potential#2] +(1) Scan parquet default.catalog_sales +Output [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] Batched: true -Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,>10000), IsNotNull(hd_demo_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/catalog_sales] +PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [2]: [hd_demo_sk#1, hd_buy_potential#2] +(2) ColumnarToRow [codegen id : 4] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] -(3) Filter [codegen id : 1] -Input [2]: [hd_demo_sk#1, hd_buy_potential#2] -Condition : ((isnotnull(hd_buy_potential#2) AND (hd_buy_potential#2 = >10000)) AND isnotnull(hd_demo_sk#1)) +(3) Filter [codegen id : 4] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] +Condition : (((((isnotnull(cs_quantity#8) AND isnotnull(cs_item_sk#5)) AND isnotnull(cs_bill_cdemo_sk#3)) AND isnotnull(cs_bill_hdemo_sk#4)) AND isnotnull(cs_sold_date_sk#1)) AND isnotnull(cs_ship_date_sk#2)) -(4) Project [codegen id : 1] -Output [1]: [hd_demo_sk#1] -Input [2]: [hd_demo_sk#1, hd_buy_potential#2] +(4) Scan parquet default.household_demographics +Output [2]: [hd_demo_sk#9, hd_buy_potential#10] +Batched: true +Location [not included in comparison]/{warehouse_dir}/household_demographics] +PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,>10000), IsNotNull(hd_demo_sk)] +ReadSchema: struct -(5) BroadcastExchange -Input [1]: [hd_demo_sk#1] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] +(5) ColumnarToRow [codegen id : 1] +Input [2]: [hd_demo_sk#9, hd_buy_potential#10] -(6) Scan parquet default.catalog_sales -Output [8]: 
[cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Batched: true -Location [not included in comparison]/{warehouse_dir}/catalog_sales] -PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)] -ReadSchema: struct +(6) Filter [codegen id : 1] +Input [2]: [hd_demo_sk#9, hd_buy_potential#10] +Condition : ((isnotnull(hd_buy_potential#10) AND (hd_buy_potential#10 = >10000)) AND isnotnull(hd_demo_sk#9)) -(7) ColumnarToRow -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] +(7) Project [codegen id : 1] +Output [1]: [hd_demo_sk#9] +Input [2]: [hd_demo_sk#9, hd_buy_potential#10] -(8) Filter -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Condition : (((((isnotnull(cs_quantity#11) AND isnotnull(cs_item_sk#8)) AND isnotnull(cs_bill_cdemo_sk#6)) AND isnotnull(cs_bill_hdemo_sk#7)) AND isnotnull(cs_sold_date_sk#4)) AND isnotnull(cs_ship_date_sk#5)) +(8) BroadcastExchange +Input [1]: [hd_demo_sk#9] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#11] (9) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [hd_demo_sk#1] -Right keys [1]: [cs_bill_hdemo_sk#7] +Left keys [1]: [cs_bill_hdemo_sk#4] +Right keys [1]: [hd_demo_sk#9] Join condition: None (10) Project [codegen id : 4] -Output [7]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Input [9]: [hd_demo_sk#1, cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] +Output [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] +Input [9]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, hd_demo_sk#9] (11) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#12, cd_marital_status#13] @@ -148,13 +148,13 @@ Input [1]: [cd_demo_sk#12] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#14] (16) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [cs_bill_cdemo_sk#6] +Left keys [1]: [cs_bill_cdemo_sk#3] Right keys [1]: [cd_demo_sk#12] Join condition: None (17) Project [codegen id : 4] -Output [6]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, cd_demo_sk#12] +Output [6]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, cd_demo_sk#12] (18) Scan parquet default.date_dim Output [2]: [d_date_sk#15, d_date#16] @@ -175,21 +175,21 @@ Input [2]: [d_date_sk#15, d_date#16] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#17] (22) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [cs_ship_date_sk#5] +Left keys 
[1]: [cs_ship_date_sk#2] Right keys [1]: [d_date_sk#15] Join condition: None (23) Project [codegen id : 4] -Output [6]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16] -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date_sk#15, d_date#16] +Output [6]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date_sk#15, d_date#16] (24) Exchange -Input [6]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16] -Arguments: hashpartitioning(cs_item_sk#8, 5), true, [id=#18] +Input [6]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16] +Arguments: hashpartitioning(cs_item_sk#5, 5), ENSURE_REQUIREMENTS, [id=#18] (25) Sort [codegen id : 5] -Input [6]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16] -Arguments: [cs_item_sk#8 ASC NULLS FIRST], false, 0 +Input [6]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16] +Arguments: [cs_item_sk#5 ASC NULLS FIRST], false, 0 (26) Scan parquet default.item Output [2]: [i_item_sk#19, i_item_desc#20] @@ -207,144 +207,144 @@ Condition : isnotnull(i_item_sk#19) (29) Exchange Input [2]: [i_item_sk#19, i_item_desc#20] -Arguments: hashpartitioning(i_item_sk#19, 5), true, [id=#21] +Arguments: hashpartitioning(i_item_sk#19, 5), ENSURE_REQUIREMENTS, [id=#21] (30) Sort [codegen id : 7] Input [2]: [i_item_sk#19, i_item_desc#20] Arguments: [i_item_sk#19 ASC NULLS FIRST], false, 0 -(31) SortMergeJoin [codegen id : 10] -Left keys [1]: [cs_item_sk#8] +(31) SortMergeJoin [codegen id : 8] +Left keys [1]: [cs_item_sk#5] Right keys [1]: [i_item_sk#19] Join condition: None -(32) Project [codegen id : 10] -Output [7]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16, i_item_desc#20] -Input [8]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16, i_item_sk#19, i_item_desc#20] - -(33) Scan parquet default.date_dim -Output [2]: [d_date_sk#22, d_week_seq#23] -Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] -ReadSchema: struct +(32) Project [codegen id : 8] +Output [7]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20] +Input [8]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_sk#19, i_item_desc#20] -(34) ColumnarToRow [codegen id : 9] -Input [2]: [d_date_sk#22, d_week_seq#23] +(33) Exchange +Input [7]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20] +Arguments: hashpartitioning(cs_item_sk#5, cs_sold_date_sk#1, 5), ENSURE_REQUIREMENTS, [id=#22] -(35) Filter [codegen id : 9] -Input [2]: [d_date_sk#22, d_week_seq#23] -Condition : (isnotnull(d_week_seq#23) AND isnotnull(d_date_sk#22)) +(34) Sort [codegen id : 9] +Input [7]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20] +Arguments: [cs_item_sk#5 ASC NULLS FIRST, cs_sold_date_sk#1 ASC NULLS FIRST], false, 0 -(36) Scan parquet default.date_dim -Output [4]: [d_date_sk#24, d_date#25, 
d_week_seq#26, d_year#27] +(35) Scan parquet default.date_dim +Output [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,1999), IsNotNull(d_date_sk), IsNotNull(d_week_seq), IsNotNull(d_date)] ReadSchema: struct -(37) ColumnarToRow [codegen id : 8] -Input [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] +(36) ColumnarToRow [codegen id : 10] +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] -(38) Filter [codegen id : 8] -Input [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] -Condition : ((((isnotnull(d_year#27) AND (d_year#27 = 1999)) AND isnotnull(d_date_sk#24)) AND isnotnull(d_week_seq#26)) AND isnotnull(d_date#25)) +(37) Filter [codegen id : 10] +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] +Condition : ((((isnotnull(d_year#26) AND (d_year#26 = 1999)) AND isnotnull(d_date_sk#23)) AND isnotnull(d_week_seq#25)) AND isnotnull(d_date#24)) -(39) Project [codegen id : 8] -Output [3]: [d_date_sk#24, d_date#25, d_week_seq#26] -Input [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] +(38) Project [codegen id : 10] +Output [3]: [d_date_sk#23, d_date#24, d_week_seq#25] +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] -(40) BroadcastExchange -Input [3]: [d_date_sk#24, d_date#25, d_week_seq#26] -Arguments: HashedRelationBroadcastMode(List(cast(input[2, int, true] as bigint)),false), [id=#28] +(39) BroadcastExchange +Input [3]: [d_date_sk#23, d_date#24, d_week_seq#25] +Arguments: HashedRelationBroadcastMode(List(cast(input[2, int, true] as bigint)),false), [id=#27] -(41) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [d_week_seq#23] -Right keys [1]: [d_week_seq#26] +(40) Scan parquet default.date_dim +Output [2]: [d_date_sk#28, d_week_seq#29] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] +ReadSchema: struct + +(41) ColumnarToRow +Input [2]: [d_date_sk#28, d_week_seq#29] + +(42) Filter +Input [2]: [d_date_sk#28, d_week_seq#29] +Condition : (isnotnull(d_week_seq#29) AND isnotnull(d_date_sk#28)) + +(43) BroadcastHashJoin [codegen id : 11] +Left keys [1]: [d_week_seq#25] +Right keys [1]: [d_week_seq#29] Join condition: None -(42) Project [codegen id : 9] -Output [4]: [d_date_sk#22, d_date_sk#24, d_date#25, d_week_seq#26] -Input [5]: [d_date_sk#22, d_week_seq#23, d_date_sk#24, d_date#25, d_week_seq#26] +(44) Project [codegen id : 11] +Output [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28] +Input [5]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28, d_week_seq#29] -(43) BroadcastExchange -Input [4]: [d_date_sk#22, d_date_sk#24, d_date#25, d_week_seq#26] -Arguments: HashedRelationBroadcastMode(List(cast(input[1, int, true] as bigint)),false), [id=#29] +(45) BroadcastExchange +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28] +Arguments: HashedRelationBroadcastMode(List(cast(input[3, int, true] as bigint)),false), [id=#30] -(44) BroadcastHashJoin [codegen id : 10] -Left keys [1]: [cs_sold_date_sk#4] -Right keys [1]: [d_date_sk#24] -Join condition: (d_date#16 > d_date#25 + 5 days) +(46) Scan parquet default.inventory +Output [4]: [inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] +Batched: true +Location [not included in comparison]/{warehouse_dir}/inventory] +PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), 
IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] +ReadSchema: struct -(45) Project [codegen id : 10] -Output [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26] -Input [11]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16, i_item_desc#20, d_date_sk#22, d_date_sk#24, d_date#25, d_week_seq#26] +(47) ColumnarToRow +Input [4]: [inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] -(46) Exchange -Input [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26] -Arguments: hashpartitioning(cs_item_sk#8, d_date_sk#22, 5), true, [id=#30] +(48) Filter +Input [4]: [inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] +Condition : (((isnotnull(inv_quantity_on_hand#34) AND isnotnull(inv_item_sk#32)) AND isnotnull(inv_warehouse_sk#33)) AND isnotnull(inv_date_sk#31)) -(47) Sort [codegen id : 11] -Input [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26] -Arguments: [cs_item_sk#8 ASC NULLS FIRST, d_date_sk#22 ASC NULLS FIRST], false, 0 +(49) BroadcastHashJoin [codegen id : 13] +Left keys [1]: [d_date_sk#28] +Right keys [1]: [inv_date_sk#31] +Join condition: None + +(50) Project [codegen id : 13] +Output [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] +Input [8]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28, inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] -(48) Scan parquet default.warehouse -Output [2]: [w_warehouse_sk#31, w_warehouse_name#32] +(51) Scan parquet default.warehouse +Output [2]: [w_warehouse_sk#35, w_warehouse_name#36] Batched: true Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct -(49) ColumnarToRow [codegen id : 12] -Input [2]: [w_warehouse_sk#31, w_warehouse_name#32] - -(50) Filter [codegen id : 12] -Input [2]: [w_warehouse_sk#31, w_warehouse_name#32] -Condition : isnotnull(w_warehouse_sk#31) - -(51) BroadcastExchange -Input [2]: [w_warehouse_sk#31, w_warehouse_name#32] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#33] - -(52) Scan parquet default.inventory -Output [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] -Batched: true -Location [not included in comparison]/{warehouse_dir}/inventory] -PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] -ReadSchema: struct +(52) ColumnarToRow [codegen id : 12] +Input [2]: [w_warehouse_sk#35, w_warehouse_name#36] -(53) ColumnarToRow -Input [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] +(53) Filter [codegen id : 12] +Input [2]: [w_warehouse_sk#35, w_warehouse_name#36] +Condition : isnotnull(w_warehouse_sk#35) -(54) Filter -Input [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] -Condition : (((isnotnull(inv_quantity_on_hand#37) AND isnotnull(inv_item_sk#35)) AND isnotnull(inv_warehouse_sk#36)) AND isnotnull(inv_date_sk#34)) +(54) BroadcastExchange +Input [2]: [w_warehouse_sk#35, w_warehouse_name#36] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#37] (55) BroadcastHashJoin [codegen id : 13] -Left keys [1]: 
[w_warehouse_sk#31] -Right keys [1]: [inv_warehouse_sk#36] +Left keys [1]: [inv_warehouse_sk#33] +Right keys [1]: [w_warehouse_sk#35] Join condition: None (56) Project [codegen id : 13] -Output [4]: [w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] -Input [6]: [w_warehouse_sk#31, w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] +Output [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] +Input [8]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34, w_warehouse_sk#35, w_warehouse_name#36] (57) Exchange -Input [4]: [w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] -Arguments: hashpartitioning(inv_item_sk#35, inv_date_sk#34, 5), true, [id=#38] +Input [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] +Arguments: hashpartitioning(inv_item_sk#32, d_date_sk#23, 5), ENSURE_REQUIREMENTS, [id=#38] (58) Sort [codegen id : 14] -Input [4]: [w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] -Arguments: [inv_item_sk#35 ASC NULLS FIRST, inv_date_sk#34 ASC NULLS FIRST], false, 0 +Input [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] +Arguments: [inv_item_sk#32 ASC NULLS FIRST, d_date_sk#23 ASC NULLS FIRST], false, 0 (59) SortMergeJoin [codegen id : 16] -Left keys [2]: [cs_item_sk#8, d_date_sk#22] -Right keys [2]: [inv_item_sk#35, inv_date_sk#34] -Join condition: (inv_quantity_on_hand#37 < cs_quantity#11) +Left keys [2]: [cs_item_sk#5, cs_sold_date_sk#1] +Right keys [2]: [inv_item_sk#32, d_date_sk#23] +Join condition: ((inv_quantity_on_hand#34 < cs_quantity#8) AND (d_date#16 > d_date#24 + 5 days)) (60) Project [codegen id : 16] -Output [6]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Input [11]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26, w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] +Output [6]: [cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Input [13]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20, d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] (61) Scan parquet default.promotion Output [1]: [p_promo_sk#39] @@ -365,21 +365,21 @@ Input [1]: [p_promo_sk#39] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#40] (65) BroadcastHashJoin [codegen id : 16] -Left keys [1]: [cs_promo_sk#9] +Left keys [1]: [cs_promo_sk#6] Right keys [1]: [p_promo_sk#39] Join condition: None (66) Project [codegen id : 16] -Output [5]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Input [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26, p_promo_sk#39] +Output [5]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Input [7]: [cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25, p_promo_sk#39] (67) Exchange -Input [5]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Arguments: 
hashpartitioning(cs_item_sk#8, cs_order_number#10, 5), true, [id=#41] +Input [5]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Arguments: hashpartitioning(cs_item_sk#5, cs_order_number#7, 5), ENSURE_REQUIREMENTS, [id=#41] (68) Sort [codegen id : 17] -Input [5]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Arguments: [cs_item_sk#8 ASC NULLS FIRST, cs_order_number#10 ASC NULLS FIRST], false, 0 +Input [5]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Arguments: [cs_item_sk#5 ASC NULLS FIRST, cs_order_number#7 ASC NULLS FIRST], false, 0 (69) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#42, cr_order_number#43] @@ -397,40 +397,40 @@ Condition : (isnotnull(cr_item_sk#42) AND isnotnull(cr_order_number#43)) (72) Exchange Input [2]: [cr_item_sk#42, cr_order_number#43] -Arguments: hashpartitioning(cr_item_sk#42, cr_order_number#43, 5), true, [id=#44] +Arguments: hashpartitioning(cr_item_sk#42, cr_order_number#43, 5), ENSURE_REQUIREMENTS, [id=#44] (73) Sort [codegen id : 19] Input [2]: [cr_item_sk#42, cr_order_number#43] Arguments: [cr_item_sk#42 ASC NULLS FIRST, cr_order_number#43 ASC NULLS FIRST], false, 0 (74) SortMergeJoin -Left keys [2]: [cs_item_sk#8, cs_order_number#10] +Left keys [2]: [cs_item_sk#5, cs_order_number#7] Right keys [2]: [cr_item_sk#42, cr_order_number#43] Join condition: None (75) Project [codegen id : 20] -Output [3]: [w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Input [7]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26, cr_item_sk#42, cr_order_number#43] +Output [3]: [w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Input [7]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25, cr_item_sk#42, cr_order_number#43] (76) HashAggregate [codegen id : 20] -Input [3]: [w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Keys [3]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26] +Input [3]: [w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Keys [3]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25] Functions [1]: [partial_count(1)] Aggregate Attributes [1]: [count#45] -Results [4]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count#46] +Results [4]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count#46] (77) Exchange -Input [4]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count#46] -Arguments: hashpartitioning(i_item_desc#20, w_warehouse_name#32, d_week_seq#26, 5), true, [id=#47] +Input [4]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count#46] +Arguments: hashpartitioning(i_item_desc#20, w_warehouse_name#36, d_week_seq#25, 5), ENSURE_REQUIREMENTS, [id=#47] (78) HashAggregate [codegen id : 21] -Input [4]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count#46] -Keys [3]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26] +Input [4]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count#46] +Keys [3]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25] Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#48] -Results [6]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count(1)#48 AS no_promo#49, count(1)#48 AS promo#50, count(1)#48 AS total_cnt#51] +Results [6]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count(1)#48 AS no_promo#49, count(1)#48 AS promo#50, count(1)#48 AS total_cnt#51] (79) TakeOrderedAndProject -Input [6]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, 
no_promo#49, promo#50, total_cnt#51] -Arguments: 100, [total_cnt#51 DESC NULLS LAST, i_item_desc#20 ASC NULLS FIRST, w_warehouse_name#32 ASC NULLS FIRST, d_week_seq#26 ASC NULLS FIRST], [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, no_promo#49, promo#50, total_cnt#51] +Input [6]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, no_promo#49, promo#50, total_cnt#51] +Arguments: 100, [total_cnt#51 DESC NULLS LAST, i_item_desc#20 ASC NULLS FIRST, w_warehouse_name#36 ASC NULLS FIRST, d_week_seq#25 ASC NULLS FIRST], [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, no_promo#49, promo#50, total_cnt#51] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/simplified.txt index 39dba3af02359..b88505ad7b9bc 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q72.sf100/simplified.txt @@ -16,91 +16,95 @@ TakeOrderedAndProject [total_cnt,i_item_desc,w_warehouse_name,d_week_seq,no_prom Project [cs_item_sk,cs_order_number,w_warehouse_name,i_item_desc,d_week_seq] BroadcastHashJoin [cs_promo_sk,p_promo_sk] Project [cs_item_sk,cs_promo_sk,cs_order_number,w_warehouse_name,i_item_desc,d_week_seq] - SortMergeJoin [cs_item_sk,d_date_sk,inv_item_sk,inv_date_sk,inv_quantity_on_hand,cs_quantity] + SortMergeJoin [cs_item_sk,cs_sold_date_sk,inv_item_sk,d_date_sk,inv_quantity_on_hand,cs_quantity,d_date,d_date] InputAdapter - WholeStageCodegen (11) - Sort [cs_item_sk,d_date_sk] + WholeStageCodegen (9) + Sort [cs_item_sk,cs_sold_date_sk] InputAdapter - Exchange [cs_item_sk,d_date_sk] #3 - WholeStageCodegen (10) - Project [cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,i_item_desc,d_date_sk,d_week_seq] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk,d_date,d_date] - Project [cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date,i_item_desc] - SortMergeJoin [cs_item_sk,i_item_sk] - InputAdapter - WholeStageCodegen (5) - Sort [cs_item_sk] - InputAdapter - Exchange [cs_item_sk] #4 - WholeStageCodegen (4) - Project [cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date] - BroadcastHashJoin [cs_ship_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_ship_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] - BroadcastHashJoin [cs_bill_cdemo_sk,cd_demo_sk] - Project [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] - BroadcastHashJoin [hd_demo_sk,cs_bill_hdemo_sk] + Exchange [cs_item_sk,cs_sold_date_sk] #3 + WholeStageCodegen (8) + Project [cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date,i_item_desc] + SortMergeJoin [cs_item_sk,i_item_sk] + InputAdapter + WholeStageCodegen (5) + Sort [cs_item_sk] + InputAdapter + Exchange [cs_item_sk] #4 + WholeStageCodegen (4) + Project [cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date] + BroadcastHashJoin [cs_ship_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_ship_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] + BroadcastHashJoin [cs_bill_cdemo_sk,cd_demo_sk] + Project [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] + BroadcastHashJoin [cs_bill_hdemo_sk,hd_demo_sk] + Filter [cs_quantity,cs_item_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_sold_date_sk,cs_ship_date_sk] + ColumnarToRow InputAdapter - 
BroadcastExchange #5 - WholeStageCodegen (1) - Project [hd_demo_sk] - Filter [hd_buy_potential,hd_demo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential] - Filter [cs_quantity,cs_item_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_sold_date_sk,cs_ship_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Project [cd_demo_sk] - Filter [cd_marital_status,cd_demo_sk] + BroadcastExchange #5 + WholeStageCodegen (1) + Project [hd_demo_sk] + Filter [hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter - Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (3) - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] - InputAdapter - WholeStageCodegen (7) - Sort [i_item_sk] - InputAdapter - Exchange [i_item_sk] #8 - WholeStageCodegen (6) - Filter [i_item_sk] - ColumnarToRow + Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential] InputAdapter - Scan parquet default.item [i_item_sk,i_item_desc] + BroadcastExchange #6 + WholeStageCodegen (2) + Project [cd_demo_sk] + Filter [cd_marital_status,cd_demo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status] + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (3) + Filter [d_date,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date] InputAdapter - BroadcastExchange #9 - WholeStageCodegen (9) - Project [d_date_sk,d_date_sk,d_date,d_week_seq] - BroadcastHashJoin [d_week_seq,d_week_seq] - Filter [d_week_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_week_seq] - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (8) - Project [d_date_sk,d_date,d_week_seq] - Filter [d_year,d_date_sk,d_week_seq,d_date] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date,d_week_seq,d_year] + WholeStageCodegen (7) + Sort [i_item_sk] + InputAdapter + Exchange [i_item_sk] #8 + WholeStageCodegen (6) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_item_desc] InputAdapter WholeStageCodegen (14) - Sort [inv_item_sk,inv_date_sk] + Sort [inv_item_sk,d_date_sk] InputAdapter - Exchange [inv_item_sk,inv_date_sk] #11 + Exchange [inv_item_sk,d_date_sk] #9 WholeStageCodegen (13) - Project [w_warehouse_name,inv_date_sk,inv_item_sk,inv_quantity_on_hand] - BroadcastHashJoin [w_warehouse_sk,inv_warehouse_sk] + Project [d_date_sk,d_date,d_week_seq,inv_item_sk,inv_quantity_on_hand,w_warehouse_name] + BroadcastHashJoin [inv_warehouse_sk,w_warehouse_sk] + Project [d_date_sk,d_date,d_week_seq,inv_item_sk,inv_warehouse_sk,inv_quantity_on_hand] + BroadcastHashJoin [d_date_sk,inv_date_sk] + InputAdapter + BroadcastExchange #10 + WholeStageCodegen (11) + Project [d_date_sk,d_date,d_week_seq,d_date_sk] + BroadcastHashJoin [d_week_seq,d_week_seq] + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (10) + Project [d_date_sk,d_date,d_week_seq] + Filter [d_year,d_date_sk,d_week_seq,d_date] + ColumnarToRow + InputAdapter + Scan 
parquet default.date_dim [d_date_sk,d_date,d_week_seq,d_year] + Filter [d_week_seq,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_week_seq] + Filter [inv_quantity_on_hand,inv_item_sk,inv_warehouse_sk,inv_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.inventory [inv_date_sk,inv_item_sk,inv_warehouse_sk,inv_quantity_on_hand] InputAdapter BroadcastExchange #12 WholeStageCodegen (12) @@ -108,10 +112,6 @@ TakeOrderedAndProject [total_cnt,i_item_desc,w_warehouse_name,d_week_seq,no_prom ColumnarToRow InputAdapter Scan parquet default.warehouse [w_warehouse_sk,w_warehouse_name] - Filter [inv_quantity_on_hand,inv_item_sk,inv_warehouse_sk,inv_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.inventory [inv_date_sk,inv_item_sk,inv_warehouse_sk,inv_quantity_on_hand] InputAdapter BroadcastExchange #13 WholeStageCodegen (15) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt index 4af604ca3f65f..f88f1f48ac2b7 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/explain.txt @@ -120,7 +120,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -128,7 +128,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.0)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.0)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -156,7 +156,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 5] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -171,7 +171,7 @@ Condition : ((cnt#22 >= 1) AND (cnt#22 <= 5)) (29) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] -Arguments: 
hashpartitioning(ss_customer_sk#2, 5), true, [id=#23] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#23] (30) Sort [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] @@ -193,7 +193,7 @@ Condition : isnotnull(c_customer_sk#24) (34) Exchange Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] -Arguments: hashpartitioning(c_customer_sk#24, 5), true, [id=#29] +Arguments: hashpartitioning(c_customer_sk#24, 5), ENSURE_REQUIREMENTS, [id=#29] (35) Sort [codegen id : 8] Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] @@ -210,7 +210,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#24, c_sa (38) Exchange Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), true, [id=#30] +Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#30] (39) Sort [codegen id : 10] Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/simplified.txt index af8527f155c8e..9de2f2ab4cd68 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73.sf100/simplified.txt @@ -47,7 +47,7 @@ WholeStageCodegen (10) BroadcastExchange #6 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt index f4565c3edb172..43c73f3c7af61 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/explain.txt @@ -117,7 +117,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -125,7 +125,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) 
AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.0)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.0)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -153,7 +153,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -195,7 +195,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#23, c_sa (35) Exchange Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), true, [id=#29] +Arguments: rangepartitioning(cnt#22 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 7] Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/simplified.txt index 46b7241565719..5e49f6cb603d5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q73/simplified.txt @@ -41,7 +41,7 @@ WholeStageCodegen (7) BroadcastExchange #5 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt index 39748bdd2772b..1d546a445b202 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/explain.txt @@ -1,142 +1,134 @@ == Physical Plan == -TakeOrderedAndProject (138) -+- * Project (137) - +- * SortMergeJoin Inner (136) - :- * Sort (74) - : +- Exchange (73) - : +- * HashAggregate (72) - : +- Exchange (71) - : +- * HashAggregate (70) - : +- * HashAggregate (69) - : +- Exchange (68) - : +- * HashAggregate (67) - : +- Union (66) - : :- * HashAggregate (47) - : : +- Exchange (46) - : : +- * HashAggregate (45) - : : +- Union (44) - : : :- * Project (25) - : : : +- SortMergeJoin LeftOuter (24) - : : : :- * Sort (18) - : : : : +- Exchange (17) - : : : : +- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- * Project (10) - : : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : : :- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet 
default.catalog_sales (1) - : : : : : +- BroadcastExchange (8) - : : : : : +- * Project (7) - : : : : : +- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.item (4) - : : : : +- BroadcastExchange (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.date_dim (11) - : : : +- * Sort (23) - : : : +- Exchange (22) - : : : +- * Filter (21) - : : : +- * ColumnarToRow (20) - : : : +- Scan parquet default.catalog_returns (19) - : : +- * Project (43) - : : +- SortMergeJoin LeftOuter (42) - : : :- * Sort (36) - : : : +- Exchange (35) - : : : +- * Project (34) - : : : +- * BroadcastHashJoin Inner BuildRight (33) - : : : :- * Project (31) - : : : : +- * BroadcastHashJoin Inner BuildRight (30) - : : : : :- * Filter (28) - : : : : : +- * ColumnarToRow (27) - : : : : : +- Scan parquet default.store_sales (26) - : : : : +- ReusedExchange (29) - : : : +- ReusedExchange (32) - : : +- * Sort (41) - : : +- Exchange (40) - : : +- * Filter (39) - : : +- * ColumnarToRow (38) - : : +- Scan parquet default.store_returns (37) - : +- * Project (65) - : +- SortMergeJoin LeftOuter (64) - : :- * Sort (58) - : : +- Exchange (57) - : : +- * Project (56) - : : +- * BroadcastHashJoin Inner BuildRight (55) - : : :- * Project (53) - : : : +- * BroadcastHashJoin Inner BuildRight (52) - : : : :- * Filter (50) - : : : : +- * ColumnarToRow (49) - : : : : +- Scan parquet default.web_sales (48) - : : : +- ReusedExchange (51) - : : +- ReusedExchange (54) - : +- * Sort (63) - : +- Exchange (62) - : +- * Filter (61) - : +- * ColumnarToRow (60) - : +- Scan parquet default.web_returns (59) - +- * Sort (135) - +- Exchange (134) - +- * HashAggregate (133) - +- Exchange (132) - +- * HashAggregate (131) - +- * HashAggregate (130) - +- Exchange (129) - +- * HashAggregate (128) - +- Union (127) - :- * HashAggregate (111) - : +- Exchange (110) - : +- * HashAggregate (109) - : +- Union (108) - : :- * Project (92) - : : +- SortMergeJoin LeftOuter (91) - : : :- * Sort (88) - : : : +- Exchange (87) - : : : +- * Project (86) - : : : +- * BroadcastHashJoin Inner BuildRight (85) - : : : :- * Project (80) - : : : : +- * BroadcastHashJoin Inner BuildRight (79) - : : : : :- * Filter (77) - : : : : : +- * ColumnarToRow (76) - : : : : : +- Scan parquet default.catalog_sales (75) - : : : : +- ReusedExchange (78) - : : : +- BroadcastExchange (84) - : : : +- * Filter (83) - : : : +- * ColumnarToRow (82) - : : : +- Scan parquet default.date_dim (81) - : : +- * Sort (90) - : : +- ReusedExchange (89) - : +- * Project (107) - : +- SortMergeJoin LeftOuter (106) - : :- * Sort (103) - : : +- Exchange (102) - : : +- * Project (101) - : : +- * BroadcastHashJoin Inner BuildRight (100) - : : :- * Project (98) - : : : +- * BroadcastHashJoin Inner BuildRight (97) - : : : :- * Filter (95) - : : : : +- * ColumnarToRow (94) - : : : : +- Scan parquet default.store_sales (93) - : : : +- ReusedExchange (96) - : : +- ReusedExchange (99) - : +- * Sort (105) - : +- ReusedExchange (104) - +- * Project (126) - +- SortMergeJoin LeftOuter (125) - :- * Sort (122) - : +- Exchange (121) - : +- * Project (120) - : +- * BroadcastHashJoin Inner BuildRight (119) - : :- * Project (117) - : : +- * BroadcastHashJoin Inner BuildRight (116) - : : :- * Filter (114) - : : : +- * ColumnarToRow (113) - : : : +- Scan parquet default.web_sales (112) - : : +- ReusedExchange (115) - : +- ReusedExchange (118) - +- * Sort (124) - +- ReusedExchange (123) +TakeOrderedAndProject (130) ++- * Project (129) + +- * SortMergeJoin 
Inner (128) + :- * Sort (70) + : +- Exchange (69) + : +- * HashAggregate (68) + : +- Exchange (67) + : +- * HashAggregate (66) + : +- * HashAggregate (65) + : +- Exchange (64) + : +- * HashAggregate (63) + : +- Union (62) + : :- * Project (25) + : : +- SortMergeJoin LeftOuter (24) + : : :- * Sort (18) + : : : +- Exchange (17) + : : : +- * Project (16) + : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : :- * Project (10) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.catalog_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.item (4) + : : : +- BroadcastExchange (14) + : : : +- * Filter (13) + : : : +- * ColumnarToRow (12) + : : : +- Scan parquet default.date_dim (11) + : : +- * Sort (23) + : : +- Exchange (22) + : : +- * Filter (21) + : : +- * ColumnarToRow (20) + : : +- Scan parquet default.catalog_returns (19) + : :- * Project (43) + : : +- SortMergeJoin LeftOuter (42) + : : :- * Sort (36) + : : : +- Exchange (35) + : : : +- * Project (34) + : : : +- * BroadcastHashJoin Inner BuildRight (33) + : : : :- * Project (31) + : : : : +- * BroadcastHashJoin Inner BuildRight (30) + : : : : :- * Filter (28) + : : : : : +- * ColumnarToRow (27) + : : : : : +- Scan parquet default.store_sales (26) + : : : : +- ReusedExchange (29) + : : : +- ReusedExchange (32) + : : +- * Sort (41) + : : +- Exchange (40) + : : +- * Filter (39) + : : +- * ColumnarToRow (38) + : : +- Scan parquet default.store_returns (37) + : +- * Project (61) + : +- SortMergeJoin LeftOuter (60) + : :- * Sort (54) + : : +- Exchange (53) + : : +- * Project (52) + : : +- * BroadcastHashJoin Inner BuildRight (51) + : : :- * Project (49) + : : : +- * BroadcastHashJoin Inner BuildRight (48) + : : : :- * Filter (46) + : : : : +- * ColumnarToRow (45) + : : : : +- Scan parquet default.web_sales (44) + : : : +- ReusedExchange (47) + : : +- ReusedExchange (50) + : +- * Sort (59) + : +- Exchange (58) + : +- * Filter (57) + : +- * ColumnarToRow (56) + : +- Scan parquet default.web_returns (55) + +- * Sort (127) + +- Exchange (126) + +- * HashAggregate (125) + +- Exchange (124) + +- * HashAggregate (123) + +- * HashAggregate (122) + +- Exchange (121) + +- * HashAggregate (120) + +- Union (119) + :- * Project (88) + : +- SortMergeJoin LeftOuter (87) + : :- * Sort (84) + : : +- Exchange (83) + : : +- * Project (82) + : : +- * BroadcastHashJoin Inner BuildRight (81) + : : :- * Project (76) + : : : +- * BroadcastHashJoin Inner BuildRight (75) + : : : :- * Filter (73) + : : : : +- * ColumnarToRow (72) + : : : : +- Scan parquet default.catalog_sales (71) + : : : +- ReusedExchange (74) + : : +- BroadcastExchange (80) + : : +- * Filter (79) + : : +- * ColumnarToRow (78) + : : +- Scan parquet default.date_dim (77) + : +- * Sort (86) + : +- ReusedExchange (85) + :- * Project (103) + : +- SortMergeJoin LeftOuter (102) + : :- * Sort (99) + : : +- Exchange (98) + : : +- * Project (97) + : : +- * BroadcastHashJoin Inner BuildRight (96) + : : :- * Project (94) + : : : +- * BroadcastHashJoin Inner BuildRight (93) + : : : :- * Filter (91) + : : : : +- * ColumnarToRow (90) + : : : : +- Scan parquet default.store_sales (89) + : : : +- ReusedExchange (92) + : : +- ReusedExchange (95) + : +- * Sort (101) + : +- ReusedExchange (100) + +- * Project (118) + +- SortMergeJoin LeftOuter (117) + :- * Sort (114) + : +- Exchange (113) + : +- * Project 
(112) + : +- * BroadcastHashJoin Inner BuildRight (111) + : :- * Project (109) + : : +- * BroadcastHashJoin Inner BuildRight (108) + : : :- * Filter (106) + : : : +- * ColumnarToRow (105) + : : : +- Scan parquet default.web_sales (104) + : : +- ReusedExchange (107) + : +- ReusedExchange (110) + +- * Sort (116) + +- ReusedExchange (115) (1) Scan parquet default.catalog_sales @@ -213,7 +205,7 @@ Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (17) Exchange Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), true, [id=#16] +Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#16] (18) Sort [codegen id : 4] Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] @@ -235,7 +227,7 @@ Condition : (isnotnull(cr_order_number#18) AND isnotnull(cr_item_sk#17)) (22) Exchange Input [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] -Arguments: hashpartitioning(cr_order_number#18, cr_item_sk#17, 5), true, [id=#21] +Arguments: hashpartitioning(cr_order_number#18, cr_item_sk#17, 5), ENSURE_REQUIREMENTS, [id=#21] (23) Sort [codegen id : 6] Input [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] @@ -290,7 +282,7 @@ Input [11]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity (35) Exchange Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), true, [id=#29] +Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 11] Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] @@ -312,7 +304,7 @@ Condition : (isnotnull(sr_ticket_number#31) AND isnotnull(sr_item_sk#30)) (40) Exchange Input [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), true, [id=#34] +Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), ENSURE_REQUIREMENTS, [id=#34] (41) Sort [codegen id : 13] Input [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] @@ -327,426 +319,386 @@ Join condition: None Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ss_quantity#27 - coalesce(sr_return_quantity#32, 0)) AS sales_cnt#35, CheckOverflow((promote_precision(cast(ss_ext_sales_price#28 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#33, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#36] Input [13]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -(44) Union - -(45) HashAggregate [codegen id : 15] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#14, 
i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] - -(46) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23, 5), true, [id=#37] - -(47) HashAggregate [codegen id : 16] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] - -(48) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(44) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(49) ColumnarToRow [codegen id : 19] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(45) ColumnarToRow [codegen id : 17] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] -(50) Filter [codegen id : 19] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] -Condition : (isnotnull(ws_item_sk#39) AND isnotnull(ws_sold_date_sk#38)) +(46) Filter [codegen id : 17] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] +Condition : (isnotnull(ws_item_sk#38) AND isnotnull(ws_sold_date_sk#37)) -(51) ReusedExchange [Reuses operator id: 8] +(47) ReusedExchange [Reuses operator id: 8] Output [5]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(52) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [ws_item_sk#39] +(48) BroadcastHashJoin [codegen id : 17] +Left keys [1]: [ws_item_sk#38] Right keys [1]: [i_item_sk#6] Join condition: None -(53) Project [codegen id : 19] -Output [9]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Input [10]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +(49) Project [codegen id : 17] +Output [9]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +Input [10]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(54) ReusedExchange [Reuses operator id: 14] +(50) ReusedExchange [Reuses operator id: 14] Output [2]: [d_date_sk#13, d_year#14] -(55) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [ws_sold_date_sk#38] +(51) BroadcastHashJoin [codegen id : 17] +Left keys [1]: 
[ws_sold_date_sk#37] Right keys [1]: [d_date_sk#13] Join condition: None -(56) Project [codegen id : 19] -Output [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Input [11]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] +(52) Project [codegen id : 17] +Output [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Input [11]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] -(57) Exchange -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: hashpartitioning(cast(ws_order_number#40 as bigint), cast(ws_item_sk#39 as bigint), 5), true, [id=#43] +(53) Exchange +Input [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Arguments: hashpartitioning(cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint), 5), ENSURE_REQUIREMENTS, [id=#42] -(58) Sort [codegen id : 20] -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: [cast(ws_order_number#40 as bigint) ASC NULLS FIRST, cast(ws_item_sk#39 as bigint) ASC NULLS FIRST], false, 0 +(54) Sort [codegen id : 18] +Input [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Arguments: [cast(ws_order_number#39 as bigint) ASC NULLS FIRST, cast(ws_item_sk#38 as bigint) ASC NULLS FIRST], false, 0 -(59) Scan parquet default.web_returns -Output [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(55) Scan parquet default.web_returns +Output [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] Batched: true Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct -(60) ColumnarToRow [codegen id : 21] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(56) ColumnarToRow [codegen id : 19] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(61) Filter [codegen id : 21] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Condition : (isnotnull(wr_order_number#45) AND isnotnull(wr_item_sk#44)) +(57) Filter [codegen id : 19] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Condition : (isnotnull(wr_order_number#44) AND isnotnull(wr_item_sk#43)) -(62) Exchange -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Arguments: hashpartitioning(wr_order_number#45, wr_item_sk#44, 5), true, [id=#48] +(58) Exchange +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Arguments: hashpartitioning(wr_order_number#44, wr_item_sk#43, 5), ENSURE_REQUIREMENTS, [id=#47] -(63) Sort [codegen id : 
22] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Arguments: [wr_order_number#45 ASC NULLS FIRST, wr_item_sk#44 ASC NULLS FIRST], false, 0 +(59) Sort [codegen id : 20] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Arguments: [wr_order_number#44 ASC NULLS FIRST, wr_item_sk#43 ASC NULLS FIRST], false, 0 -(64) SortMergeJoin -Left keys [2]: [cast(ws_order_number#40 as bigint), cast(ws_item_sk#39 as bigint)] -Right keys [2]: [wr_order_number#45, wr_item_sk#44] +(60) SortMergeJoin +Left keys [2]: [cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint)] +Right keys [2]: [wr_order_number#44, wr_item_sk#43] Join condition: None -(65) Project [codegen id : 23] -Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ws_quantity#41 - coalesce(wr_return_quantity#46, 0)) AS sales_cnt#49, CheckOverflow((promote_precision(cast(ws_ext_sales_price#42 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#47, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#50] -Input [13]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(61) Project [codegen id : 21] +Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ws_quantity#40 - coalesce(wr_return_quantity#45, 0)) AS sales_cnt#48, CheckOverflow((promote_precision(cast(ws_ext_sales_price#41 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#46, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#49] +Input [13]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(66) Union +(62) Union -(67) HashAggregate [codegen id : 24] +(63) HashAggregate [codegen id : 22] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -(68) Exchange +(64) Exchange Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23, 5), true, [id=#51] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23, 5), ENSURE_REQUIREMENTS, [id=#50] -(69) HashAggregate [codegen id : 25] +(65) HashAggregate [codegen id : 23] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -(70) HashAggregate [codegen id : 25] +(66) HashAggregate [codegen id : 23] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, 
sales_amt#23] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [partial_sum(cast(sales_cnt#22 as bigint)), partial_sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum#52, sum#53] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#54, sum#55] +Aggregate Attributes [2]: [sum#51, sum#52] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#53, sum#54] -(71) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#54, sum#55] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), true, [id=#56] +(67) Exchange +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#53, sum#54] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), ENSURE_REQUIREMENTS, [id=#55] -(72) HashAggregate [codegen id : 26] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#54, sum#55] +(68) HashAggregate [codegen id : 24] +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#53, sum#54] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [sum(cast(sales_cnt#22 as bigint)), sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#57, sum(UnscaledValue(sales_amt#23))#58] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#22 as bigint))#57 AS sales_cnt#59, MakeDecimal(sum(UnscaledValue(sales_amt#23))#58,18,2) AS sales_amt#60] +Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#56, sum(UnscaledValue(sales_amt#23))#57] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#22 as bigint))#56 AS sales_cnt#58, MakeDecimal(sum(UnscaledValue(sales_amt#23))#57,18,2) AS sales_amt#59] -(73) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#59, sales_amt#60] -Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), true, [id=#61] +(69) Exchange +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#58, sales_amt#59] +Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), ENSURE_REQUIREMENTS, [id=#60] -(74) Sort [codegen id : 27] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#59, sales_amt#60] +(70) Sort [codegen id : 25] +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#58, sales_amt#59] Arguments: [i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST, i_manufact_id#11 ASC NULLS FIRST], false, 0 -(75) Scan parquet default.catalog_sales +(71) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(76) ColumnarToRow [codegen id : 30] +(72) ColumnarToRow [codegen id : 28] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] -(77) 
Filter [codegen id : 30] +(73) Filter [codegen id : 28] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) -(78) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(74) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(79) BroadcastHashJoin [codegen id : 30] +(75) BroadcastHashJoin [codegen id : 28] Left keys [1]: [cs_item_sk#2] -Right keys [1]: [i_item_sk#62] +Right keys [1]: [i_item_sk#61] Join condition: None -(80) Project [codegen id : 30] -Output [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] -Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(76) Project [codegen id : 28] +Output [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(81) Scan parquet default.date_dim -Output [2]: [d_date_sk#67, d_year#68] +(77) Scan parquet default.date_dim +Output [2]: [d_date_sk#66, d_year#67] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct -(82) ColumnarToRow [codegen id : 29] -Input [2]: [d_date_sk#67, d_year#68] +(78) ColumnarToRow [codegen id : 27] +Input [2]: [d_date_sk#66, d_year#67] -(83) Filter [codegen id : 29] -Input [2]: [d_date_sk#67, d_year#68] -Condition : ((isnotnull(d_year#68) AND (d_year#68 = 2001)) AND isnotnull(d_date_sk#67)) +(79) Filter [codegen id : 27] +Input [2]: [d_date_sk#66, d_year#67] +Condition : ((isnotnull(d_year#67) AND (d_year#67 = 2001)) AND isnotnull(d_date_sk#66)) -(84) BroadcastExchange -Input [2]: [d_date_sk#67, d_year#68] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#69] +(80) BroadcastExchange +Input [2]: [d_date_sk#66, d_year#67] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#68] -(85) BroadcastHashJoin [codegen id : 30] +(81) BroadcastHashJoin [codegen id : 28] Left keys [1]: [cs_sold_date_sk#1] -Right keys [1]: [d_date_sk#67] +Right keys [1]: [d_date_sk#66] Join condition: None -(86) Project [codegen id : 30] -Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_date_sk#67, d_year#68] +(82) Project [codegen id : 28] +Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, 
d_date_sk#66, d_year#67] -(87) Exchange -Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), true, [id=#70] +(83) Exchange +Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#69] -(88) Sort [codegen id : 31] -Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] +(84) Sort [codegen id : 29] +Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] Arguments: [cs_order_number#3 ASC NULLS FIRST, cs_item_sk#2 ASC NULLS FIRST], false, 0 -(89) ReusedExchange [Reuses operator id: 22] +(85) ReusedExchange [Reuses operator id: 22] Output [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] -(90) Sort [codegen id : 33] +(86) Sort [codegen id : 31] Input [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] Arguments: [cr_order_number#18 ASC NULLS FIRST, cr_item_sk#17 ASC NULLS FIRST], false, 0 -(91) SortMergeJoin +(87) SortMergeJoin Left keys [2]: [cs_order_number#3, cs_item_sk#2] Right keys [2]: [cr_order_number#18, cr_item_sk#17] Join condition: None -(92) Project [codegen id : 34] -Output [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, (cs_quantity#4 - coalesce(cr_return_quantity#19, 0)) AS sales_cnt#22, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#20, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#23] -Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68, cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] +(88) Project [codegen id : 32] +Output [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, (cs_quantity#4 - coalesce(cr_return_quantity#19, 0)) AS sales_cnt#22, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#20, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#23] +Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67, cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] -(93) Scan parquet default.store_sales +(89) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(94) ColumnarToRow [codegen id : 37] +(90) ColumnarToRow [codegen id : 35] Input [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] -(95) Filter [codegen id : 37] +(91) Filter [codegen id : 35] Input [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, 
ss_ext_sales_price#28] Condition : (isnotnull(ss_item_sk#25) AND isnotnull(ss_sold_date_sk#24)) -(96) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(92) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(97) BroadcastHashJoin [codegen id : 37] +(93) BroadcastHashJoin [codegen id : 35] Left keys [1]: [ss_item_sk#25] -Right keys [1]: [i_item_sk#62] +Right keys [1]: [i_item_sk#61] Join condition: None -(98) Project [codegen id : 37] -Output [9]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] -Input [10]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(94) Project [codegen id : 35] +Output [9]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Input [10]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(99) ReusedExchange [Reuses operator id: 84] -Output [2]: [d_date_sk#67, d_year#68] +(95) ReusedExchange [Reuses operator id: 80] +Output [2]: [d_date_sk#66, d_year#67] -(100) BroadcastHashJoin [codegen id : 37] +(96) BroadcastHashJoin [codegen id : 35] Left keys [1]: [ss_sold_date_sk#24] -Right keys [1]: [d_date_sk#67] +Right keys [1]: [d_date_sk#66] Join condition: None -(101) Project [codegen id : 37] -Output [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Input [11]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_date_sk#67, d_year#68] +(97) Project [codegen id : 35] +Output [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Input [11]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_date_sk#66, d_year#67] -(102) Exchange -Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), true, [id=#71] +(98) Exchange +Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), ENSURE_REQUIREMENTS, [id=#70] -(103) Sort [codegen id : 38] -Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] +(99) Sort [codegen id : 36] +Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] Arguments: 
[cast(ss_ticket_number#26 as bigint) ASC NULLS FIRST, cast(ss_item_sk#25 as bigint) ASC NULLS FIRST], false, 0 -(104) ReusedExchange [Reuses operator id: 40] +(100) ReusedExchange [Reuses operator id: 40] Output [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -(105) Sort [codegen id : 40] +(101) Sort [codegen id : 38] Input [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] Arguments: [sr_ticket_number#31 ASC NULLS FIRST, sr_item_sk#30 ASC NULLS FIRST], false, 0 -(106) SortMergeJoin +(102) SortMergeJoin Left keys [2]: [cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint)] Right keys [2]: [sr_ticket_number#31, sr_item_sk#30] Join condition: None -(107) Project [codegen id : 41] -Output [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, (ss_quantity#27 - coalesce(sr_return_quantity#32, 0)) AS sales_cnt#72, CheckOverflow((promote_precision(cast(ss_ext_sales_price#28 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#33, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#73] -Input [13]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68, sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] - -(108) Union - -(109) HashAggregate [codegen id : 42] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] - -(110) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Arguments: hashpartitioning(d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23, 5), true, [id=#74] - -(111) HashAggregate [codegen id : 43] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +(103) Project [codegen id : 39] +Output [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, (ss_quantity#27 - coalesce(sr_return_quantity#32, 0)) AS sales_cnt#71, CheckOverflow((promote_precision(cast(ss_ext_sales_price#28 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#33, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#72] +Input [13]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67, sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -(112) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(104) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] 
PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(113) ColumnarToRow [codegen id : 46] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(105) ColumnarToRow [codegen id : 42] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] -(114) Filter [codegen id : 46] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] -Condition : (isnotnull(ws_item_sk#39) AND isnotnull(ws_sold_date_sk#38)) +(106) Filter [codegen id : 42] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] +Condition : (isnotnull(ws_item_sk#38) AND isnotnull(ws_sold_date_sk#37)) -(115) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(107) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(116) BroadcastHashJoin [codegen id : 46] -Left keys [1]: [ws_item_sk#39] -Right keys [1]: [i_item_sk#62] +(108) BroadcastHashJoin [codegen id : 42] +Left keys [1]: [ws_item_sk#38] +Right keys [1]: [i_item_sk#61] Join condition: None -(117) Project [codegen id : 46] -Output [9]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] -Input [10]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(109) Project [codegen id : 42] +Output [9]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Input [10]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(118) ReusedExchange [Reuses operator id: 84] -Output [2]: [d_date_sk#67, d_year#68] +(110) ReusedExchange [Reuses operator id: 80] +Output [2]: [d_date_sk#66, d_year#67] -(119) BroadcastHashJoin [codegen id : 46] -Left keys [1]: [ws_sold_date_sk#38] -Right keys [1]: [d_date_sk#67] +(111) BroadcastHashJoin [codegen id : 42] +Left keys [1]: [ws_sold_date_sk#37] +Right keys [1]: [d_date_sk#66] Join condition: None -(120) Project [codegen id : 46] -Output [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Input [11]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_date_sk#67, d_year#68] +(112) Project [codegen id : 42] +Output [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Input [11]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_date_sk#66, d_year#67] -(121) Exchange -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: hashpartitioning(cast(ws_order_number#40 
as bigint), cast(ws_item_sk#39 as bigint), 5), true, [id=#75] +(113) Exchange +Input [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: hashpartitioning(cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint), 5), ENSURE_REQUIREMENTS, [id=#73] -(122) Sort [codegen id : 47] -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: [cast(ws_order_number#40 as bigint) ASC NULLS FIRST, cast(ws_item_sk#39 as bigint) ASC NULLS FIRST], false, 0 +(114) Sort [codegen id : 43] +Input [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: [cast(ws_order_number#39 as bigint) ASC NULLS FIRST, cast(ws_item_sk#38 as bigint) ASC NULLS FIRST], false, 0 -(123) ReusedExchange [Reuses operator id: 62] -Output [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(115) ReusedExchange [Reuses operator id: 58] +Output [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(124) Sort [codegen id : 49] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Arguments: [wr_order_number#45 ASC NULLS FIRST, wr_item_sk#44 ASC NULLS FIRST], false, 0 +(116) Sort [codegen id : 45] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Arguments: [wr_order_number#44 ASC NULLS FIRST, wr_item_sk#43 ASC NULLS FIRST], false, 0 -(125) SortMergeJoin -Left keys [2]: [cast(ws_order_number#40 as bigint), cast(ws_item_sk#39 as bigint)] -Right keys [2]: [wr_order_number#45, wr_item_sk#44] +(117) SortMergeJoin +Left keys [2]: [cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint)] +Right keys [2]: [wr_order_number#44, wr_item_sk#43] Join condition: None -(126) Project [codegen id : 50] -Output [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, (ws_quantity#41 - coalesce(wr_return_quantity#46, 0)) AS sales_cnt#76, CheckOverflow((promote_precision(cast(ws_ext_sales_price#42 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#47, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#77] -Input [13]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68, wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(118) Project [codegen id : 46] +Output [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, (ws_quantity#40 - coalesce(wr_return_quantity#45, 0)) AS sales_cnt#74, CheckOverflow((promote_precision(cast(ws_ext_sales_price#41 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#46, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#75] +Input [13]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67, wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(127) Union +(119) Union -(128) HashAggregate [codegen id : 51] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, 
i_manufact_id#66, sales_cnt#22, sales_amt#23] +(120) HashAggregate [codegen id : 47] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Keys [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] -(129) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Arguments: hashpartitioning(d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23, 5), true, [id=#78] +(121) Exchange +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Arguments: hashpartitioning(d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23, 5), ENSURE_REQUIREMENTS, [id=#76] -(130) HashAggregate [codegen id : 52] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +(122) HashAggregate [codegen id : 48] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Keys [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] -(131) HashAggregate [codegen id : 52] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [5]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(123) HashAggregate [codegen id : 48] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Keys [5]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] Functions [2]: [partial_sum(cast(sales_cnt#22 as bigint)), partial_sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum#79, sum#80] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sum#81, sum#82] +Aggregate Attributes [2]: [sum#77, sum#78] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum#79, sum#80] -(132) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sum#81, sum#82] -Arguments: hashpartitioning(d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, 5), true, [id=#83] +(124) Exchange +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum#79, sum#80] +Arguments: hashpartitioning(d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, 5), ENSURE_REQUIREMENTS, [id=#81] -(133) HashAggregate [codegen id : 53] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, 
sum#81, sum#82] -Keys [5]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(125) HashAggregate [codegen id : 49] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum#79, sum#80] +Keys [5]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] Functions [2]: [sum(cast(sales_cnt#22 as bigint)), sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#84, sum(UnscaledValue(sales_amt#23))#85] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sum(cast(sales_cnt#22 as bigint))#84 AS sales_cnt#86, MakeDecimal(sum(UnscaledValue(sales_amt#23))#85,18,2) AS sales_amt#87] +Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#82, sum(UnscaledValue(sales_amt#23))#83] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum(cast(sales_cnt#22 as bigint))#82 AS sales_cnt#84, MakeDecimal(sum(UnscaledValue(sales_amt#23))#83,18,2) AS sales_amt#85] -(134) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#86, sales_amt#87] -Arguments: hashpartitioning(i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, 5), true, [id=#88] +(126) Exchange +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#84, sales_amt#85] +Arguments: hashpartitioning(i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, 5), ENSURE_REQUIREMENTS, [id=#86] -(135) Sort [codegen id : 54] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#86, sales_amt#87] -Arguments: [i_brand_id#63 ASC NULLS FIRST, i_class_id#64 ASC NULLS FIRST, i_category_id#65 ASC NULLS FIRST, i_manufact_id#66 ASC NULLS FIRST], false, 0 +(127) Sort [codegen id : 50] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#84, sales_amt#85] +Arguments: [i_brand_id#62 ASC NULLS FIRST, i_class_id#63 ASC NULLS FIRST, i_category_id#64 ASC NULLS FIRST, i_manufact_id#65 ASC NULLS FIRST], false, 0 -(136) SortMergeJoin [codegen id : 55] +(128) SortMergeJoin [codegen id : 51] Left keys [4]: [i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Right keys [4]: [i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] -Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#59 as decimal(17,2))) / promote_precision(cast(sales_cnt#86 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) +Right keys [4]: [i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#58 as decimal(17,2))) / promote_precision(cast(sales_cnt#84 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) -(137) Project [codegen id : 55] -Output [10]: [d_year#68 AS prev_year#89, d_year#14 AS year#90, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#86 AS prev_yr_cnt#91, sales_cnt#59 AS curr_yr_cnt#92, (sales_cnt#59 - sales_cnt#86) AS sales_cnt_diff#93, CheckOverflow((promote_precision(cast(sales_amt#60 as decimal(19,2))) - promote_precision(cast(sales_amt#87 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#94] -Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#59, sales_amt#60, d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, 
sales_cnt#86, sales_amt#87] +(129) Project [codegen id : 51] +Output [10]: [d_year#67 AS prev_year#87, d_year#14 AS year#88, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#84 AS prev_yr_cnt#89, sales_cnt#58 AS curr_yr_cnt#90, (sales_cnt#58 - sales_cnt#84) AS sales_cnt_diff#91, CheckOverflow((promote_precision(cast(sales_amt#59 as decimal(19,2))) - promote_precision(cast(sales_amt#85 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#92] +Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#58, sales_amt#59, d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#84, sales_amt#85] -(138) TakeOrderedAndProject -Input [10]: [prev_year#89, year#90, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#91, curr_yr_cnt#92, sales_cnt_diff#93, sales_amt_diff#94] -Arguments: 100, [sales_cnt_diff#93 ASC NULLS FIRST], [prev_year#89, year#90, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#91, curr_yr_cnt#92, sales_cnt_diff#93, sales_amt_diff#94] +(130) TakeOrderedAndProject +Input [10]: [prev_year#87, year#88, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#89, curr_yr_cnt#90, sales_cnt_diff#91, sales_amt_diff#92] +Arguments: 100, [sales_cnt_diff#91 ASC NULLS FIRST], [prev_year#87, year#88, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#89, curr_yr_cnt#90, sales_cnt_diff#91, sales_amt_diff#92] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/simplified.txt index d8d1a3976559d..bac8f252c2983 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75.sf100/simplified.txt @@ -1,113 +1,105 @@ TakeOrderedAndProject [sales_cnt_diff,prev_year,year,i_brand_id,i_class_id,i_category_id,i_manufact_id,prev_yr_cnt,curr_yr_cnt,sales_amt_diff] - WholeStageCodegen (55) + WholeStageCodegen (51) Project [d_year,d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt,sales_amt,sales_amt] SortMergeJoin [i_brand_id,i_class_id,i_category_id,i_manufact_id,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt] InputAdapter - WholeStageCodegen (27) + WholeStageCodegen (25) Sort [i_brand_id,i_class_id,i_category_id,i_manufact_id] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id,i_manufact_id] #1 - WholeStageCodegen (26) + WholeStageCodegen (24) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #2 - WholeStageCodegen (25) + WholeStageCodegen (23) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #3 - WholeStageCodegen (24) + WholeStageCodegen (22) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union - WholeStageCodegen (16) - HashAggregate 
[d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + WholeStageCodegen (7) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #4 - WholeStageCodegen (15) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + WholeStageCodegen (4) + Sort [cs_order_number,cs_item_sk] InputAdapter - Union - WholeStageCodegen (7) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - InputAdapter - SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - WholeStageCodegen (4) - Sort [cs_order_number,cs_item_sk] - InputAdapter - Exchange [cs_order_number,cs_item_sk] #5 - WholeStageCodegen (3) - Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (1) - Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (2) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (6) - Sort [cr_order_number,cr_item_sk] - InputAdapter - Exchange [cr_order_number,cr_item_sk] #8 - WholeStageCodegen (5) - Filter [cr_order_number,cr_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] - WholeStageCodegen (14) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - InputAdapter - SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - WholeStageCodegen (11) - Sort [ss_ticket_number,ss_item_sk] - InputAdapter - Exchange [ss_ticket_number,ss_item_sk] #9 - WholeStageCodegen (10) - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 - InputAdapter - ReusedExchange [d_date_sk,d_year] #7 - WholeStageCodegen (13) - Sort [sr_ticket_number,sr_item_sk] - InputAdapter - Exchange 
[sr_ticket_number,sr_item_sk] #10 - WholeStageCodegen (12) - Filter [sr_ticket_number,sr_item_sk] + Exchange [cs_order_number,cs_item_sk] #4 + WholeStageCodegen (3) + Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (1) + Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] + Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] ColumnarToRow InputAdapter - Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] - WholeStageCodegen (23) + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (2) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (6) + Sort [cr_order_number,cr_item_sk] + InputAdapter + Exchange [cr_order_number,cr_item_sk] #7 + WholeStageCodegen (5) + Filter [cr_order_number,cr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] + WholeStageCodegen (14) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + InputAdapter + SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + WholeStageCodegen (11) + Sort [ss_ticket_number,ss_item_sk] + InputAdapter + Exchange [ss_ticket_number,ss_item_sk] #8 + WholeStageCodegen (10) + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 + InputAdapter + ReusedExchange [d_date_sk,d_year] #6 + WholeStageCodegen (13) + Sort [sr_ticket_number,sr_item_sk] + InputAdapter + Exchange [sr_ticket_number,sr_item_sk] #9 + WholeStageCodegen (12) + Filter [sr_ticket_number,sr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] + WholeStageCodegen (21) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] InputAdapter SortMergeJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] - WholeStageCodegen (20) + WholeStageCodegen (18) Sort [ws_order_number,ws_item_sk] InputAdapter - Exchange [ws_order_number,ws_item_sk] #11 - WholeStageCodegen (19) + Exchange [ws_order_number,ws_item_sk] #10 + WholeStageCodegen (17) Project 
[ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] BroadcastHashJoin [ws_sold_date_sk,d_date_sk] Project [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] @@ -117,108 +109,100 @@ TakeOrderedAndProject [sales_cnt_diff,prev_year,year,i_brand_id,i_class_id,i_cat InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 InputAdapter - ReusedExchange [d_date_sk,d_year] #7 - WholeStageCodegen (22) + ReusedExchange [d_date_sk,d_year] #6 + WholeStageCodegen (20) Sort [wr_order_number,wr_item_sk] InputAdapter - Exchange [wr_order_number,wr_item_sk] #12 - WholeStageCodegen (21) + Exchange [wr_order_number,wr_item_sk] #11 + WholeStageCodegen (19) Filter [wr_order_number,wr_item_sk] ColumnarToRow InputAdapter Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] InputAdapter - WholeStageCodegen (54) + WholeStageCodegen (50) Sort [i_brand_id,i_class_id,i_category_id,i_manufact_id] InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id,i_manufact_id] #13 - WholeStageCodegen (53) + Exchange [i_brand_id,i_class_id,i_category_id,i_manufact_id] #12 + WholeStageCodegen (49) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #14 - WholeStageCodegen (52) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #13 + WholeStageCodegen (48) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #15 - WholeStageCodegen (51) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #14 + WholeStageCodegen (47) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union - WholeStageCodegen (43) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + WholeStageCodegen (32) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #16 - WholeStageCodegen (42) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + WholeStageCodegen (29) + Sort [cs_order_number,cs_item_sk] InputAdapter - Union - WholeStageCodegen (34) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - InputAdapter - SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - WholeStageCodegen (31) - Sort [cs_order_number,cs_item_sk] - InputAdapter - Exchange [cs_order_number,cs_item_sk] #17 - WholeStageCodegen (30) - Project 
[cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 - InputAdapter - BroadcastExchange #18 - WholeStageCodegen (29) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (33) - Sort [cr_order_number,cr_item_sk] - InputAdapter - ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #8 - WholeStageCodegen (41) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - InputAdapter - SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - WholeStageCodegen (38) - Sort [ss_ticket_number,ss_item_sk] - InputAdapter - Exchange [ss_ticket_number,ss_item_sk] #19 - WholeStageCodegen (37) - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 - InputAdapter - ReusedExchange [d_date_sk,d_year] #18 - WholeStageCodegen (40) - Sort [sr_ticket_number,sr_item_sk] - InputAdapter - ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #10 - WholeStageCodegen (50) + Exchange [cs_order_number,cs_item_sk] #15 + WholeStageCodegen (28) + Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 + InputAdapter + BroadcastExchange #16 + WholeStageCodegen (27) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (31) + Sort [cr_order_number,cr_item_sk] + InputAdapter + ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #7 + WholeStageCodegen (39) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + InputAdapter + SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + WholeStageCodegen (36) + Sort 
[ss_ticket_number,ss_item_sk] + InputAdapter + Exchange [ss_ticket_number,ss_item_sk] #17 + WholeStageCodegen (35) + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 + InputAdapter + ReusedExchange [d_date_sk,d_year] #16 + WholeStageCodegen (38) + Sort [sr_ticket_number,sr_item_sk] + InputAdapter + ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #9 + WholeStageCodegen (46) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] InputAdapter SortMergeJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] - WholeStageCodegen (47) + WholeStageCodegen (43) Sort [ws_order_number,ws_item_sk] InputAdapter - Exchange [ws_order_number,ws_item_sk] #20 - WholeStageCodegen (46) + Exchange [ws_order_number,ws_item_sk] #18 + WholeStageCodegen (42) Project [ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] BroadcastHashJoin [ws_sold_date_sk,d_date_sk] Project [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] @@ -228,10 +212,10 @@ TakeOrderedAndProject [sales_cnt_diff,prev_year,year,i_brand_id,i_class_id,i_cat InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 InputAdapter - ReusedExchange [d_date_sk,d_year] #18 - WholeStageCodegen (49) + ReusedExchange [d_date_sk,d_year] #16 + WholeStageCodegen (45) Sort [wr_order_number,wr_item_sk] InputAdapter - ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #12 + ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #11 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt index 292a44930ed3d..3d52a795bb44e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/explain.txt @@ -1,121 +1,113 @@ == Physical Plan == -TakeOrderedAndProject (117) -+- * Project (116) - +- * BroadcastHashJoin Inner BuildRight (115) - :- * HashAggregate (63) - : +- Exchange (62) - : +- * HashAggregate (61) - : +- * HashAggregate (60) - : +- Exchange (59) - : +- * HashAggregate (58) - : +- Union (57) - : :- * HashAggregate (41) - : : +- Exchange (40) - : : +- * HashAggregate (39) - : : +- Union (38) - : : :- * Project (22) - : : : +- * BroadcastHashJoin LeftOuter BuildRight (21) - : : : :- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- * Project (10) - : : : : : +- * BroadcastHashJoin Inner 
BuildRight (9) - : : : : : :- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.catalog_sales (1) - : : : : : +- BroadcastExchange (8) - : : : : : +- * Project (7) - : : : : : +- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.item (4) - : : : : +- BroadcastExchange (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.date_dim (11) - : : : +- BroadcastExchange (20) - : : : +- * Filter (19) - : : : +- * ColumnarToRow (18) - : : : +- Scan parquet default.catalog_returns (17) - : : +- * Project (37) - : : +- * BroadcastHashJoin LeftOuter BuildRight (36) - : : :- * Project (31) - : : : +- * BroadcastHashJoin Inner BuildRight (30) - : : : :- * Project (28) - : : : : +- * BroadcastHashJoin Inner BuildRight (27) - : : : : :- * Filter (25) - : : : : : +- * ColumnarToRow (24) - : : : : : +- Scan parquet default.store_sales (23) - : : : : +- ReusedExchange (26) - : : : +- ReusedExchange (29) - : : +- BroadcastExchange (35) - : : +- * Filter (34) - : : +- * ColumnarToRow (33) - : : +- Scan parquet default.store_returns (32) - : +- * Project (56) - : +- * BroadcastHashJoin LeftOuter BuildRight (55) - : :- * Project (50) - : : +- * BroadcastHashJoin Inner BuildRight (49) - : : :- * Project (47) - : : : +- * BroadcastHashJoin Inner BuildRight (46) - : : : :- * Filter (44) - : : : : +- * ColumnarToRow (43) - : : : : +- Scan parquet default.web_sales (42) - : : : +- ReusedExchange (45) - : : +- ReusedExchange (48) - : +- BroadcastExchange (54) - : +- * Filter (53) - : +- * ColumnarToRow (52) - : +- Scan parquet default.web_returns (51) - +- BroadcastExchange (114) - +- * HashAggregate (113) - +- Exchange (112) - +- * HashAggregate (111) - +- * HashAggregate (110) - +- Exchange (109) - +- * HashAggregate (108) - +- Union (107) - :- * HashAggregate (94) - : +- Exchange (93) - : +- * HashAggregate (92) - : +- Union (91) - : :- * Project (78) - : : +- * BroadcastHashJoin LeftOuter BuildRight (77) - : : :- * Project (75) - : : : +- * BroadcastHashJoin Inner BuildRight (74) - : : : :- * Project (69) - : : : : +- * BroadcastHashJoin Inner BuildRight (68) - : : : : :- * Filter (66) - : : : : : +- * ColumnarToRow (65) - : : : : : +- Scan parquet default.catalog_sales (64) - : : : : +- ReusedExchange (67) - : : : +- BroadcastExchange (73) - : : : +- * Filter (72) - : : : +- * ColumnarToRow (71) - : : : +- Scan parquet default.date_dim (70) - : : +- ReusedExchange (76) - : +- * Project (90) - : +- * BroadcastHashJoin LeftOuter BuildRight (89) - : :- * Project (87) - : : +- * BroadcastHashJoin Inner BuildRight (86) - : : :- * Project (84) - : : : +- * BroadcastHashJoin Inner BuildRight (83) - : : : :- * Filter (81) - : : : : +- * ColumnarToRow (80) - : : : : +- Scan parquet default.store_sales (79) - : : : +- ReusedExchange (82) - : : +- ReusedExchange (85) - : +- ReusedExchange (88) - +- * Project (106) - +- * BroadcastHashJoin LeftOuter BuildRight (105) - :- * Project (103) - : +- * BroadcastHashJoin Inner BuildRight (102) - : :- * Project (100) - : : +- * BroadcastHashJoin Inner BuildRight (99) - : : :- * Filter (97) - : : : +- * ColumnarToRow (96) - : : : +- Scan parquet default.web_sales (95) - : : +- ReusedExchange (98) - : +- ReusedExchange (101) - +- ReusedExchange (104) +TakeOrderedAndProject (109) ++- * Project (108) + +- * BroadcastHashJoin Inner BuildRight (107) + :- * HashAggregate (59) + : +- Exchange (58) + : +- * HashAggregate (57) + : +- * HashAggregate (56) + : +- Exchange (55) + 
: +- * HashAggregate (54) + : +- Union (53) + : :- * Project (22) + : : +- * BroadcastHashJoin LeftOuter BuildRight (21) + : : :- * Project (16) + : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : :- * Project (10) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.catalog_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.item (4) + : : : +- BroadcastExchange (14) + : : : +- * Filter (13) + : : : +- * ColumnarToRow (12) + : : : +- Scan parquet default.date_dim (11) + : : +- BroadcastExchange (20) + : : +- * Filter (19) + : : +- * ColumnarToRow (18) + : : +- Scan parquet default.catalog_returns (17) + : :- * Project (37) + : : +- * BroadcastHashJoin LeftOuter BuildRight (36) + : : :- * Project (31) + : : : +- * BroadcastHashJoin Inner BuildRight (30) + : : : :- * Project (28) + : : : : +- * BroadcastHashJoin Inner BuildRight (27) + : : : : :- * Filter (25) + : : : : : +- * ColumnarToRow (24) + : : : : : +- Scan parquet default.store_sales (23) + : : : : +- ReusedExchange (26) + : : : +- ReusedExchange (29) + : : +- BroadcastExchange (35) + : : +- * Filter (34) + : : +- * ColumnarToRow (33) + : : +- Scan parquet default.store_returns (32) + : +- * Project (52) + : +- * BroadcastHashJoin LeftOuter BuildRight (51) + : :- * Project (46) + : : +- * BroadcastHashJoin Inner BuildRight (45) + : : :- * Project (43) + : : : +- * BroadcastHashJoin Inner BuildRight (42) + : : : :- * Filter (40) + : : : : +- * ColumnarToRow (39) + : : : : +- Scan parquet default.web_sales (38) + : : : +- ReusedExchange (41) + : : +- ReusedExchange (44) + : +- BroadcastExchange (50) + : +- * Filter (49) + : +- * ColumnarToRow (48) + : +- Scan parquet default.web_returns (47) + +- BroadcastExchange (106) + +- * HashAggregate (105) + +- Exchange (104) + +- * HashAggregate (103) + +- * HashAggregate (102) + +- Exchange (101) + +- * HashAggregate (100) + +- Union (99) + :- * Project (74) + : +- * BroadcastHashJoin LeftOuter BuildRight (73) + : :- * Project (71) + : : +- * BroadcastHashJoin Inner BuildRight (70) + : : :- * Project (65) + : : : +- * BroadcastHashJoin Inner BuildRight (64) + : : : :- * Filter (62) + : : : : +- * ColumnarToRow (61) + : : : : +- Scan parquet default.catalog_sales (60) + : : : +- ReusedExchange (63) + : : +- BroadcastExchange (69) + : : +- * Filter (68) + : : +- * ColumnarToRow (67) + : : +- Scan parquet default.date_dim (66) + : +- ReusedExchange (72) + :- * Project (86) + : +- * BroadcastHashJoin LeftOuter BuildRight (85) + : :- * Project (83) + : : +- * BroadcastHashJoin Inner BuildRight (82) + : : :- * Project (80) + : : : +- * BroadcastHashJoin Inner BuildRight (79) + : : : :- * Filter (77) + : : : : +- * ColumnarToRow (76) + : : : : +- Scan parquet default.store_sales (75) + : : : +- ReusedExchange (78) + : : +- ReusedExchange (81) + : +- ReusedExchange (84) + +- * Project (98) + +- * BroadcastHashJoin LeftOuter BuildRight (97) + :- * Project (95) + : +- * BroadcastHashJoin Inner BuildRight (94) + : :- * Project (92) + : : +- * BroadcastHashJoin Inner BuildRight (91) + : : :- * Filter (89) + : : : +- * ColumnarToRow (88) + : : : +- Scan parquet default.web_sales (87) + : : +- ReusedExchange (90) + : +- ReusedExchange (93) + +- ReusedExchange (96) (1) Scan parquet default.catalog_sales @@ -282,366 +274,326 @@ Join condition: None Output [7]: [d_year#14, i_brand_id#7, 
i_class_id#8, i_category_id#9, i_manufact_id#11, (ss_quantity#26 - coalesce(sr_return_quantity#30, 0)) AS sales_cnt#33, CheckOverflow((promote_precision(cast(ss_ext_sales_price#27 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#31, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#34] Input [13]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] -(38) Union - -(39) HashAggregate [codegen id : 9] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] - -(40) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22, 5), true, [id=#35] - -(41) HashAggregate [codegen id : 10] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] - -(42) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(38) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(43) ColumnarToRow [codegen id : 14] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(39) ColumnarToRow [codegen id : 12] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] -(44) Filter [codegen id : 14] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] -Condition : (isnotnull(ws_item_sk#37) AND isnotnull(ws_sold_date_sk#36)) +(40) Filter [codegen id : 12] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] +Condition : (isnotnull(ws_item_sk#36) AND isnotnull(ws_sold_date_sk#35)) -(45) ReusedExchange [Reuses operator id: 8] +(41) ReusedExchange [Reuses operator id: 8] Output [5]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(46) BroadcastHashJoin [codegen id : 14] -Left keys [1]: [ws_item_sk#37] +(42) BroadcastHashJoin [codegen id : 12] +Left keys [1]: [ws_item_sk#36] Right keys [1]: [i_item_sk#6] Join condition: None -(47) Project [codegen id : 14] -Output [9]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Input [10]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, 
ws_ext_sales_price#40, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +(43) Project [codegen id : 12] +Output [9]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +Input [10]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(48) ReusedExchange [Reuses operator id: 14] +(44) ReusedExchange [Reuses operator id: 14] Output [2]: [d_date_sk#13, d_year#14] -(49) BroadcastHashJoin [codegen id : 14] -Left keys [1]: [ws_sold_date_sk#36] +(45) BroadcastHashJoin [codegen id : 12] +Left keys [1]: [ws_sold_date_sk#35] Right keys [1]: [d_date_sk#13] Join condition: None -(50) Project [codegen id : 14] -Output [9]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Input [11]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] +(46) Project [codegen id : 12] +Output [9]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Input [11]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] -(51) Scan parquet default.web_returns -Output [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(47) Scan parquet default.web_returns +Output [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] Batched: true Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct -(52) ColumnarToRow [codegen id : 13] -Input [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(48) ColumnarToRow [codegen id : 11] +Input [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(53) Filter [codegen id : 13] -Input [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] -Condition : (isnotnull(wr_order_number#42) AND isnotnull(wr_item_sk#41)) +(49) Filter [codegen id : 11] +Input [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] +Condition : (isnotnull(wr_order_number#41) AND isnotnull(wr_item_sk#40)) -(54) BroadcastExchange -Input [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] -Arguments: HashedRelationBroadcastMode(List(input[1, bigint, false], input[0, bigint, false]),false), [id=#45] +(50) BroadcastExchange +Input [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] +Arguments: HashedRelationBroadcastMode(List(input[1, bigint, false], input[0, bigint, false]),false), [id=#44] -(55) BroadcastHashJoin [codegen id : 14] -Left keys [2]: [cast(ws_order_number#38 as bigint), cast(ws_item_sk#37 as bigint)] -Right keys [2]: [wr_order_number#42, wr_item_sk#41] +(51) BroadcastHashJoin [codegen id : 12] +Left keys [2]: [cast(ws_order_number#37 as bigint), cast(ws_item_sk#36 as bigint)] +Right keys [2]: [wr_order_number#41, wr_item_sk#40] Join condition: None -(56) Project [codegen id : 14] -Output [7]: 
[d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ws_quantity#39 - coalesce(wr_return_quantity#43, 0)) AS sales_cnt#46, CheckOverflow((promote_precision(cast(ws_ext_sales_price#40 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#44, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#47] -Input [13]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(52) Project [codegen id : 12] +Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ws_quantity#38 - coalesce(wr_return_quantity#42, 0)) AS sales_cnt#45, CheckOverflow((promote_precision(cast(ws_ext_sales_price#39 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#43, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#46] +Input [13]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(57) Union +(53) Union -(58) HashAggregate [codegen id : 15] +(54) HashAggregate [codegen id : 13] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -(59) Exchange +(55) Exchange Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22, 5), true, [id=#48] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22, 5), ENSURE_REQUIREMENTS, [id=#47] -(60) HashAggregate [codegen id : 16] +(56) HashAggregate [codegen id : 14] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -(61) HashAggregate [codegen id : 16] +(57) HashAggregate [codegen id : 14] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [partial_sum(cast(sales_cnt#21 as bigint)), partial_sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum#49, sum#50] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#51, sum#52] +Aggregate Attributes [2]: [sum#48, sum#49] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#50, sum#51] -(62) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#51, sum#52] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), true, [id=#53] +(58) Exchange +Input [7]: 
[d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#50, sum#51] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), ENSURE_REQUIREMENTS, [id=#52] -(63) HashAggregate [codegen id : 34] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#51, sum#52] +(59) HashAggregate [codegen id : 30] +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#50, sum#51] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [sum(cast(sales_cnt#21 as bigint)), sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#54, sum(UnscaledValue(sales_amt#22))#55] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#21 as bigint))#54 AS sales_cnt#56, MakeDecimal(sum(UnscaledValue(sales_amt#22))#55,18,2) AS sales_amt#57] +Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#53, sum(UnscaledValue(sales_amt#22))#54] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#21 as bigint))#53 AS sales_cnt#55, MakeDecimal(sum(UnscaledValue(sales_amt#22))#54,18,2) AS sales_amt#56] -(64) Scan parquet default.catalog_sales +(60) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(65) ColumnarToRow [codegen id : 20] +(61) ColumnarToRow [codegen id : 18] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] -(66) Filter [codegen id : 20] +(62) Filter [codegen id : 18] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) -(67) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(63) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(68) BroadcastHashJoin [codegen id : 20] +(64) BroadcastHashJoin [codegen id : 18] Left keys [1]: [cs_item_sk#2] -Right keys [1]: [i_item_sk#58] +Right keys [1]: [i_item_sk#57] Join condition: None -(69) Project [codegen id : 20] -Output [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(65) Project [codegen id : 18] +Output [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(70) Scan parquet default.date_dim -Output [2]: [d_date_sk#63, d_year#64] +(66) Scan parquet default.date_dim +Output [2]: [d_date_sk#62, d_year#63] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] 
PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct -(71) ColumnarToRow [codegen id : 18] -Input [2]: [d_date_sk#63, d_year#64] +(67) ColumnarToRow [codegen id : 16] +Input [2]: [d_date_sk#62, d_year#63] -(72) Filter [codegen id : 18] -Input [2]: [d_date_sk#63, d_year#64] -Condition : ((isnotnull(d_year#64) AND (d_year#64 = 2001)) AND isnotnull(d_date_sk#63)) +(68) Filter [codegen id : 16] +Input [2]: [d_date_sk#62, d_year#63] +Condition : ((isnotnull(d_year#63) AND (d_year#63 = 2001)) AND isnotnull(d_date_sk#62)) -(73) BroadcastExchange -Input [2]: [d_date_sk#63, d_year#64] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#65] +(69) BroadcastExchange +Input [2]: [d_date_sk#62, d_year#63] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#64] -(74) BroadcastHashJoin [codegen id : 20] +(70) BroadcastHashJoin [codegen id : 18] Left keys [1]: [cs_sold_date_sk#1] -Right keys [1]: [d_date_sk#63] +Right keys [1]: [d_date_sk#62] Join condition: None -(75) Project [codegen id : 20] -Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64] -Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_date_sk#63, d_year#64] +(71) Project [codegen id : 18] +Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63] +Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_date_sk#62, d_year#63] -(76) ReusedExchange [Reuses operator id: 20] +(72) ReusedExchange [Reuses operator id: 20] Output [4]: [cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] -(77) BroadcastHashJoin [codegen id : 20] +(73) BroadcastHashJoin [codegen id : 18] Left keys [2]: [cs_order_number#3, cs_item_sk#2] Right keys [2]: [cr_order_number#17, cr_item_sk#16] Join condition: None -(78) Project [codegen id : 20] -Output [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, (cs_quantity#4 - coalesce(cr_return_quantity#18, 0)) AS sales_cnt#21, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#19, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#22] -Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64, cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] +(74) Project [codegen id : 18] +Output [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, (cs_quantity#4 - coalesce(cr_return_quantity#18, 0)) AS sales_cnt#21, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#19, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#22] +Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63, cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] -(79) Scan parquet 
default.store_sales +(75) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(80) ColumnarToRow [codegen id : 24] +(76) ColumnarToRow [codegen id : 22] Input [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] -(81) Filter [codegen id : 24] +(77) Filter [codegen id : 22] Input [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] Condition : (isnotnull(ss_item_sk#24) AND isnotnull(ss_sold_date_sk#23)) -(82) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(78) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(83) BroadcastHashJoin [codegen id : 24] +(79) BroadcastHashJoin [codegen id : 22] Left keys [1]: [ss_item_sk#24] -Right keys [1]: [i_item_sk#58] +Right keys [1]: [i_item_sk#57] Join condition: None -(84) Project [codegen id : 24] -Output [9]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Input [10]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(80) Project [codegen id : 22] +Output [9]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Input [10]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(85) ReusedExchange [Reuses operator id: 73] -Output [2]: [d_date_sk#63, d_year#64] +(81) ReusedExchange [Reuses operator id: 69] +Output [2]: [d_date_sk#62, d_year#63] -(86) BroadcastHashJoin [codegen id : 24] +(82) BroadcastHashJoin [codegen id : 22] Left keys [1]: [ss_sold_date_sk#23] -Right keys [1]: [d_date_sk#63] +Right keys [1]: [d_date_sk#62] Join condition: None -(87) Project [codegen id : 24] -Output [9]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64] -Input [11]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_date_sk#63, d_year#64] +(83) Project [codegen id : 22] +Output [9]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63] +Input [11]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_date_sk#62, d_year#63] -(88) ReusedExchange [Reuses operator id: 35] +(84) ReusedExchange [Reuses operator id: 35] Output [4]: [sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] -(89) BroadcastHashJoin [codegen id : 24] +(85) BroadcastHashJoin [codegen id : 22] Left keys [2]: [cast(ss_ticket_number#25 as bigint), cast(ss_item_sk#24 as bigint)] 
Right keys [2]: [sr_ticket_number#29, sr_item_sk#28] Join condition: None -(90) Project [codegen id : 24] -Output [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, (ss_quantity#26 - coalesce(sr_return_quantity#30, 0)) AS sales_cnt#66, CheckOverflow((promote_precision(cast(ss_ext_sales_price#27 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#31, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#67] -Input [13]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64, sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] - -(91) Union - -(92) HashAggregate [codegen id : 25] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] - -(93) Exchange -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22, 5), true, [id=#68] - -(94) HashAggregate [codegen id : 26] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +(86) Project [codegen id : 22] +Output [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, (ss_quantity#26 - coalesce(sr_return_quantity#30, 0)) AS sales_cnt#65, CheckOverflow((promote_precision(cast(ss_ext_sales_price#27 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#31, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#66] +Input [13]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63, sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] -(95) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(87) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(96) ColumnarToRow [codegen id : 30] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(88) ColumnarToRow [codegen id : 26] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] -(97) Filter [codegen id : 30] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] -Condition : (isnotnull(ws_item_sk#37) AND isnotnull(ws_sold_date_sk#36)) +(89) Filter [codegen id : 26] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, 
ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] +Condition : (isnotnull(ws_item_sk#36) AND isnotnull(ws_sold_date_sk#35)) -(98) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(90) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(99) BroadcastHashJoin [codegen id : 30] -Left keys [1]: [ws_item_sk#37] -Right keys [1]: [i_item_sk#58] +(91) BroadcastHashJoin [codegen id : 26] +Left keys [1]: [ws_item_sk#36] +Right keys [1]: [i_item_sk#57] Join condition: None -(100) Project [codegen id : 30] -Output [9]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Input [10]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(92) Project [codegen id : 26] +Output [9]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Input [10]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(101) ReusedExchange [Reuses operator id: 73] -Output [2]: [d_date_sk#63, d_year#64] +(93) ReusedExchange [Reuses operator id: 69] +Output [2]: [d_date_sk#62, d_year#63] -(102) BroadcastHashJoin [codegen id : 30] -Left keys [1]: [ws_sold_date_sk#36] -Right keys [1]: [d_date_sk#63] +(94) BroadcastHashJoin [codegen id : 26] +Left keys [1]: [ws_sold_date_sk#35] +Right keys [1]: [d_date_sk#62] Join condition: None -(103) Project [codegen id : 30] -Output [9]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64] -Input [11]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_date_sk#63, d_year#64] +(95) Project [codegen id : 26] +Output [9]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63] +Input [11]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_date_sk#62, d_year#63] -(104) ReusedExchange [Reuses operator id: 54] -Output [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(96) ReusedExchange [Reuses operator id: 50] +Output [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(105) BroadcastHashJoin [codegen id : 30] -Left keys [2]: [cast(ws_order_number#38 as bigint), cast(ws_item_sk#37 as bigint)] -Right keys [2]: [wr_order_number#42, wr_item_sk#41] +(97) BroadcastHashJoin [codegen id : 26] +Left keys [2]: [cast(ws_order_number#37 as bigint), cast(ws_item_sk#36 as bigint)] +Right keys [2]: [wr_order_number#41, wr_item_sk#40] Join condition: None -(106) Project [codegen id : 30] -Output [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, (ws_quantity#39 - coalesce(wr_return_quantity#43, 0)) AS sales_cnt#69, CheckOverflow((promote_precision(cast(ws_ext_sales_price#40 as 
decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#44, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#70] -Input [13]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64, wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(98) Project [codegen id : 26] +Output [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, (ws_quantity#38 - coalesce(wr_return_quantity#42, 0)) AS sales_cnt#67, CheckOverflow((promote_precision(cast(ws_ext_sales_price#39 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#43, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#68] +Input [13]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63, wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(107) Union +(99) Union -(108) HashAggregate [codegen id : 31] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +(100) HashAggregate [codegen id : 27] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Keys [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] -(109) Exchange -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22, 5), true, [id=#71] +(101) Exchange +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Arguments: hashpartitioning(d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22, 5), ENSURE_REQUIREMENTS, [id=#69] -(110) HashAggregate [codegen id : 32] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +(102) HashAggregate [codegen id : 28] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Keys [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] -(111) HashAggregate [codegen id : 32] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [5]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(103) HashAggregate [codegen id 
: 28] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Keys [5]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] Functions [2]: [partial_sum(cast(sales_cnt#21 as bigint)), partial_sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum#72, sum#73] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum#74, sum#75] +Aggregate Attributes [2]: [sum#70, sum#71] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum#72, sum#73] -(112) Exchange -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum#74, sum#75] -Arguments: hashpartitioning(d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, 5), true, [id=#76] +(104) Exchange +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum#72, sum#73] +Arguments: hashpartitioning(d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, 5), ENSURE_REQUIREMENTS, [id=#74] -(113) HashAggregate [codegen id : 33] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum#74, sum#75] -Keys [5]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(105) HashAggregate [codegen id : 29] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum#72, sum#73] +Keys [5]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] Functions [2]: [sum(cast(sales_cnt#21 as bigint)), sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#77, sum(UnscaledValue(sales_amt#22))#78] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum(cast(sales_cnt#21 as bigint))#77 AS sales_cnt#79, MakeDecimal(sum(UnscaledValue(sales_amt#22))#78,18,2) AS sales_amt#80] +Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#75, sum(UnscaledValue(sales_amt#22))#76] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum(cast(sales_cnt#21 as bigint))#75 AS sales_cnt#77, MakeDecimal(sum(UnscaledValue(sales_amt#22))#76,18,2) AS sales_amt#78] -(114) BroadcastExchange -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#79, sales_amt#80] -Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true], input[4, int, true]),false), [id=#81] +(106) BroadcastExchange +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#77, sales_amt#78] +Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true], input[4, int, true]),false), [id=#79] -(115) BroadcastHashJoin [codegen id : 34] +(107) BroadcastHashJoin [codegen id : 30] Left keys [4]: [i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Right keys [4]: [i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#56 as decimal(17,2))) / promote_precision(cast(sales_cnt#79 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) +Right keys [4]: [i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#55 as decimal(17,2))) / 
promote_precision(cast(sales_cnt#77 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) -(116) Project [codegen id : 34] -Output [10]: [d_year#64 AS prev_year#82, d_year#14 AS year#83, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#79 AS prev_yr_cnt#84, sales_cnt#56 AS curr_yr_cnt#85, (sales_cnt#56 - sales_cnt#79) AS sales_cnt_diff#86, CheckOverflow((promote_precision(cast(sales_amt#57 as decimal(19,2))) - promote_precision(cast(sales_amt#80 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#87] -Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#56, sales_amt#57, d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#79, sales_amt#80] +(108) Project [codegen id : 30] +Output [10]: [d_year#63 AS prev_year#80, d_year#14 AS year#81, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#77 AS prev_yr_cnt#82, sales_cnt#55 AS curr_yr_cnt#83, (sales_cnt#55 - sales_cnt#77) AS sales_cnt_diff#84, CheckOverflow((promote_precision(cast(sales_amt#56 as decimal(19,2))) - promote_precision(cast(sales_amt#78 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#85] +Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#55, sales_amt#56, d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#77, sales_amt#78] -(117) TakeOrderedAndProject -Input [10]: [prev_year#82, year#83, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#84, curr_yr_cnt#85, sales_cnt_diff#86, sales_amt_diff#87] -Arguments: 100, [sales_cnt_diff#86 ASC NULLS FIRST], [prev_year#82, year#83, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#84, curr_yr_cnt#85, sales_cnt_diff#86, sales_amt_diff#87] +(109) TakeOrderedAndProject +Input [10]: [prev_year#80, year#81, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#82, curr_yr_cnt#83, sales_cnt_diff#84, sales_amt_diff#85] +Arguments: 100, [sales_cnt_diff#84 ASC NULLS FIRST], [prev_year#80, year#81, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#82, curr_yr_cnt#83, sales_cnt_diff#84, sales_amt_diff#85] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/simplified.txt index 298a06b87762f..0eeca93ed7d08 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q75/simplified.txt @@ -1,83 +1,75 @@ TakeOrderedAndProject [sales_cnt_diff,prev_year,year,i_brand_id,i_class_id,i_category_id,i_manufact_id,prev_yr_cnt,curr_yr_cnt,sales_amt_diff] - WholeStageCodegen (34) + WholeStageCodegen (30) Project [d_year,d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt,sales_amt,sales_amt] BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,i_manufact_id,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #1 - WholeStageCodegen (16) + WholeStageCodegen (14) HashAggregate 
[d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #2 - WholeStageCodegen (15) + WholeStageCodegen (13) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union - WholeStageCodegen (10) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #3 - WholeStageCodegen (9) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Union - WholeStageCodegen (4) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (1) - Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (2) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (3) - Filter [cr_order_number,cr_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] - WholeStageCodegen (8) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 + WholeStageCodegen (4) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] + BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + Project 
[cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] + Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] + ColumnarToRow InputAdapter - ReusedExchange [d_date_sk,d_year] #5 - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (7) - Filter [sr_ticket_number,sr_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] - WholeStageCodegen (14) + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (2) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (3) + Filter [cr_order_number,cr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] + WholeStageCodegen (8) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 + InputAdapter + ReusedExchange [d_date_sk,d_year] #4 + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (7) + Filter [sr_ticket_number,sr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] + WholeStageCodegen (12) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] BroadcastHashJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] Project [ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] @@ -89,79 +81,71 @@ TakeOrderedAndProject [sales_cnt_diff,prev_year,year,i_brand_id,i_class_id,i_cat InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 InputAdapter - ReusedExchange [d_date_sk,d_year] #5 + ReusedExchange 
[d_date_sk,d_year] #4 InputAdapter - BroadcastExchange #8 - WholeStageCodegen (13) + BroadcastExchange #7 + WholeStageCodegen (11) Filter [wr_order_number,wr_item_sk] ColumnarToRow InputAdapter Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] InputAdapter - BroadcastExchange #9 - WholeStageCodegen (33) + BroadcastExchange #8 + WholeStageCodegen (29) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #10 - WholeStageCodegen (32) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #9 + WholeStageCodegen (28) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #11 - WholeStageCodegen (31) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #10 + WholeStageCodegen (27) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union + WholeStageCodegen (18) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] + BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (16) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + InputAdapter + ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #5 + WholeStageCodegen (22) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 + InputAdapter + ReusedExchange [d_date_sk,d_year] #11 + InputAdapter + ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #6 WholeStageCodegen (26) - HashAggregate 
[d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #12 - WholeStageCodegen (25) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Union - WholeStageCodegen (20) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 - InputAdapter - BroadcastExchange #13 - WholeStageCodegen (18) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - InputAdapter - ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #6 - WholeStageCodegen (24) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 - InputAdapter - ReusedExchange [d_date_sk,d_year] #13 - InputAdapter - ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #7 - WholeStageCodegen (30) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] BroadcastHashJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] Project [ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] @@ -173,8 +157,8 @@ TakeOrderedAndProject [sales_cnt_diff,prev_year,year,i_brand_id,i_class_id,i_cat InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 InputAdapter - ReusedExchange [d_date_sk,d_year] #13 + ReusedExchange [d_date_sk,d_year] #11 InputAdapter - ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #8 + ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #7 diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt index 057d786afbcdd..9ac081b356c94 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/explain.txt @@ -37,12 +37,12 @@ TakeOrderedAndProject (108) : : : +- * Project (23) : : : +- * Filter (22) : : : +- * ColumnarToRow (21) - : : : +- Scan parquet default.date_dim (20) + : : : +- Scan parquet default.promotion (20) : : +- BroadcastExchange (31) : : +- * Project (30) : : +- * Filter (29) : : +- * ColumnarToRow (28) - : : +- Scan parquet default.promotion (27) + : : +- Scan parquet default.date_dim (27) : +- BroadcastExchange (37) : +- * Filter (36) : +- * ColumnarToRow (35) @@ -193,67 +193,67 @@ Join condition: None Output [7]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, i_item_sk#14] -(20) Scan parquet default.date_dim -Output [2]: [d_date_sk#17, d_date#18] +(20) Scan parquet default.promotion +Output [2]: [p_promo_sk#17, p_channel_tv#18] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-22), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/promotion] +PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)] +ReadSchema: struct (21) ColumnarToRow [codegen id : 6] -Input [2]: [d_date_sk#17, d_date#18] +Input [2]: [p_promo_sk#17, p_channel_tv#18] (22) Filter [codegen id : 6] -Input [2]: [d_date_sk#17, d_date#18] -Condition : (((isnotnull(d_date#18) AND (d_date#18 >= 11192)) AND (d_date#18 <= 11222)) AND isnotnull(d_date_sk#17)) +Input [2]: [p_promo_sk#17, p_channel_tv#18] +Condition : ((isnotnull(p_channel_tv#18) AND (p_channel_tv#18 = N)) AND isnotnull(p_promo_sk#17)) (23) Project [codegen id : 6] -Output [1]: [d_date_sk#17] -Input [2]: [d_date_sk#17, d_date#18] +Output [1]: [p_promo_sk#17] +Input [2]: [p_promo_sk#17, p_channel_tv#18] (24) BroadcastExchange -Input [1]: [d_date_sk#17] +Input [1]: [p_promo_sk#17] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#19] (25) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [ss_sold_date_sk#1] -Right keys [1]: [d_date_sk#17] +Left keys [1]: [ss_promo_sk#4] +Right keys [1]: [p_promo_sk#17] Join condition: None (26) Project [codegen id : 9] -Output [6]: [ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] -Input [8]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, d_date_sk#17] +Output [6]: [ss_sold_date_sk#1, ss_store_sk#3, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] +Input [8]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, p_promo_sk#17] -(27) Scan parquet default.promotion -Output [2]: [p_promo_sk#20, p_channel_tv#21] +(27) Scan parquet default.date_dim +Output [2]: [d_date_sk#20, d_date#21] Batched: true -Location [not 
included in comparison]/{warehouse_dir}/promotion] -PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,2000-08-23), LessThanOrEqual(d_date,2000-09-22), IsNotNull(d_date_sk)] +ReadSchema: struct (28) ColumnarToRow [codegen id : 7] -Input [2]: [p_promo_sk#20, p_channel_tv#21] +Input [2]: [d_date_sk#20, d_date#21] (29) Filter [codegen id : 7] -Input [2]: [p_promo_sk#20, p_channel_tv#21] -Condition : ((isnotnull(p_channel_tv#21) AND (p_channel_tv#21 = N)) AND isnotnull(p_promo_sk#20)) +Input [2]: [d_date_sk#20, d_date#21] +Condition : (((isnotnull(d_date#21) AND (d_date#21 >= 11192)) AND (d_date#21 <= 11222)) AND isnotnull(d_date_sk#20)) (30) Project [codegen id : 7] -Output [1]: [p_promo_sk#20] -Input [2]: [p_promo_sk#20, p_channel_tv#21] +Output [1]: [d_date_sk#20] +Input [2]: [d_date_sk#20, d_date#21] (31) BroadcastExchange -Input [1]: [p_promo_sk#20] +Input [1]: [d_date_sk#20] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#22] (32) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [ss_promo_sk#4] -Right keys [1]: [p_promo_sk#20] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#20] Join condition: None (33) Project [codegen id : 9] Output [5]: [ss_store_sk#3, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] -Input [7]: [ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, p_promo_sk#20] +Input [7]: [ss_sold_date_sk#1, ss_store_sk#3, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, d_date_sk#20] (34) Scan parquet default.store Output [2]: [s_store_sk#23, s_store_id#24] @@ -366,28 +366,28 @@ Output [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_s Input [9]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_item_sk#47, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, i_item_sk#14] (58) ReusedExchange [Reuses operator id: 24] -Output [1]: [d_date_sk#17] +Output [1]: [p_promo_sk#17] (59) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [cs_sold_date_sk#45] -Right keys [1]: [d_date_sk#17] +Left keys [1]: [cs_promo_sk#48] +Right keys [1]: [p_promo_sk#17] Join condition: None (60) Project [codegen id : 19] -Output [6]: [cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56] -Input [8]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, d_date_sk#17] +Output [6]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56] +Input [8]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, p_promo_sk#17] (61) ReusedExchange [Reuses operator id: 31] -Output [1]: [p_promo_sk#20] +Output [1]: [d_date_sk#20] (62) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [cs_promo_sk#48] -Right keys [1]: [p_promo_sk#20] +Left keys [1]: [cs_sold_date_sk#45] +Right keys [1]: [d_date_sk#20] Join condition: None (63) Project [codegen id : 19] Output [5]: [cs_catalog_page_sk#46, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56] -Input [7]: [cs_catalog_page_sk#46, cs_promo_sk#48, 
cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, p_promo_sk#20] +Input [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, d_date_sk#20] (64) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#58, cp_catalog_page_id#59] @@ -500,28 +500,28 @@ Output [7]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales Input [9]: [ws_sold_date_sk#80, ws_item_sk#81, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, i_item_sk#14] (88) ReusedExchange [Reuses operator id: 24] -Output [1]: [d_date_sk#17] +Output [1]: [p_promo_sk#17] (89) BroadcastHashJoin [codegen id : 29] -Left keys [1]: [ws_sold_date_sk#80] -Right keys [1]: [d_date_sk#17] +Left keys [1]: [ws_promo_sk#83] +Right keys [1]: [p_promo_sk#17] Join condition: None (90) Project [codegen id : 29] -Output [6]: [ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91] -Input [8]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, d_date_sk#17] +Output [6]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91] +Input [8]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, p_promo_sk#17] (91) ReusedExchange [Reuses operator id: 31] -Output [1]: [p_promo_sk#20] +Output [1]: [d_date_sk#20] (92) BroadcastHashJoin [codegen id : 29] -Left keys [1]: [ws_promo_sk#83] -Right keys [1]: [p_promo_sk#20] +Left keys [1]: [ws_sold_date_sk#80] +Right keys [1]: [d_date_sk#20] Join condition: None (93) Project [codegen id : 29] Output [5]: [ws_web_site_sk#82, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91] -Input [7]: [ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, p_promo_sk#20] +Input [7]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, d_date_sk#20] (94) Scan parquet default.web_site Output [2]: [web_site_sk#93, web_site_id#94] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/simplified.txt index 7b73e4307dcf0..ec00b49e71989 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q80.sf100/simplified.txt @@ -17,9 +17,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] Project [ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] BroadcastHashJoin [ss_store_sk,s_store_sk] Project [ss_store_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - BroadcastHashJoin [ss_promo_sk,p_promo_sk] - Project [ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] + BroadcastHashJoin [ss_promo_sk,p_promo_sk] Project [ss_sold_date_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] BroadcastHashJoin 
[ss_item_sk,i_item_sk] Project [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] @@ -54,19 +54,19 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] InputAdapter BroadcastExchange #6 WholeStageCodegen (6) - Project [d_date_sk] - Filter [d_date,d_date_sk] + Project [p_promo_sk] + Filter [p_channel_tv,p_promo_sk] ColumnarToRow InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] + Scan parquet default.promotion [p_promo_sk,p_channel_tv] InputAdapter BroadcastExchange #7 WholeStageCodegen (7) - Project [p_promo_sk] - Filter [p_channel_tv,p_promo_sk] + Project [d_date_sk] + Filter [d_date,d_date_sk] ColumnarToRow InputAdapter - Scan parquet default.promotion [p_promo_sk,p_channel_tv] + Scan parquet default.date_dim [d_date_sk,d_date] InputAdapter BroadcastExchange #8 WholeStageCodegen (8) @@ -83,9 +83,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] Project [cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] BroadcastHashJoin [cs_catalog_page_sk,cp_catalog_page_sk] Project [cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - BroadcastHashJoin [cs_promo_sk,p_promo_sk] - Project [cs_catalog_page_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] + BroadcastHashJoin [cs_promo_sk,p_promo_sk] Project [cs_sold_date_sk,cs_catalog_page_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] BroadcastHashJoin [cs_item_sk,i_item_sk] Project [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] @@ -112,9 +112,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] InputAdapter ReusedExchange [i_item_sk] #5 InputAdapter - ReusedExchange [d_date_sk] #6 + ReusedExchange [p_promo_sk] #6 InputAdapter - ReusedExchange [p_promo_sk] #7 + ReusedExchange [d_date_sk] #7 InputAdapter BroadcastExchange #12 WholeStageCodegen (18) @@ -131,9 +131,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] Project [ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] BroadcastHashJoin [ws_web_site_sk,web_site_sk] Project [ws_web_site_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - BroadcastHashJoin [ws_promo_sk,p_promo_sk] - Project [ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Project [ws_sold_date_sk,ws_web_site_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] + BroadcastHashJoin [ws_promo_sk,p_promo_sk] Project [ws_sold_date_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] BroadcastHashJoin [ws_item_sk,i_item_sk] Project [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] @@ -160,9 +160,9 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] InputAdapter ReusedExchange [i_item_sk] #5 InputAdapter - ReusedExchange [d_date_sk] #6 + ReusedExchange [p_promo_sk] #6 InputAdapter - ReusedExchange [p_promo_sk] #7 + ReusedExchange [d_date_sk] #7 InputAdapter BroadcastExchange #16 WholeStageCodegen (28) diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt index 6e757528a3e68..6813696266ac5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/explain.txt @@ -1,343 +1,343 @@ == Physical Plan == TakeOrderedAndProject (61) +- * Project (60) - +- * BroadcastHashJoin Inner BuildRight (59) - :- * Project (38) - : +- * SortMergeJoin Inner (37) - : :- * Sort (11) - : : +- Exchange (10) - : : +- * Project (9) - : : +- * BroadcastHashJoin Inner BuildRight (8) - : : :- * Filter (3) - : : : +- * ColumnarToRow (2) - : : : +- Scan parquet default.customer (1) - : : +- BroadcastExchange (7) - : : +- * Filter (6) - : : +- * ColumnarToRow (5) - : : +- Scan parquet default.customer_address (4) - : +- * Sort (36) - : +- Exchange (35) - : +- * Filter (34) - : +- * HashAggregate (33) - : +- Exchange (32) - : +- * HashAggregate (31) - : +- * Project (30) - : +- * SortMergeJoin Inner (29) - : :- * Sort (23) - : : +- Exchange (22) - : : +- * Project (21) - : : +- * BroadcastHashJoin Inner BuildRight (20) - : : :- * Filter (14) - : : : +- * ColumnarToRow (13) - : : : +- Scan parquet default.catalog_returns (12) - : : +- BroadcastExchange (19) - : : +- * Project (18) - : : +- * Filter (17) - : : +- * ColumnarToRow (16) - : : +- Scan parquet default.date_dim (15) - : +- * Sort (28) - : +- Exchange (27) - : +- * Filter (26) - : +- * ColumnarToRow (25) - : +- Scan parquet default.customer_address (24) - +- BroadcastExchange (58) - +- * Filter (57) - +- * HashAggregate (56) - +- Exchange (55) - +- * HashAggregate (54) - +- * HashAggregate (53) - +- Exchange (52) - +- * HashAggregate (51) - +- * Project (50) - +- * SortMergeJoin Inner (49) - :- * Sort (46) - : +- Exchange (45) - : +- * Project (44) - : +- * BroadcastHashJoin Inner BuildRight (43) - : :- * Filter (41) - : : +- * ColumnarToRow (40) - : : +- Scan parquet default.catalog_returns (39) - : +- ReusedExchange (42) - +- * Sort (48) - +- ReusedExchange (47) - - -(1) Scan parquet default.customer -Output [6]: [c_customer_sk#1, c_customer_id#2, c_current_addr_sk#3, c_salutation#4, c_first_name#5, c_last_name#6] + +- * SortMergeJoin Inner (59) + :- * Sort (47) + : +- Exchange (46) + : +- * Project (45) + : +- * BroadcastHashJoin Inner BuildRight (44) + : :- * Filter (23) + : : +- * HashAggregate (22) + : : +- Exchange (21) + : : +- * HashAggregate (20) + : : +- * Project (19) + : : +- * SortMergeJoin Inner (18) + : : :- * Sort (12) + : : : +- Exchange (11) + : : : +- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.catalog_returns (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) + : : +- * Sort (17) + : : +- Exchange (16) + : : +- * Filter (15) + : : +- * ColumnarToRow (14) + : : +- Scan parquet default.customer_address (13) + : +- BroadcastExchange (43) + : +- * Filter (42) + : +- * HashAggregate (41) + : +- Exchange (40) + : +- * HashAggregate (39) + : +- * HashAggregate (38) + : +- Exchange (37) + : +- * HashAggregate (36) + : +- * Project (35) + : +- * SortMergeJoin Inner (34) + : :- * Sort (31) + : : +- Exchange (30) + : : +- * Project (29) + : : +- * BroadcastHashJoin Inner 
BuildRight (28) + : : :- * Filter (26) + : : : +- * ColumnarToRow (25) + : : : +- Scan parquet default.catalog_returns (24) + : : +- ReusedExchange (27) + : +- * Sort (33) + : +- ReusedExchange (32) + +- * Sort (58) + +- Exchange (57) + +- * Project (56) + +- * BroadcastHashJoin Inner BuildRight (55) + :- * Filter (50) + : +- * ColumnarToRow (49) + : +- Scan parquet default.customer (48) + +- BroadcastExchange (54) + +- * Filter (53) + +- * ColumnarToRow (52) + +- Scan parquet default.customer_address (51) + + +(1) Scan parquet default.catalog_returns +Output [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer] -PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/catalog_returns] +PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk), IsNotNull(cr_returning_customer_sk)] +ReadSchema: struct (2) ColumnarToRow [codegen id : 2] -Input [6]: [c_customer_sk#1, c_customer_id#2, c_current_addr_sk#3, c_salutation#4, c_first_name#5, c_last_name#6] +Input [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] (3) Filter [codegen id : 2] -Input [6]: [c_customer_sk#1, c_customer_id#2, c_current_addr_sk#3, c_salutation#4, c_first_name#5, c_last_name#6] -Condition : (isnotnull(c_customer_sk#1) AND isnotnull(c_current_addr_sk#3)) +Input [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Condition : ((isnotnull(cr_returned_date_sk#1) AND isnotnull(cr_returning_addr_sk#3)) AND isnotnull(cr_returning_customer_sk#2)) -(4) Scan parquet default.customer_address -Output [12]: [ca_address_sk#7, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] +(4) Scan parquet default.date_dim +Output [2]: [d_date_sk#5, d_year#6] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] +ReadSchema: struct (5) ColumnarToRow [codegen id : 1] -Input [12]: [ca_address_sk#7, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] +Input [2]: [d_date_sk#5, d_year#6] (6) Filter [codegen id : 1] -Input [12]: [ca_address_sk#7, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] -Condition : ((isnotnull(ca_state#14) AND (ca_state#14 = GA)) AND isnotnull(ca_address_sk#7)) +Input [2]: [d_date_sk#5, d_year#6] +Condition : ((isnotnull(d_year#6) AND (d_year#6 = 2000)) AND isnotnull(d_date_sk#5)) + +(7) Project [codegen id : 1] +Output [1]: [d_date_sk#5] +Input [2]: [d_date_sk#5, d_year#6] -(7) BroadcastExchange -Input [12]: [ca_address_sk#7, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] -Arguments: 
HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#19] +(8) BroadcastExchange +Input [1]: [d_date_sk#5] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#7] -(8) BroadcastHashJoin [codegen id : 2] -Left keys [1]: [c_current_addr_sk#3] -Right keys [1]: [ca_address_sk#7] +(9) BroadcastHashJoin [codegen id : 2] +Left keys [1]: [cr_returned_date_sk#1] +Right keys [1]: [d_date_sk#5] Join condition: None -(9) Project [codegen id : 2] -Output [16]: [c_customer_sk#1, c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] -Input [18]: [c_customer_sk#1, c_customer_id#2, c_current_addr_sk#3, c_salutation#4, c_first_name#5, c_last_name#6, ca_address_sk#7, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] +(10) Project [codegen id : 2] +Output [3]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Input [5]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4, d_date_sk#5] -(10) Exchange -Input [16]: [c_customer_sk#1, c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] -Arguments: hashpartitioning(c_customer_sk#1, 5), true, [id=#20] +(11) Exchange +Input [3]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Arguments: hashpartitioning(cr_returning_addr_sk#3, 5), ENSURE_REQUIREMENTS, [id=#8] -(11) Sort [codegen id : 3] -Input [16]: [c_customer_sk#1, c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18] -Arguments: [c_customer_sk#1 ASC NULLS FIRST], false, 0 +(12) Sort [codegen id : 3] +Input [3]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Arguments: [cr_returning_addr_sk#3 ASC NULLS FIRST], false, 0 -(12) Scan parquet default.catalog_returns -Output [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] +(13) Scan parquet default.customer_address +Output [2]: [ca_address_sk#9, ca_state#10] Batched: true -Location [not included in comparison]/{warehouse_dir}/catalog_returns] -PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk), IsNotNull(cr_returning_customer_sk)] -ReadSchema: struct - -(13) ColumnarToRow [codegen id : 5] -Input [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_state)] +ReadSchema: struct -(14) Filter [codegen id : 5] -Input [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Condition : ((isnotnull(cr_returned_date_sk#21) AND isnotnull(cr_returning_addr_sk#23)) AND isnotnull(cr_returning_customer_sk#22)) +(14) ColumnarToRow [codegen id : 4] +Input [2]: 
[ca_address_sk#9, ca_state#10] -(15) Scan parquet default.date_dim -Output [2]: [d_date_sk#25, d_year#26] -Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] -ReadSchema: struct +(15) Filter [codegen id : 4] +Input [2]: [ca_address_sk#9, ca_state#10] +Condition : (isnotnull(ca_address_sk#9) AND isnotnull(ca_state#10)) -(16) ColumnarToRow [codegen id : 4] -Input [2]: [d_date_sk#25, d_year#26] +(16) Exchange +Input [2]: [ca_address_sk#9, ca_state#10] +Arguments: hashpartitioning(ca_address_sk#9, 5), ENSURE_REQUIREMENTS, [id=#11] -(17) Filter [codegen id : 4] -Input [2]: [d_date_sk#25, d_year#26] -Condition : ((isnotnull(d_year#26) AND (d_year#26 = 2000)) AND isnotnull(d_date_sk#25)) +(17) Sort [codegen id : 5] +Input [2]: [ca_address_sk#9, ca_state#10] +Arguments: [ca_address_sk#9 ASC NULLS FIRST], false, 0 -(18) Project [codegen id : 4] -Output [1]: [d_date_sk#25] -Input [2]: [d_date_sk#25, d_year#26] +(18) SortMergeJoin [codegen id : 6] +Left keys [1]: [cr_returning_addr_sk#3] +Right keys [1]: [ca_address_sk#9] +Join condition: None -(19) BroadcastExchange -Input [1]: [d_date_sk#25] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#27] +(19) Project [codegen id : 6] +Output [3]: [cr_returning_customer_sk#2, cr_return_amt_inc_tax#4, ca_state#10] +Input [5]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4, ca_address_sk#9, ca_state#10] + +(20) HashAggregate [codegen id : 6] +Input [3]: [cr_returning_customer_sk#2, cr_return_amt_inc_tax#4, ca_state#10] +Keys [2]: [cr_returning_customer_sk#2, ca_state#10] +Functions [1]: [partial_sum(UnscaledValue(cr_return_amt_inc_tax#4))] +Aggregate Attributes [1]: [sum#12] +Results [3]: [cr_returning_customer_sk#2, ca_state#10, sum#13] + +(21) Exchange +Input [3]: [cr_returning_customer_sk#2, ca_state#10, sum#13] +Arguments: hashpartitioning(cr_returning_customer_sk#2, ca_state#10, 5), ENSURE_REQUIREMENTS, [id=#14] + +(22) HashAggregate [codegen id : 15] +Input [3]: [cr_returning_customer_sk#2, ca_state#10, sum#13] +Keys [2]: [cr_returning_customer_sk#2, ca_state#10] +Functions [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#4))] +Aggregate Attributes [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#4))#15] +Results [3]: [cr_returning_customer_sk#2 AS ctr_customer_sk#16, ca_state#10 AS ctr_state#17, MakeDecimal(sum(UnscaledValue(cr_return_amt_inc_tax#4))#15,17,2) AS ctr_total_return#18] + +(23) Filter [codegen id : 15] +Input [3]: [ctr_customer_sk#16, ctr_state#17, ctr_total_return#18] +Condition : isnotnull(ctr_total_return#18) + +(24) Scan parquet default.catalog_returns +Output [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Batched: true +Location [not included in comparison]/{warehouse_dir}/catalog_returns] +PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk)] +ReadSchema: struct -(20) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [cr_returned_date_sk#21] -Right keys [1]: [d_date_sk#25] -Join condition: None +(25) ColumnarToRow [codegen id : 8] +Input [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] -(21) Project [codegen id : 5] -Output [3]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Input [5]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, 
cr_return_amt_inc_tax#24, d_date_sk#25] +(26) Filter [codegen id : 8] +Input [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Condition : (isnotnull(cr_returned_date_sk#1) AND isnotnull(cr_returning_addr_sk#3)) -(22) Exchange -Input [3]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Arguments: hashpartitioning(cr_returning_addr_sk#23, 5), true, [id=#28] +(27) ReusedExchange [Reuses operator id: 8] +Output [1]: [d_date_sk#5] -(23) Sort [codegen id : 6] -Input [3]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Arguments: [cr_returning_addr_sk#23 ASC NULLS FIRST], false, 0 +(28) BroadcastHashJoin [codegen id : 8] +Left keys [1]: [cr_returned_date_sk#1] +Right keys [1]: [d_date_sk#5] +Join condition: None -(24) Scan parquet default.customer_address -Output [2]: [ca_address_sk#7, ca_state#14] -Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [IsNotNull(ca_address_sk), IsNotNull(ca_state)] -ReadSchema: struct +(29) Project [codegen id : 8] +Output [3]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Input [5]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4, d_date_sk#5] -(25) ColumnarToRow [codegen id : 7] -Input [2]: [ca_address_sk#7, ca_state#14] +(30) Exchange +Input [3]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Arguments: hashpartitioning(cr_returning_addr_sk#3, 5), ENSURE_REQUIREMENTS, [id=#19] -(26) Filter [codegen id : 7] -Input [2]: [ca_address_sk#7, ca_state#14] -Condition : (isnotnull(ca_address_sk#7) AND isnotnull(ca_state#14)) +(31) Sort [codegen id : 9] +Input [3]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4] +Arguments: [cr_returning_addr_sk#3 ASC NULLS FIRST], false, 0 -(27) Exchange -Input [2]: [ca_address_sk#7, ca_state#14] -Arguments: hashpartitioning(ca_address_sk#7, 5), true, [id=#29] +(32) ReusedExchange [Reuses operator id: 16] +Output [2]: [ca_address_sk#9, ca_state#10] -(28) Sort [codegen id : 8] -Input [2]: [ca_address_sk#7, ca_state#14] -Arguments: [ca_address_sk#7 ASC NULLS FIRST], false, 0 +(33) Sort [codegen id : 11] +Input [2]: [ca_address_sk#9, ca_state#10] +Arguments: [ca_address_sk#9 ASC NULLS FIRST], false, 0 -(29) SortMergeJoin [codegen id : 9] -Left keys [1]: [cr_returning_addr_sk#23] -Right keys [1]: [ca_address_sk#7] +(34) SortMergeJoin [codegen id : 12] +Left keys [1]: [cr_returning_addr_sk#3] +Right keys [1]: [ca_address_sk#9] Join condition: None -(30) Project [codegen id : 9] -Output [3]: [cr_returning_customer_sk#22, cr_return_amt_inc_tax#24, ca_state#14] -Input [5]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24, ca_address_sk#7, ca_state#14] - -(31) HashAggregate [codegen id : 9] -Input [3]: [cr_returning_customer_sk#22, cr_return_amt_inc_tax#24, ca_state#14] -Keys [2]: [cr_returning_customer_sk#22, ca_state#14] -Functions [1]: [partial_sum(UnscaledValue(cr_return_amt_inc_tax#24))] -Aggregate Attributes [1]: [sum#30] -Results [3]: [cr_returning_customer_sk#22, ca_state#14, sum#31] - -(32) Exchange -Input [3]: [cr_returning_customer_sk#22, ca_state#14, sum#31] -Arguments: hashpartitioning(cr_returning_customer_sk#22, ca_state#14, 5), true, [id=#32] - -(33) HashAggregate [codegen id : 10] -Input [3]: [cr_returning_customer_sk#22, ca_state#14, sum#31] -Keys [2]: 
[cr_returning_customer_sk#22, ca_state#14] -Functions [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#24))] -Aggregate Attributes [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#24))#33] -Results [3]: [cr_returning_customer_sk#22 AS ctr_customer_sk#34, ca_state#14 AS ctr_state#35, MakeDecimal(sum(UnscaledValue(cr_return_amt_inc_tax#24))#33,17,2) AS ctr_total_return#36] - -(34) Filter [codegen id : 10] -Input [3]: [ctr_customer_sk#34, ctr_state#35, ctr_total_return#36] -Condition : isnotnull(ctr_total_return#36) - -(35) Exchange -Input [3]: [ctr_customer_sk#34, ctr_state#35, ctr_total_return#36] -Arguments: hashpartitioning(ctr_customer_sk#34, 5), true, [id=#37] - -(36) Sort [codegen id : 11] -Input [3]: [ctr_customer_sk#34, ctr_state#35, ctr_total_return#36] -Arguments: [ctr_customer_sk#34 ASC NULLS FIRST], false, 0 - -(37) SortMergeJoin [codegen id : 20] -Left keys [1]: [c_customer_sk#1] -Right keys [1]: [ctr_customer_sk#34] -Join condition: None +(35) Project [codegen id : 12] +Output [3]: [cr_returning_customer_sk#2, cr_return_amt_inc_tax#4, ca_state#10] +Input [5]: [cr_returning_customer_sk#2, cr_returning_addr_sk#3, cr_return_amt_inc_tax#4, ca_address_sk#9, ca_state#10] + +(36) HashAggregate [codegen id : 12] +Input [3]: [cr_returning_customer_sk#2, cr_return_amt_inc_tax#4, ca_state#10] +Keys [2]: [cr_returning_customer_sk#2, ca_state#10] +Functions [1]: [partial_sum(UnscaledValue(cr_return_amt_inc_tax#4))] +Aggregate Attributes [1]: [sum#20] +Results [3]: [cr_returning_customer_sk#2, ca_state#10, sum#21] + +(37) Exchange +Input [3]: [cr_returning_customer_sk#2, ca_state#10, sum#21] +Arguments: hashpartitioning(cr_returning_customer_sk#2, ca_state#10, 5), ENSURE_REQUIREMENTS, [id=#22] + +(38) HashAggregate [codegen id : 13] +Input [3]: [cr_returning_customer_sk#2, ca_state#10, sum#21] +Keys [2]: [cr_returning_customer_sk#2, ca_state#10] +Functions [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#4))] +Aggregate Attributes [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#4))#23] +Results [2]: [ca_state#10 AS ctr_state#17, MakeDecimal(sum(UnscaledValue(cr_return_amt_inc_tax#4))#23,17,2) AS ctr_total_return#18] + +(39) HashAggregate [codegen id : 13] +Input [2]: [ctr_state#17, ctr_total_return#18] +Keys [1]: [ctr_state#17] +Functions [1]: [partial_avg(ctr_total_return#18)] +Aggregate Attributes [2]: [sum#24, count#25] +Results [3]: [ctr_state#17, sum#26, count#27] + +(40) Exchange +Input [3]: [ctr_state#17, sum#26, count#27] +Arguments: hashpartitioning(ctr_state#17, 5), ENSURE_REQUIREMENTS, [id=#28] + +(41) HashAggregate [codegen id : 14] +Input [3]: [ctr_state#17, sum#26, count#27] +Keys [1]: [ctr_state#17] +Functions [1]: [avg(ctr_total_return#18)] +Aggregate Attributes [1]: [avg(ctr_total_return#18)#29] +Results [2]: [CheckOverflow((promote_precision(avg(ctr_total_return#18)#29) * 1.200000), DecimalType(24,7), true) AS (CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#30, ctr_state#17 AS ctr_state#17#31] + +(42) Filter [codegen id : 14] +Input [2]: [(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#30, ctr_state#17#31] +Condition : isnotnull((CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#30) + +(43) BroadcastExchange +Input [2]: [(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#30, ctr_state#17#31] +Arguments: HashedRelationBroadcastMode(List(input[1, string, true]),false), [id=#32] + +(44) BroadcastHashJoin [codegen id : 15] +Left keys [1]: [ctr_state#17] +Right keys [1]: 
[ctr_state#17#31] +Join condition: (cast(ctr_total_return#18 as decimal(24,7)) > (CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#30) + +(45) Project [codegen id : 15] +Output [2]: [ctr_customer_sk#16, ctr_total_return#18] +Input [5]: [ctr_customer_sk#16, ctr_state#17, ctr_total_return#18, (CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#30, ctr_state#17#31] + +(46) Exchange +Input [2]: [ctr_customer_sk#16, ctr_total_return#18] +Arguments: hashpartitioning(ctr_customer_sk#16, 5), ENSURE_REQUIREMENTS, [id=#33] + +(47) Sort [codegen id : 16] +Input [2]: [ctr_customer_sk#16, ctr_total_return#18] +Arguments: [ctr_customer_sk#16 ASC NULLS FIRST], false, 0 + +(48) Scan parquet default.customer +Output [6]: [c_customer_sk#34, c_customer_id#35, c_current_addr_sk#36, c_salutation#37, c_first_name#38, c_last_name#39] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer] +PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk)] +ReadSchema: struct + +(49) ColumnarToRow [codegen id : 18] +Input [6]: [c_customer_sk#34, c_customer_id#35, c_current_addr_sk#36, c_salutation#37, c_first_name#38, c_last_name#39] -(38) Project [codegen id : 20] -Output [17]: [c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18, ctr_state#35, ctr_total_return#36] -Input [19]: [c_customer_sk#1, c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18, ctr_customer_sk#34, ctr_state#35, ctr_total_return#36] +(50) Filter [codegen id : 18] +Input [6]: [c_customer_sk#34, c_customer_id#35, c_current_addr_sk#36, c_salutation#37, c_first_name#38, c_last_name#39] +Condition : (isnotnull(c_customer_sk#34) AND isnotnull(c_current_addr_sk#36)) -(39) Scan parquet default.catalog_returns -Output [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] +(51) Scan parquet default.customer_address +Output [12]: [ca_address_sk#9, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] Batched: true -Location [not included in comparison]/{warehouse_dir}/catalog_returns] -PushedFilters: [IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_addr_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,GA), IsNotNull(ca_address_sk)] +ReadSchema: struct -(40) ColumnarToRow [codegen id : 13] -Input [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] +(52) ColumnarToRow [codegen id : 17] +Input [12]: [ca_address_sk#9, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] -(41) Filter [codegen id : 13] -Input [4]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Condition : (isnotnull(cr_returned_date_sk#21) AND isnotnull(cr_returning_addr_sk#23)) +(53) Filter [codegen id : 
17] +Input [12]: [ca_address_sk#9, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] +Condition : ((isnotnull(ca_state#10) AND (ca_state#10 = GA)) AND isnotnull(ca_address_sk#9)) -(42) ReusedExchange [Reuses operator id: 19] -Output [1]: [d_date_sk#25] +(54) BroadcastExchange +Input [12]: [ca_address_sk#9, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#50] -(43) BroadcastHashJoin [codegen id : 13] -Left keys [1]: [cr_returned_date_sk#21] -Right keys [1]: [d_date_sk#25] +(55) BroadcastHashJoin [codegen id : 18] +Left keys [1]: [c_current_addr_sk#36] +Right keys [1]: [ca_address_sk#9] Join condition: None -(44) Project [codegen id : 13] -Output [3]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Input [5]: [cr_returned_date_sk#21, cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24, d_date_sk#25] +(56) Project [codegen id : 18] +Output [16]: [c_customer_sk#34, c_customer_id#35, c_salutation#37, c_first_name#38, c_last_name#39, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] +Input [18]: [c_customer_sk#34, c_customer_id#35, c_current_addr_sk#36, c_salutation#37, c_first_name#38, c_last_name#39, ca_address_sk#9, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] -(45) Exchange -Input [3]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Arguments: hashpartitioning(cr_returning_addr_sk#23, 5), true, [id=#38] +(57) Exchange +Input [16]: [c_customer_sk#34, c_customer_id#35, c_salutation#37, c_first_name#38, c_last_name#39, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] +Arguments: hashpartitioning(c_customer_sk#34, 5), ENSURE_REQUIREMENTS, [id=#51] -(46) Sort [codegen id : 14] -Input [3]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24] -Arguments: [cr_returning_addr_sk#23 ASC NULLS FIRST], false, 0 +(58) Sort [codegen id : 19] +Input [16]: [c_customer_sk#34, c_customer_id#35, c_salutation#37, c_first_name#38, c_last_name#39, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] +Arguments: [c_customer_sk#34 ASC NULLS FIRST], false, 0 -(47) ReusedExchange [Reuses operator id: 27] -Output [2]: [ca_address_sk#7, ca_state#14] - -(48) Sort [codegen id : 16] -Input [2]: [ca_address_sk#7, ca_state#14] -Arguments: [ca_address_sk#7 ASC NULLS FIRST], false, 0 - -(49) SortMergeJoin [codegen id : 17] -Left keys [1]: [cr_returning_addr_sk#23] -Right keys [1]: [ca_address_sk#7] +(59) SortMergeJoin [codegen id : 20] +Left keys [1]: [ctr_customer_sk#16] +Right keys [1]: [c_customer_sk#34] Join condition: None -(50) Project [codegen id : 17] -Output [3]: [cr_returning_customer_sk#22, 
cr_return_amt_inc_tax#24, ca_state#14] -Input [5]: [cr_returning_customer_sk#22, cr_returning_addr_sk#23, cr_return_amt_inc_tax#24, ca_address_sk#7, ca_state#14] - -(51) HashAggregate [codegen id : 17] -Input [3]: [cr_returning_customer_sk#22, cr_return_amt_inc_tax#24, ca_state#14] -Keys [2]: [cr_returning_customer_sk#22, ca_state#14] -Functions [1]: [partial_sum(UnscaledValue(cr_return_amt_inc_tax#24))] -Aggregate Attributes [1]: [sum#39] -Results [3]: [cr_returning_customer_sk#22, ca_state#14, sum#40] - -(52) Exchange -Input [3]: [cr_returning_customer_sk#22, ca_state#14, sum#40] -Arguments: hashpartitioning(cr_returning_customer_sk#22, ca_state#14, 5), true, [id=#41] - -(53) HashAggregate [codegen id : 18] -Input [3]: [cr_returning_customer_sk#22, ca_state#14, sum#40] -Keys [2]: [cr_returning_customer_sk#22, ca_state#14] -Functions [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#24))] -Aggregate Attributes [1]: [sum(UnscaledValue(cr_return_amt_inc_tax#24))#42] -Results [2]: [ca_state#14 AS ctr_state#35, MakeDecimal(sum(UnscaledValue(cr_return_amt_inc_tax#24))#42,17,2) AS ctr_total_return#36] - -(54) HashAggregate [codegen id : 18] -Input [2]: [ctr_state#35, ctr_total_return#36] -Keys [1]: [ctr_state#35] -Functions [1]: [partial_avg(ctr_total_return#36)] -Aggregate Attributes [2]: [sum#43, count#44] -Results [3]: [ctr_state#35, sum#45, count#46] - -(55) Exchange -Input [3]: [ctr_state#35, sum#45, count#46] -Arguments: hashpartitioning(ctr_state#35, 5), true, [id=#47] - -(56) HashAggregate [codegen id : 19] -Input [3]: [ctr_state#35, sum#45, count#46] -Keys [1]: [ctr_state#35] -Functions [1]: [avg(ctr_total_return#36)] -Aggregate Attributes [1]: [avg(ctr_total_return#36)#48] -Results [2]: [CheckOverflow((promote_precision(avg(ctr_total_return#36)#48) * 1.200000), DecimalType(24,7), true) AS (CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#49, ctr_state#35 AS ctr_state#35#50] - -(57) Filter [codegen id : 19] -Input [2]: [(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#49, ctr_state#35#50] -Condition : isnotnull((CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#49) - -(58) BroadcastExchange -Input [2]: [(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#49, ctr_state#35#50] -Arguments: HashedRelationBroadcastMode(List(input[1, string, true]),false), [id=#51] - -(59) BroadcastHashJoin [codegen id : 20] -Left keys [1]: [ctr_state#35] -Right keys [1]: [ctr_state#35#50] -Join condition: (cast(ctr_total_return#36 as decimal(24,7)) > (CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#49) - (60) Project [codegen id : 20] -Output [16]: [c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18, ctr_total_return#36] -Input [19]: [c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18, ctr_state#35, ctr_total_return#36, (CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))#49, ctr_state#35#50] +Output [16]: [c_customer_id#35, c_salutation#37, c_first_name#38, c_last_name#39, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, 
ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49, ctr_total_return#18] +Input [18]: [ctr_customer_sk#16, ctr_total_return#18, c_customer_sk#34, c_customer_id#35, c_salutation#37, c_first_name#38, c_last_name#39, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49] (61) TakeOrderedAndProject -Input [16]: [c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18, ctr_total_return#36] -Arguments: 100, [c_customer_id#2 ASC NULLS FIRST, c_salutation#4 ASC NULLS FIRST, c_first_name#5 ASC NULLS FIRST, c_last_name#6 ASC NULLS FIRST, ca_street_number#8 ASC NULLS FIRST, ca_street_name#9 ASC NULLS FIRST, ca_street_type#10 ASC NULLS FIRST, ca_suite_number#11 ASC NULLS FIRST, ca_city#12 ASC NULLS FIRST, ca_county#13 ASC NULLS FIRST, ca_state#14 ASC NULLS FIRST, ca_zip#15 ASC NULLS FIRST, ca_country#16 ASC NULLS FIRST, ca_gmt_offset#17 ASC NULLS FIRST, ca_location_type#18 ASC NULLS FIRST, ctr_total_return#36 ASC NULLS FIRST], [c_customer_id#2, c_salutation#4, c_first_name#5, c_last_name#6, ca_street_number#8, ca_street_name#9, ca_street_type#10, ca_suite_number#11, ca_city#12, ca_county#13, ca_state#14, ca_zip#15, ca_country#16, ca_gmt_offset#17, ca_location_type#18, ctr_total_return#36] +Input [16]: [c_customer_id#35, c_salutation#37, c_first_name#38, c_last_name#39, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49, ctr_total_return#18] +Arguments: 100, [c_customer_id#35 ASC NULLS FIRST, c_salutation#37 ASC NULLS FIRST, c_first_name#38 ASC NULLS FIRST, c_last_name#39 ASC NULLS FIRST, ca_street_number#40 ASC NULLS FIRST, ca_street_name#41 ASC NULLS FIRST, ca_street_type#42 ASC NULLS FIRST, ca_suite_number#43 ASC NULLS FIRST, ca_city#44 ASC NULLS FIRST, ca_county#45 ASC NULLS FIRST, ca_state#10 ASC NULLS FIRST, ca_zip#46 ASC NULLS FIRST, ca_country#47 ASC NULLS FIRST, ca_gmt_offset#48 ASC NULLS FIRST, ca_location_type#49 ASC NULLS FIRST, ctr_total_return#18 ASC NULLS FIRST], [c_customer_id#35, c_salutation#37, c_first_name#38, c_last_name#39, ca_street_number#40, ca_street_name#41, ca_street_type#42, ca_suite_number#43, ca_city#44, ca_county#45, ca_state#10, ca_zip#46, ca_country#47, ca_gmt_offset#48, ca_location_type#49, ctr_total_return#18] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/simplified.txt index c603ab5194286..99677b6e39736 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q81.sf100/simplified.txt @@ -1,48 +1,29 @@ TakeOrderedAndProject [c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset,ca_location_type,ctr_total_return] WholeStageCodegen (20) Project 
[c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset,ca_location_type,ctr_total_return] - BroadcastHashJoin [ctr_state,ctr_state,ctr_total_return,(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))] - Project [c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset,ca_location_type,ctr_state,ctr_total_return] - SortMergeJoin [c_customer_sk,ctr_customer_sk] - InputAdapter - WholeStageCodegen (3) - Sort [c_customer_sk] - InputAdapter - Exchange [c_customer_sk] #1 - WholeStageCodegen (2) - Project [c_customer_sk,c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset,ca_location_type] - BroadcastHashJoin [c_current_addr_sk,ca_address_sk] - Filter [c_customer_sk,c_current_addr_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer [c_customer_sk,c_customer_id,c_current_addr_sk,c_salutation,c_first_name,c_last_name] - InputAdapter - BroadcastExchange #2 - WholeStageCodegen (1) - Filter [ca_state,ca_address_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_street_number,ca_street_name,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset,ca_location_type] - InputAdapter - WholeStageCodegen (11) - Sort [ctr_customer_sk] - InputAdapter - Exchange [ctr_customer_sk] #3 - WholeStageCodegen (10) + SortMergeJoin [ctr_customer_sk,c_customer_sk] + InputAdapter + WholeStageCodegen (16) + Sort [ctr_customer_sk] + InputAdapter + Exchange [ctr_customer_sk] #1 + WholeStageCodegen (15) + Project [ctr_customer_sk,ctr_total_return] + BroadcastHashJoin [ctr_state,ctr_state,ctr_total_return,(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))] Filter [ctr_total_return] HashAggregate [cr_returning_customer_sk,ca_state,sum] [sum(UnscaledValue(cr_return_amt_inc_tax)),ctr_customer_sk,ctr_state,ctr_total_return,sum] InputAdapter - Exchange [cr_returning_customer_sk,ca_state] #4 - WholeStageCodegen (9) + Exchange [cr_returning_customer_sk,ca_state] #2 + WholeStageCodegen (6) HashAggregate [cr_returning_customer_sk,ca_state,cr_return_amt_inc_tax] [sum,sum] Project [cr_returning_customer_sk,cr_return_amt_inc_tax,ca_state] SortMergeJoin [cr_returning_addr_sk,ca_address_sk] InputAdapter - WholeStageCodegen (6) + WholeStageCodegen (3) Sort [cr_returning_addr_sk] InputAdapter - Exchange [cr_returning_addr_sk] #5 - WholeStageCodegen (5) + Exchange [cr_returning_addr_sk] #3 + WholeStageCodegen (2) Project [cr_returning_customer_sk,cr_returning_addr_sk,cr_return_amt_inc_tax] BroadcastHashJoin [cr_returned_date_sk,d_date_sk] Filter [cr_returned_date_sk,cr_returning_addr_sk,cr_returning_customer_sk] @@ -50,55 +31,74 @@ TakeOrderedAndProject [c_customer_id,c_salutation,c_first_name,c_last_name,ca_st InputAdapter Scan parquet default.catalog_returns [cr_returned_date_sk,cr_returning_customer_sk,cr_returning_addr_sk,cr_return_amt_inc_tax] InputAdapter - BroadcastExchange #6 - WholeStageCodegen (4) + BroadcastExchange #4 + WholeStageCodegen (1) Project [d_date_sk] Filter [d_year,d_date_sk] ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_year] InputAdapter - WholeStageCodegen (8) + WholeStageCodegen (5) Sort [ca_address_sk] 
InputAdapter - Exchange [ca_address_sk] #7 - WholeStageCodegen (7) + Exchange [ca_address_sk] #5 + WholeStageCodegen (4) Filter [ca_address_sk,ca_state] ColumnarToRow InputAdapter Scan parquet default.customer_address [ca_address_sk,ca_state] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (14) + Filter [(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))] + HashAggregate [ctr_state,sum,count] [avg(ctr_total_return),(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6))),ctr_state,sum,count] + InputAdapter + Exchange [ctr_state] #7 + WholeStageCodegen (13) + HashAggregate [ctr_state,ctr_total_return] [sum,count,sum,count] + HashAggregate [cr_returning_customer_sk,ca_state,sum] [sum(UnscaledValue(cr_return_amt_inc_tax)),ctr_state,ctr_total_return,sum] + InputAdapter + Exchange [cr_returning_customer_sk,ca_state] #8 + WholeStageCodegen (12) + HashAggregate [cr_returning_customer_sk,ca_state,cr_return_amt_inc_tax] [sum,sum] + Project [cr_returning_customer_sk,cr_return_amt_inc_tax,ca_state] + SortMergeJoin [cr_returning_addr_sk,ca_address_sk] + InputAdapter + WholeStageCodegen (9) + Sort [cr_returning_addr_sk] + InputAdapter + Exchange [cr_returning_addr_sk] #9 + WholeStageCodegen (8) + Project [cr_returning_customer_sk,cr_returning_addr_sk,cr_return_amt_inc_tax] + BroadcastHashJoin [cr_returned_date_sk,d_date_sk] + Filter [cr_returned_date_sk,cr_returning_addr_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_returned_date_sk,cr_returning_customer_sk,cr_returning_addr_sk,cr_return_amt_inc_tax] + InputAdapter + ReusedExchange [d_date_sk] #4 + InputAdapter + WholeStageCodegen (11) + Sort [ca_address_sk] + InputAdapter + ReusedExchange [ca_address_sk,ca_state] #5 InputAdapter - BroadcastExchange #8 - WholeStageCodegen (19) - Filter [(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6)))] - HashAggregate [ctr_state,sum,count] [avg(ctr_total_return),(CAST(avg(ctr_total_return) AS DECIMAL(21,6)) * CAST(1.2 AS DECIMAL(21,6))),ctr_state,sum,count] - InputAdapter - Exchange [ctr_state] #9 - WholeStageCodegen (18) - HashAggregate [ctr_state,ctr_total_return] [sum,count,sum,count] - HashAggregate [cr_returning_customer_sk,ca_state,sum] [sum(UnscaledValue(cr_return_amt_inc_tax)),ctr_state,ctr_total_return,sum] + WholeStageCodegen (19) + Sort [c_customer_sk] + InputAdapter + Exchange [c_customer_sk] #10 + WholeStageCodegen (18) + Project [c_customer_sk,c_customer_id,c_salutation,c_first_name,c_last_name,ca_street_number,ca_street_name,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset,ca_location_type] + BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + Filter [c_customer_sk,c_current_addr_sk] + ColumnarToRow InputAdapter - Exchange [cr_returning_customer_sk,ca_state] #10 - WholeStageCodegen (17) - HashAggregate [cr_returning_customer_sk,ca_state,cr_return_amt_inc_tax] [sum,sum] - Project [cr_returning_customer_sk,cr_return_amt_inc_tax,ca_state] - SortMergeJoin [cr_returning_addr_sk,ca_address_sk] - InputAdapter - WholeStageCodegen (14) - Sort [cr_returning_addr_sk] - InputAdapter - Exchange [cr_returning_addr_sk] #11 - WholeStageCodegen (13) - Project [cr_returning_customer_sk,cr_returning_addr_sk,cr_return_amt_inc_tax] - BroadcastHashJoin [cr_returned_date_sk,d_date_sk] - Filter [cr_returned_date_sk,cr_returning_addr_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns 
[cr_returned_date_sk,cr_returning_customer_sk,cr_returning_addr_sk,cr_return_amt_inc_tax] - InputAdapter - ReusedExchange [d_date_sk] #6 - InputAdapter - WholeStageCodegen (16) - Sort [ca_address_sk] - InputAdapter - ReusedExchange [ca_address_sk,ca_state] #7 + Scan parquet default.customer [c_customer_sk,c_customer_id,c_current_addr_sk,c_salutation,c_first_name,c_last_name] + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (17) + Filter [ca_state,ca_address_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_address [ca_address_sk,ca_street_number,ca_street_name,ca_street_type,ca_suite_number,ca_city,ca_county,ca_state,ca_zip,ca_country,ca_gmt_offset,ca_location_type] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt index ae0b996ec28be..83ec6391d7736 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/explain.txt @@ -20,15 +20,15 @@ TakeOrderedAndProject (36) : : : +- Scan parquet default.customer_address (4) : : +- BroadcastExchange (21) : : +- * Project (20) - : : +- * BroadcastHashJoin Inner BuildLeft (19) - : : :- BroadcastExchange (15) - : : : +- * Project (14) - : : : +- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.income_band (11) - : : +- * Filter (18) - : : +- * ColumnarToRow (17) - : : +- Scan parquet default.household_demographics (16) + : : +- * BroadcastHashJoin Inner BuildRight (19) + : : :- * Filter (13) + : : : +- * ColumnarToRow (12) + : : : +- Scan parquet default.household_demographics (11) + : : +- BroadcastExchange (18) + : : +- * Project (17) + : : +- * Filter (16) + : : +- * ColumnarToRow (15) + : : +- Scan parquet default.income_band (14) : +- * Filter (27) : +- * ColumnarToRow (26) : +- Scan parquet default.customer_demographics (25) @@ -82,63 +82,63 @@ Join condition: None Output [5]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_first_name#5, c_last_name#6] Input [7]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_current_addr_sk#4, c_first_name#5, c_last_name#6, ca_address_sk#7] -(11) Scan parquet default.income_band -Output [3]: [ib_income_band_sk#10, ib_lower_bound#11, ib_upper_bound#12] +(11) Scan parquet default.household_demographics +Output [2]: [hd_demo_sk#10, hd_income_band_sk#11] Batched: true -Location [not included in comparison]/{warehouse_dir}/income_band] -PushedFilters: [IsNotNull(ib_lower_bound), IsNotNull(ib_upper_bound), GreaterThanOrEqual(ib_lower_bound,38128), LessThanOrEqual(ib_upper_bound,88128), IsNotNull(ib_income_band_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/household_demographics] +PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)] +ReadSchema: struct -(12) ColumnarToRow [codegen id : 2] -Input [3]: [ib_income_band_sk#10, ib_lower_bound#11, ib_upper_bound#12] +(12) ColumnarToRow [codegen id : 3] +Input [2]: [hd_demo_sk#10, hd_income_band_sk#11] -(13) Filter [codegen id : 2] -Input [3]: [ib_income_band_sk#10, ib_lower_bound#11, ib_upper_bound#12] -Condition : ((((isnotnull(ib_lower_bound#11) AND isnotnull(ib_upper_bound#12)) AND (ib_lower_bound#11 >= 38128)) AND (ib_upper_bound#12 <= 88128)) AND isnotnull(ib_income_band_sk#10)) +(13) Filter [codegen id : 3] +Input [2]: [hd_demo_sk#10, hd_income_band_sk#11] 
+Condition : (isnotnull(hd_demo_sk#10) AND isnotnull(hd_income_band_sk#11)) -(14) Project [codegen id : 2] -Output [1]: [ib_income_band_sk#10] -Input [3]: [ib_income_band_sk#10, ib_lower_bound#11, ib_upper_bound#12] +(14) Scan parquet default.income_band +Output [3]: [ib_income_band_sk#12, ib_lower_bound#13, ib_upper_bound#14] +Batched: true +Location [not included in comparison]/{warehouse_dir}/income_band] +PushedFilters: [IsNotNull(ib_lower_bound), IsNotNull(ib_upper_bound), GreaterThanOrEqual(ib_lower_bound,38128), LessThanOrEqual(ib_upper_bound,88128), IsNotNull(ib_income_band_sk)] +ReadSchema: struct -(15) BroadcastExchange -Input [1]: [ib_income_band_sk#10] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#13] +(15) ColumnarToRow [codegen id : 2] +Input [3]: [ib_income_band_sk#12, ib_lower_bound#13, ib_upper_bound#14] -(16) Scan parquet default.household_demographics -Output [2]: [hd_demo_sk#14, hd_income_band_sk#15] -Batched: true -Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_demo_sk), IsNotNull(hd_income_band_sk)] -ReadSchema: struct +(16) Filter [codegen id : 2] +Input [3]: [ib_income_band_sk#12, ib_lower_bound#13, ib_upper_bound#14] +Condition : ((((isnotnull(ib_lower_bound#13) AND isnotnull(ib_upper_bound#14)) AND (ib_lower_bound#13 >= 38128)) AND (ib_upper_bound#14 <= 88128)) AND isnotnull(ib_income_band_sk#12)) -(17) ColumnarToRow -Input [2]: [hd_demo_sk#14, hd_income_band_sk#15] +(17) Project [codegen id : 2] +Output [1]: [ib_income_band_sk#12] +Input [3]: [ib_income_band_sk#12, ib_lower_bound#13, ib_upper_bound#14] -(18) Filter -Input [2]: [hd_demo_sk#14, hd_income_band_sk#15] -Condition : (isnotnull(hd_demo_sk#14) AND isnotnull(hd_income_band_sk#15)) +(18) BroadcastExchange +Input [1]: [ib_income_band_sk#12] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#15] (19) BroadcastHashJoin [codegen id : 3] -Left keys [1]: [ib_income_band_sk#10] -Right keys [1]: [hd_income_band_sk#15] +Left keys [1]: [hd_income_band_sk#11] +Right keys [1]: [ib_income_band_sk#12] Join condition: None (20) Project [codegen id : 3] -Output [1]: [hd_demo_sk#14] -Input [3]: [ib_income_band_sk#10, hd_demo_sk#14, hd_income_band_sk#15] +Output [1]: [hd_demo_sk#10] +Input [3]: [hd_demo_sk#10, hd_income_band_sk#11, ib_income_band_sk#12] (21) BroadcastExchange -Input [1]: [hd_demo_sk#14] +Input [1]: [hd_demo_sk#10] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] (22) BroadcastHashJoin [codegen id : 4] Left keys [1]: [c_current_hdemo_sk#3] -Right keys [1]: [hd_demo_sk#14] +Right keys [1]: [hd_demo_sk#10] Join condition: None (23) Project [codegen id : 4] Output [4]: [c_customer_id#1, c_current_cdemo_sk#2, c_first_name#5, c_last_name#6] -Input [6]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_first_name#5, c_last_name#6, hd_demo_sk#14] +Input [6]: [c_customer_id#1, c_current_cdemo_sk#2, c_current_hdemo_sk#3, c_first_name#5, c_last_name#6, hd_demo_sk#10] (24) BroadcastExchange Input [4]: [c_customer_id#1, c_current_cdemo_sk#2, c_first_name#5, c_last_name#6] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/simplified.txt index 1fbc57ee7e47a..16087526bc130 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q84.sf100/simplified.txt @@ -30,7 +30,11 @@ TakeOrderedAndProject [c_customer_id,customer_id,customername] BroadcastExchange #4 WholeStageCodegen (3) Project [hd_demo_sk] - BroadcastHashJoin [ib_income_band_sk,hd_income_band_sk] + BroadcastHashJoin [hd_income_band_sk,ib_income_band_sk] + Filter [hd_demo_sk,hd_income_band_sk] + ColumnarToRow + InputAdapter + Scan parquet default.household_demographics [hd_demo_sk,hd_income_band_sk] InputAdapter BroadcastExchange #5 WholeStageCodegen (2) @@ -39,10 +43,6 @@ TakeOrderedAndProject [c_customer_id,customer_id,customername] ColumnarToRow InputAdapter Scan parquet default.income_band [ib_income_band_sk,ib_lower_bound,ib_upper_bound] - Filter [hd_demo_sk,hd_income_band_sk] - ColumnarToRow - InputAdapter - Scan parquet default.household_demographics [hd_demo_sk,hd_income_band_sk] Filter [cd_demo_sk] ColumnarToRow InputAdapter diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt index ee550f1af4947..7c3f00d33f24e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/explain.txt @@ -12,30 +12,30 @@ TakeOrderedAndProject (57) : :- * Project (31) : : +- * BroadcastHashJoin Inner BuildRight (30) : : :- * Project (25) - : : : +- * BroadcastHashJoin Inner BuildLeft (24) - : : : :- BroadcastExchange (5) - : : : : +- * Project (4) - : : : : +- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.date_dim (1) - : : : +- * Project (23) - : : : +- * SortMergeJoin Inner (22) - : : : :- * Sort (16) - : : : : +- Exchange (15) - : : : : +- * Project (14) - : : : : +- * BroadcastHashJoin Inner BuildRight (13) - : : : : :- * Filter (8) - : : : : : +- * ColumnarToRow (7) - : : : : : +- Scan parquet default.web_sales (6) - : : : : +- BroadcastExchange (12) - : : : : +- * Filter (11) - : : : : +- * ColumnarToRow (10) - : : : : +- Scan parquet default.web_page (9) - : : : +- * Sort (21) - : : : +- Exchange (20) - : : : +- * Filter (19) - : : : +- * ColumnarToRow (18) - : : : +- Scan parquet default.web_returns (17) + : : : +- * BroadcastHashJoin Inner BuildRight (24) + : : : :- * Project (18) + : : : : +- * SortMergeJoin Inner (17) + : : : : :- * Sort (11) + : : : : : +- Exchange (10) + : : : : : +- * Project (9) + : : : : : +- * BroadcastHashJoin Inner BuildRight (8) + : : : : : :- * Filter (3) + : : : : : : +- * ColumnarToRow (2) + : : : : : : +- Scan parquet default.web_sales (1) + : : : : : +- BroadcastExchange (7) + : : : : : +- * Filter (6) + : : : : : +- * ColumnarToRow (5) + : : : : : +- Scan parquet default.web_page (4) + : : : : +- * Sort (16) + : : : : +- Exchange (15) + : : : : +- * Filter (14) + : : : : +- * ColumnarToRow (13) + : : : : +- Scan parquet default.web_returns (12) + : : : +- BroadcastExchange (23) + : : : +- * Project (22) + : : : +- * Filter (21) + : : : +- * ColumnarToRow (20) + : : : +- Scan parquet default.date_dim (19) : : +- BroadcastExchange (29) : : +- * Filter (28) : : +- * ColumnarToRow (27) @@ -48,126 +48,126 @@ TakeOrderedAndProject (57) +- * Sort (51) +- Exchange (50) +- * Project (49) - +- * BroadcastHashJoin Inner BuildRight (48) - :- * Filter (43) - : +- * 
ColumnarToRow (42) - : +- Scan parquet default.customer_demographics (41) - +- BroadcastExchange (47) - +- * Filter (46) - +- * ColumnarToRow (45) - +- Scan parquet default.customer_demographics (44) - - -(1) Scan parquet default.date_dim -Output [2]: [d_date_sk#1, d_year#2] -Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] -ReadSchema: struct - -(2) ColumnarToRow [codegen id : 1] -Input [2]: [d_date_sk#1, d_year#2] - -(3) Filter [codegen id : 1] -Input [2]: [d_date_sk#1, d_year#2] -Condition : ((isnotnull(d_year#2) AND (d_year#2 = 2000)) AND isnotnull(d_date_sk#1)) - -(4) Project [codegen id : 1] -Output [1]: [d_date_sk#1] -Input [2]: [d_date_sk#1, d_year#2] - -(5) BroadcastExchange -Input [1]: [d_date_sk#1] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] - -(6) Scan parquet default.web_sales -Output [7]: [ws_sold_date_sk#4, ws_item_sk#5, ws_web_page_sk#6, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10] + +- * BroadcastHashJoin Inner BuildLeft (48) + :- BroadcastExchange (44) + : +- * Filter (43) + : +- * ColumnarToRow (42) + : +- Scan parquet default.customer_demographics (41) + +- * Filter (47) + +- * ColumnarToRow (46) + +- Scan parquet default.customer_demographics (45) + + +(1) Scan parquet default.web_sales +Output [7]: [ws_sold_date_sk#1, ws_item_sk#2, ws_web_page_sk#3, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_order_number), IsNotNull(ws_web_page_sk), IsNotNull(ws_sold_date_sk), Or(Or(And(GreaterThanOrEqual(ws_sales_price,100.00),LessThanOrEqual(ws_sales_price,150.00)),And(GreaterThanOrEqual(ws_sales_price,50.00),LessThanOrEqual(ws_sales_price,100.00))),And(GreaterThanOrEqual(ws_sales_price,150.00),LessThanOrEqual(ws_sales_price,200.00))), Or(Or(And(GreaterThanOrEqual(ws_net_profit,100.00),LessThanOrEqual(ws_net_profit,200.00)),And(GreaterThanOrEqual(ws_net_profit,150.00),LessThanOrEqual(ws_net_profit,300.00))),And(GreaterThanOrEqual(ws_net_profit,50.00),LessThanOrEqual(ws_net_profit,250.00)))] ReadSchema: struct -(7) ColumnarToRow [codegen id : 3] -Input [7]: [ws_sold_date_sk#4, ws_item_sk#5, ws_web_page_sk#6, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10] +(2) ColumnarToRow [codegen id : 2] +Input [7]: [ws_sold_date_sk#1, ws_item_sk#2, ws_web_page_sk#3, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7] -(8) Filter [codegen id : 3] -Input [7]: [ws_sold_date_sk#4, ws_item_sk#5, ws_web_page_sk#6, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10] -Condition : (((((isnotnull(ws_item_sk#5) AND isnotnull(ws_order_number#7)) AND isnotnull(ws_web_page_sk#6)) AND isnotnull(ws_sold_date_sk#4)) AND ((((ws_sales_price#9 >= 100.00) AND (ws_sales_price#9 <= 150.00)) OR ((ws_sales_price#9 >= 50.00) AND (ws_sales_price#9 <= 100.00))) OR ((ws_sales_price#9 >= 150.00) AND (ws_sales_price#9 <= 200.00)))) AND ((((ws_net_profit#10 >= 100.00) AND (ws_net_profit#10 <= 200.00)) OR ((ws_net_profit#10 >= 150.00) AND (ws_net_profit#10 <= 300.00))) OR ((ws_net_profit#10 >= 50.00) AND (ws_net_profit#10 <= 250.00)))) +(3) Filter [codegen id : 2] +Input [7]: [ws_sold_date_sk#1, ws_item_sk#2, ws_web_page_sk#3, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7] +Condition : 
(((((isnotnull(ws_item_sk#2) AND isnotnull(ws_order_number#4)) AND isnotnull(ws_web_page_sk#3)) AND isnotnull(ws_sold_date_sk#1)) AND ((((ws_sales_price#6 >= 100.00) AND (ws_sales_price#6 <= 150.00)) OR ((ws_sales_price#6 >= 50.00) AND (ws_sales_price#6 <= 100.00))) OR ((ws_sales_price#6 >= 150.00) AND (ws_sales_price#6 <= 200.00)))) AND ((((ws_net_profit#7 >= 100.00) AND (ws_net_profit#7 <= 200.00)) OR ((ws_net_profit#7 >= 150.00) AND (ws_net_profit#7 <= 300.00))) OR ((ws_net_profit#7 >= 50.00) AND (ws_net_profit#7 <= 250.00)))) -(9) Scan parquet default.web_page -Output [1]: [wp_web_page_sk#11] +(4) Scan parquet default.web_page +Output [1]: [wp_web_page_sk#8] Batched: true Location [not included in comparison]/{warehouse_dir}/web_page] PushedFilters: [IsNotNull(wp_web_page_sk)] ReadSchema: struct -(10) ColumnarToRow [codegen id : 2] -Input [1]: [wp_web_page_sk#11] +(5) ColumnarToRow [codegen id : 1] +Input [1]: [wp_web_page_sk#8] -(11) Filter [codegen id : 2] -Input [1]: [wp_web_page_sk#11] -Condition : isnotnull(wp_web_page_sk#11) +(6) Filter [codegen id : 1] +Input [1]: [wp_web_page_sk#8] +Condition : isnotnull(wp_web_page_sk#8) -(12) BroadcastExchange -Input [1]: [wp_web_page_sk#11] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#12] +(7) BroadcastExchange +Input [1]: [wp_web_page_sk#8] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#9] -(13) BroadcastHashJoin [codegen id : 3] -Left keys [1]: [ws_web_page_sk#6] -Right keys [1]: [wp_web_page_sk#11] +(8) BroadcastHashJoin [codegen id : 2] +Left keys [1]: [ws_web_page_sk#3] +Right keys [1]: [wp_web_page_sk#8] Join condition: None -(14) Project [codegen id : 3] -Output [6]: [ws_sold_date_sk#4, ws_item_sk#5, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10] -Input [8]: [ws_sold_date_sk#4, ws_item_sk#5, ws_web_page_sk#6, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wp_web_page_sk#11] +(9) Project [codegen id : 2] +Output [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7] +Input [8]: [ws_sold_date_sk#1, ws_item_sk#2, ws_web_page_sk#3, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wp_web_page_sk#8] -(15) Exchange -Input [6]: [ws_sold_date_sk#4, ws_item_sk#5, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10] -Arguments: hashpartitioning(cast(ws_item_sk#5 as bigint), cast(ws_order_number#7 as bigint), 5), true, [id=#13] +(10) Exchange +Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7] +Arguments: hashpartitioning(cast(ws_item_sk#2 as bigint), cast(ws_order_number#4 as bigint), 5), true, [id=#10] -(16) Sort [codegen id : 4] -Input [6]: [ws_sold_date_sk#4, ws_item_sk#5, ws_order_number#7, ws_quantity#8, ws_sales_price#9, ws_net_profit#10] -Arguments: [cast(ws_item_sk#5 as bigint) ASC NULLS FIRST, cast(ws_order_number#7 as bigint) ASC NULLS FIRST], false, 0 +(11) Sort [codegen id : 3] +Input [6]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7] +Arguments: [cast(ws_item_sk#2 as bigint) ASC NULLS FIRST, cast(ws_order_number#4 as bigint) ASC NULLS FIRST], false, 0 -(17) Scan parquet default.web_returns -Output [8]: [wr_item_sk#14, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_order_number#19, wr_fee#20, wr_refunded_cash#21] +(12) Scan parquet 
default.web_returns +Output [8]: [wr_item_sk#11, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_order_number#16, wr_fee#17, wr_refunded_cash#18] Batched: true Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_item_sk), IsNotNull(wr_order_number), IsNotNull(wr_refunded_cdemo_sk), IsNotNull(wr_returning_cdemo_sk), IsNotNull(wr_refunded_addr_sk), IsNotNull(wr_reason_sk)] ReadSchema: struct -(18) ColumnarToRow [codegen id : 5] -Input [8]: [wr_item_sk#14, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_order_number#19, wr_fee#20, wr_refunded_cash#21] +(13) ColumnarToRow [codegen id : 4] +Input [8]: [wr_item_sk#11, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_order_number#16, wr_fee#17, wr_refunded_cash#18] -(19) Filter [codegen id : 5] -Input [8]: [wr_item_sk#14, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_order_number#19, wr_fee#20, wr_refunded_cash#21] -Condition : (((((isnotnull(wr_item_sk#14) AND isnotnull(wr_order_number#19)) AND isnotnull(wr_refunded_cdemo_sk#15)) AND isnotnull(wr_returning_cdemo_sk#17)) AND isnotnull(wr_refunded_addr_sk#16)) AND isnotnull(wr_reason_sk#18)) +(14) Filter [codegen id : 4] +Input [8]: [wr_item_sk#11, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_order_number#16, wr_fee#17, wr_refunded_cash#18] +Condition : (((((isnotnull(wr_item_sk#11) AND isnotnull(wr_order_number#16)) AND isnotnull(wr_refunded_cdemo_sk#12)) AND isnotnull(wr_returning_cdemo_sk#14)) AND isnotnull(wr_refunded_addr_sk#13)) AND isnotnull(wr_reason_sk#15)) -(20) Exchange -Input [8]: [wr_item_sk#14, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_order_number#19, wr_fee#20, wr_refunded_cash#21] -Arguments: hashpartitioning(wr_item_sk#14, wr_order_number#19, 5), true, [id=#22] +(15) Exchange +Input [8]: [wr_item_sk#11, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_order_number#16, wr_fee#17, wr_refunded_cash#18] +Arguments: hashpartitioning(wr_item_sk#11, wr_order_number#16, 5), true, [id=#19] -(21) Sort [codegen id : 6] -Input [8]: [wr_item_sk#14, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_order_number#19, wr_fee#20, wr_refunded_cash#21] -Arguments: [wr_item_sk#14 ASC NULLS FIRST, wr_order_number#19 ASC NULLS FIRST], false, 0 +(16) Sort [codegen id : 5] +Input [8]: [wr_item_sk#11, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_order_number#16, wr_fee#17, wr_refunded_cash#18] +Arguments: [wr_item_sk#11 ASC NULLS FIRST, wr_order_number#16 ASC NULLS FIRST], false, 0 -(22) SortMergeJoin -Left keys [2]: [cast(ws_item_sk#5 as bigint), cast(ws_order_number#7 as bigint)] -Right keys [2]: [wr_item_sk#14, wr_order_number#19] +(17) SortMergeJoin [codegen id : 9] +Left keys [2]: [cast(ws_item_sk#2 as bigint), cast(ws_order_number#4 as bigint)] +Right keys [2]: [wr_item_sk#11, wr_order_number#16] Join condition: None -(23) Project -Output [10]: [ws_sold_date_sk#4, ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_fee#20, wr_refunded_cash#21] -Input [14]: [ws_sold_date_sk#4, ws_item_sk#5, ws_order_number#7, ws_quantity#8, 
ws_sales_price#9, ws_net_profit#10, wr_item_sk#14, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_order_number#19, wr_fee#20, wr_refunded_cash#21] +(18) Project [codegen id : 9] +Output [10]: [ws_sold_date_sk#1, ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_fee#17, wr_refunded_cash#18] +Input [14]: [ws_sold_date_sk#1, ws_item_sk#2, ws_order_number#4, ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wr_item_sk#11, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_order_number#16, wr_fee#17, wr_refunded_cash#18] + +(19) Scan parquet default.date_dim +Output [2]: [d_date_sk#20, d_year#21] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2000), IsNotNull(d_date_sk)] +ReadSchema: struct + +(20) ColumnarToRow [codegen id : 6] +Input [2]: [d_date_sk#20, d_year#21] + +(21) Filter [codegen id : 6] +Input [2]: [d_date_sk#20, d_year#21] +Condition : ((isnotnull(d_year#21) AND (d_year#21 = 2000)) AND isnotnull(d_date_sk#20)) + +(22) Project [codegen id : 6] +Output [1]: [d_date_sk#20] +Input [2]: [d_date_sk#20, d_year#21] + +(23) BroadcastExchange +Input [1]: [d_date_sk#20] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#22] (24) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [d_date_sk#1] -Right keys [1]: [ws_sold_date_sk#4] +Left keys [1]: [ws_sold_date_sk#1] +Right keys [1]: [d_date_sk#20] Join condition: None (25) Project [codegen id : 9] -Output [9]: [ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_fee#20, wr_refunded_cash#21] -Input [11]: [d_date_sk#1, ws_sold_date_sk#4, ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_fee#20, wr_refunded_cash#21] +Output [9]: [ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_fee#17, wr_refunded_cash#18] +Input [11]: [ws_sold_date_sk#1, ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_fee#17, wr_refunded_cash#18, d_date_sk#20] (26) Scan parquet default.reason Output [2]: [r_reason_sk#23, r_reason_desc#24] @@ -188,13 +188,13 @@ Input [2]: [r_reason_sk#23, r_reason_desc#24] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#25] (30) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [wr_reason_sk#18] +Left keys [1]: [wr_reason_sk#15] Right keys [1]: [cast(r_reason_sk#23 as bigint)] Join condition: None (31) Project [codegen id : 9] -Output [9]: [ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24] -Input [11]: [ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_reason_sk#18, wr_fee#20, wr_refunded_cash#21, r_reason_sk#23, r_reason_desc#24] +Output [9]: [ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24] 
+Input [11]: [ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_reason_sk#15, wr_fee#17, wr_refunded_cash#18, r_reason_sk#23, r_reason_desc#24] (32) Scan parquet default.customer_address Output [3]: [ca_address_sk#26, ca_state#27, ca_country#28] @@ -219,84 +219,84 @@ Input [2]: [ca_address_sk#26, ca_state#27] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#29] (37) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [wr_refunded_addr_sk#16] +Left keys [1]: [wr_refunded_addr_sk#13] Right keys [1]: [cast(ca_address_sk#26 as bigint)] -Join condition: ((((ca_state#27 IN (IN,OH,NJ) AND (ws_net_profit#10 >= 100.00)) AND (ws_net_profit#10 <= 200.00)) OR ((ca_state#27 IN (WI,CT,KY) AND (ws_net_profit#10 >= 150.00)) AND (ws_net_profit#10 <= 300.00))) OR ((ca_state#27 IN (LA,IA,AR) AND (ws_net_profit#10 >= 50.00)) AND (ws_net_profit#10 <= 250.00))) +Join condition: ((((ca_state#27 IN (IN,OH,NJ) AND (ws_net_profit#7 >= 100.00)) AND (ws_net_profit#7 <= 200.00)) OR ((ca_state#27 IN (WI,CT,KY) AND (ws_net_profit#7 >= 150.00)) AND (ws_net_profit#7 <= 300.00))) OR ((ca_state#27 IN (LA,IA,AR) AND (ws_net_profit#7 >= 50.00)) AND (ws_net_profit#7 <= 250.00))) (38) Project [codegen id : 9] -Output [7]: [ws_quantity#8, ws_sales_price#9, wr_refunded_cdemo_sk#15, wr_returning_cdemo_sk#17, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24] -Input [11]: [ws_quantity#8, ws_sales_price#9, ws_net_profit#10, wr_refunded_cdemo_sk#15, wr_refunded_addr_sk#16, wr_returning_cdemo_sk#17, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24, ca_address_sk#26, ca_state#27] +Output [7]: [ws_quantity#5, ws_sales_price#6, wr_refunded_cdemo_sk#12, wr_returning_cdemo_sk#14, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24] +Input [11]: [ws_quantity#5, ws_sales_price#6, ws_net_profit#7, wr_refunded_cdemo_sk#12, wr_refunded_addr_sk#13, wr_returning_cdemo_sk#14, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24, ca_address_sk#26, ca_state#27] (39) Exchange -Input [7]: [ws_quantity#8, ws_sales_price#9, wr_refunded_cdemo_sk#15, wr_returning_cdemo_sk#17, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24] -Arguments: hashpartitioning(wr_refunded_cdemo_sk#15, wr_returning_cdemo_sk#17, 5), true, [id=#30] +Input [7]: [ws_quantity#5, ws_sales_price#6, wr_refunded_cdemo_sk#12, wr_returning_cdemo_sk#14, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24] +Arguments: hashpartitioning(wr_refunded_cdemo_sk#12, wr_returning_cdemo_sk#14, 5), true, [id=#30] (40) Sort [codegen id : 10] -Input [7]: [ws_quantity#8, ws_sales_price#9, wr_refunded_cdemo_sk#15, wr_returning_cdemo_sk#17, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24] -Arguments: [wr_refunded_cdemo_sk#15 ASC NULLS FIRST, wr_returning_cdemo_sk#17 ASC NULLS FIRST], false, 0 +Input [7]: [ws_quantity#5, ws_sales_price#6, wr_refunded_cdemo_sk#12, wr_returning_cdemo_sk#14, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24] +Arguments: [wr_refunded_cdemo_sk#12 ASC NULLS FIRST, wr_returning_cdemo_sk#14 ASC NULLS FIRST], false, 0 (41) Scan parquet default.customer_demographics Output [3]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_demographics] -PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status), IsNotNull(cd_education_status)] +PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status), IsNotNull(cd_education_status), 
Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))] ReadSchema: struct -(42) ColumnarToRow [codegen id : 12] +(42) ColumnarToRow [codegen id : 11] +Input [3]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33] + +(43) Filter [codegen id : 11] Input [3]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33] +Condition : (((isnotnull(cd_demo_sk#31) AND isnotnull(cd_marital_status#32)) AND isnotnull(cd_education_status#33)) AND ((((cd_marital_status#32 = M) AND (cd_education_status#33 = Advanced Degree)) OR ((cd_marital_status#32 = S) AND (cd_education_status#33 = College))) OR ((cd_marital_status#32 = W) AND (cd_education_status#33 = 2 yr Degree)))) -(43) Filter [codegen id : 12] +(44) BroadcastExchange Input [3]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33] -Condition : ((isnotnull(cd_demo_sk#31) AND isnotnull(cd_marital_status#32)) AND isnotnull(cd_education_status#33)) +Arguments: HashedRelationBroadcastMode(List(input[1, string, false], input[2, string, false]),false), [id=#34] -(44) Scan parquet default.customer_demographics -Output [3]: [cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] +(45) Scan parquet default.customer_demographics +Output [3]: [cd_demo_sk#35, cd_marital_status#36, cd_education_status#37] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_demographics] -PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status), IsNotNull(cd_education_status), Or(Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Advanced Degree)),And(EqualTo(cd_marital_status,S),EqualTo(cd_education_status,College))),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,2 yr Degree)))] +PushedFilters: [IsNotNull(cd_demo_sk), IsNotNull(cd_marital_status), IsNotNull(cd_education_status)] ReadSchema: struct -(45) ColumnarToRow [codegen id : 11] -Input [3]: [cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] - -(46) Filter [codegen id : 11] -Input [3]: [cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] -Condition : (((isnotnull(cd_demo_sk#34) AND isnotnull(cd_marital_status#35)) AND isnotnull(cd_education_status#36)) AND ((((cd_marital_status#35 = M) AND (cd_education_status#36 = Advanced Degree)) OR ((cd_marital_status#35 = S) AND (cd_education_status#36 = College))) OR ((cd_marital_status#35 = W) AND (cd_education_status#36 = 2 yr Degree)))) +(46) ColumnarToRow +Input [3]: [cd_demo_sk#35, cd_marital_status#36, cd_education_status#37] -(47) BroadcastExchange -Input [3]: [cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] -Arguments: HashedRelationBroadcastMode(List(input[1, string, false], input[2, string, false]),false), [id=#37] +(47) Filter +Input [3]: [cd_demo_sk#35, cd_marital_status#36, cd_education_status#37] +Condition : ((isnotnull(cd_demo_sk#35) AND isnotnull(cd_marital_status#36)) AND isnotnull(cd_education_status#37)) (48) BroadcastHashJoin [codegen id : 12] Left keys [2]: [cd_marital_status#32, cd_education_status#33] -Right keys [2]: [cd_marital_status#35, cd_education_status#36] +Right keys [2]: [cd_marital_status#36, cd_education_status#37] Join condition: None (49) Project [codegen id : 12] -Output [4]: [cd_demo_sk#31, cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] -Input [6]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33, cd_demo_sk#34, 
cd_marital_status#35, cd_education_status#36] +Output [4]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33, cd_demo_sk#35] +Input [6]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33, cd_demo_sk#35, cd_marital_status#36, cd_education_status#37] (50) Exchange -Input [4]: [cd_demo_sk#31, cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] -Arguments: hashpartitioning(cast(cd_demo_sk#34 as bigint), cast(cd_demo_sk#31 as bigint), 5), true, [id=#38] +Input [4]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33, cd_demo_sk#35] +Arguments: hashpartitioning(cast(cd_demo_sk#31 as bigint), cast(cd_demo_sk#35 as bigint), 5), true, [id=#38] (51) Sort [codegen id : 13] -Input [4]: [cd_demo_sk#31, cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] -Arguments: [cast(cd_demo_sk#34 as bigint) ASC NULLS FIRST, cast(cd_demo_sk#31 as bigint) ASC NULLS FIRST], false, 0 +Input [4]: [cd_demo_sk#31, cd_marital_status#32, cd_education_status#33, cd_demo_sk#35] +Arguments: [cast(cd_demo_sk#31 as bigint) ASC NULLS FIRST, cast(cd_demo_sk#35 as bigint) ASC NULLS FIRST], false, 0 (52) SortMergeJoin [codegen id : 14] -Left keys [2]: [wr_refunded_cdemo_sk#15, wr_returning_cdemo_sk#17] -Right keys [2]: [cast(cd_demo_sk#34 as bigint), cast(cd_demo_sk#31 as bigint)] -Join condition: ((((((cd_marital_status#35 = M) AND (cd_education_status#36 = Advanced Degree)) AND (ws_sales_price#9 >= 100.00)) AND (ws_sales_price#9 <= 150.00)) OR ((((cd_marital_status#35 = S) AND (cd_education_status#36 = College)) AND (ws_sales_price#9 >= 50.00)) AND (ws_sales_price#9 <= 100.00))) OR ((((cd_marital_status#35 = W) AND (cd_education_status#36 = 2 yr Degree)) AND (ws_sales_price#9 >= 150.00)) AND (ws_sales_price#9 <= 200.00))) +Left keys [2]: [wr_refunded_cdemo_sk#12, wr_returning_cdemo_sk#14] +Right keys [2]: [cast(cd_demo_sk#31 as bigint), cast(cd_demo_sk#35 as bigint)] +Join condition: ((((((cd_marital_status#32 = M) AND (cd_education_status#33 = Advanced Degree)) AND (ws_sales_price#6 >= 100.00)) AND (ws_sales_price#6 <= 150.00)) OR ((((cd_marital_status#32 = S) AND (cd_education_status#33 = College)) AND (ws_sales_price#6 >= 50.00)) AND (ws_sales_price#6 <= 100.00))) OR ((((cd_marital_status#32 = W) AND (cd_education_status#33 = 2 yr Degree)) AND (ws_sales_price#6 >= 150.00)) AND (ws_sales_price#6 <= 200.00))) (53) Project [codegen id : 14] -Output [4]: [ws_quantity#8, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24] -Input [11]: [ws_quantity#8, ws_sales_price#9, wr_refunded_cdemo_sk#15, wr_returning_cdemo_sk#17, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24, cd_demo_sk#31, cd_demo_sk#34, cd_marital_status#35, cd_education_status#36] +Output [4]: [ws_quantity#5, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24] +Input [11]: [ws_quantity#5, ws_sales_price#6, wr_refunded_cdemo_sk#12, wr_returning_cdemo_sk#14, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24, cd_demo_sk#31, cd_marital_status#32, cd_education_status#33, cd_demo_sk#35] (54) HashAggregate [codegen id : 14] -Input [4]: [ws_quantity#8, wr_fee#20, wr_refunded_cash#21, r_reason_desc#24] +Input [4]: [ws_quantity#5, wr_fee#17, wr_refunded_cash#18, r_reason_desc#24] Keys [1]: [r_reason_desc#24] -Functions [3]: [partial_avg(cast(ws_quantity#8 as bigint)), partial_avg(UnscaledValue(wr_refunded_cash#21)), partial_avg(UnscaledValue(wr_fee#20))] +Functions [3]: [partial_avg(cast(ws_quantity#5 as bigint)), partial_avg(UnscaledValue(wr_refunded_cash#18)), partial_avg(UnscaledValue(wr_fee#17))] Aggregate Attributes [6]: [sum#39, 
count#40, sum#41, count#42, sum#43, count#44] Results [7]: [r_reason_desc#24, sum#45, count#46, sum#47, count#48, sum#49, count#50] @@ -307,9 +307,9 @@ Arguments: hashpartitioning(r_reason_desc#24, 5), true, [id=#51] (56) HashAggregate [codegen id : 15] Input [7]: [r_reason_desc#24, sum#45, count#46, sum#47, count#48, sum#49, count#50] Keys [1]: [r_reason_desc#24] -Functions [3]: [avg(cast(ws_quantity#8 as bigint)), avg(UnscaledValue(wr_refunded_cash#21)), avg(UnscaledValue(wr_fee#20))] -Aggregate Attributes [3]: [avg(cast(ws_quantity#8 as bigint))#52, avg(UnscaledValue(wr_refunded_cash#21))#53, avg(UnscaledValue(wr_fee#20))#54] -Results [5]: [substr(r_reason_desc#24, 1, 20) AS substr(r_reason_desc, 1, 20)#55, avg(cast(ws_quantity#8 as bigint))#52 AS avg(ws_quantity)#56, cast((avg(UnscaledValue(wr_refunded_cash#21))#53 / 100.0) as decimal(11,6)) AS avg(wr_refunded_cash)#57, cast((avg(UnscaledValue(wr_fee#20))#54 / 100.0) as decimal(11,6)) AS avg(wr_fee)#58, avg(cast(ws_quantity#8 as bigint))#52 AS aggOrder#59] +Functions [3]: [avg(cast(ws_quantity#5 as bigint)), avg(UnscaledValue(wr_refunded_cash#18)), avg(UnscaledValue(wr_fee#17))] +Aggregate Attributes [3]: [avg(cast(ws_quantity#5 as bigint))#52, avg(UnscaledValue(wr_refunded_cash#18))#53, avg(UnscaledValue(wr_fee#17))#54] +Results [5]: [substr(r_reason_desc#24, 1, 20) AS substr(r_reason_desc, 1, 20)#55, avg(cast(ws_quantity#5 as bigint))#52 AS avg(ws_quantity)#56, cast((avg(UnscaledValue(wr_refunded_cash#18))#53 / 100.0) as decimal(11,6)) AS avg(wr_refunded_cash)#57, cast((avg(UnscaledValue(wr_fee#17))#54 / 100.0) as decimal(11,6)) AS avg(wr_fee)#58, avg(cast(ws_quantity#5 as bigint))#52 AS aggOrder#59] (57) TakeOrderedAndProject Input [5]: [substr(r_reason_desc, 1, 20)#55, avg(ws_quantity)#56, avg(wr_refunded_cash)#57, avg(wr_fee)#58, aggOrder#59] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/simplified.txt index e7aee17172e60..3fa7d84f55966 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q85.sf100/simplified.txt @@ -18,23 +18,15 @@ TakeOrderedAndProject [substr(r_reason_desc, 1, 20),aggOrder,avg(wr_refunded_cas Project [ws_quantity,ws_sales_price,ws_net_profit,wr_refunded_cdemo_sk,wr_refunded_addr_sk,wr_returning_cdemo_sk,wr_fee,wr_refunded_cash,r_reason_desc] BroadcastHashJoin [wr_reason_sk,r_reason_sk] Project [ws_quantity,ws_sales_price,ws_net_profit,wr_refunded_cdemo_sk,wr_refunded_addr_sk,wr_returning_cdemo_sk,wr_reason_sk,wr_fee,wr_refunded_cash] - BroadcastHashJoin [d_date_sk,ws_sold_date_sk] - InputAdapter - BroadcastExchange #3 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] Project [ws_sold_date_sk,ws_quantity,ws_sales_price,ws_net_profit,wr_refunded_cdemo_sk,wr_refunded_addr_sk,wr_returning_cdemo_sk,wr_reason_sk,wr_fee,wr_refunded_cash] SortMergeJoin [ws_item_sk,ws_order_number,wr_item_sk,wr_order_number] InputAdapter - WholeStageCodegen (4) + WholeStageCodegen (3) Sort [ws_item_sk,ws_order_number] InputAdapter - Exchange [ws_item_sk,ws_order_number] #4 - WholeStageCodegen (3) + Exchange [ws_item_sk,ws_order_number] #3 + WholeStageCodegen (2) Project 
[ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_sales_price,ws_net_profit] BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] Filter [ws_item_sk,ws_order_number,ws_web_page_sk,ws_sold_date_sk,ws_sales_price,ws_net_profit] @@ -42,22 +34,30 @@ TakeOrderedAndProject [substr(r_reason_desc, 1, 20),aggOrder,avg(wr_refunded_cas InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_web_page_sk,ws_order_number,ws_quantity,ws_sales_price,ws_net_profit] InputAdapter - BroadcastExchange #5 - WholeStageCodegen (2) + BroadcastExchange #4 + WholeStageCodegen (1) Filter [wp_web_page_sk] ColumnarToRow InputAdapter Scan parquet default.web_page [wp_web_page_sk] InputAdapter - WholeStageCodegen (6) + WholeStageCodegen (5) Sort [wr_item_sk,wr_order_number] InputAdapter - Exchange [wr_item_sk,wr_order_number] #6 - WholeStageCodegen (5) + Exchange [wr_item_sk,wr_order_number] #5 + WholeStageCodegen (4) Filter [wr_item_sk,wr_order_number,wr_refunded_cdemo_sk,wr_returning_cdemo_sk,wr_refunded_addr_sk,wr_reason_sk] ColumnarToRow InputAdapter Scan parquet default.web_returns [wr_item_sk,wr_refunded_cdemo_sk,wr_refunded_addr_sk,wr_returning_cdemo_sk,wr_reason_sk,wr_order_number,wr_fee,wr_refunded_cash] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (6) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] InputAdapter BroadcastExchange #7 WholeStageCodegen (7) @@ -79,12 +79,8 @@ TakeOrderedAndProject [substr(r_reason_desc, 1, 20),aggOrder,avg(wr_refunded_cas InputAdapter Exchange [cd_demo_sk,cd_demo_sk] #9 WholeStageCodegen (12) - Project [cd_demo_sk,cd_demo_sk,cd_marital_status,cd_education_status] + Project [cd_demo_sk,cd_marital_status,cd_education_status,cd_demo_sk] BroadcastHashJoin [cd_marital_status,cd_education_status,cd_marital_status,cd_education_status] - Filter [cd_demo_sk,cd_marital_status,cd_education_status] - ColumnarToRow - InputAdapter - Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status,cd_education_status] InputAdapter BroadcastExchange #10 WholeStageCodegen (11) @@ -92,3 +88,7 @@ TakeOrderedAndProject [substr(r_reason_desc, 1, 20),aggOrder,avg(wr_refunded_cas ColumnarToRow InputAdapter Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status,cd_education_status] + Filter [cd_demo_sk,cd_marital_status,cd_education_status] + ColumnarToRow + InputAdapter + Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status,cd_education_status] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt index 3f787bfb99b67..e279902a125c5 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/explain.txt @@ -1,54 +1,55 @@ == Physical Plan == -TakeOrderedAndProject (50) -+- * Project (49) - +- BroadcastNestedLoopJoin Inner BuildRight (48) - :- * HashAggregate (27) - : +- Exchange (26) - : +- * HashAggregate (25) - : +- * Project (24) - : +- * BroadcastHashJoin Inner BuildRight (23) - : :- * Project (17) - : : +- * BroadcastHashJoin Inner BuildRight (16) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.web_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * 
Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.web_page (4) - : : +- BroadcastExchange (15) - : : +- * Project (14) - : : +- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.household_demographics (11) - : +- BroadcastExchange (22) - : +- * Project (21) - : +- * Filter (20) - : +- * ColumnarToRow (19) - : +- Scan parquet default.time_dim (18) - +- BroadcastExchange (47) - +- * HashAggregate (46) - +- Exchange (45) - +- * HashAggregate (44) - +- * Project (43) - +- * BroadcastHashJoin Inner BuildRight (42) - :- * Project (36) - : +- * BroadcastHashJoin Inner BuildRight (35) - : :- * Project (33) - : : +- * BroadcastHashJoin Inner BuildRight (32) - : : :- * Filter (30) - : : : +- * ColumnarToRow (29) - : : : +- Scan parquet default.web_sales (28) - : : +- ReusedExchange (31) - : +- ReusedExchange (34) - +- BroadcastExchange (41) - +- * Project (40) - +- * Filter (39) - +- * ColumnarToRow (38) - +- Scan parquet default.time_dim (37) +* Sort (51) ++- Exchange (50) + +- * Project (49) + +- BroadcastNestedLoopJoin Inner BuildRight (48) + :- * HashAggregate (27) + : +- Exchange (26) + : +- * HashAggregate (25) + : +- * Project (24) + : +- * BroadcastHashJoin Inner BuildRight (23) + : :- * Project (17) + : : +- * BroadcastHashJoin Inner BuildRight (16) + : : :- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.web_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.web_page (4) + : : +- BroadcastExchange (15) + : : +- * Project (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.household_demographics (11) + : +- BroadcastExchange (22) + : +- * Project (21) + : +- * Filter (20) + : +- * ColumnarToRow (19) + : +- Scan parquet default.time_dim (18) + +- BroadcastExchange (47) + +- * HashAggregate (46) + +- Exchange (45) + +- * HashAggregate (44) + +- * Project (43) + +- * BroadcastHashJoin Inner BuildRight (42) + :- * Project (36) + : +- * BroadcastHashJoin Inner BuildRight (35) + : :- * Project (33) + : : +- * BroadcastHashJoin Inner BuildRight (32) + : : :- * Filter (30) + : : : +- * ColumnarToRow (29) + : : : +- Scan parquet default.web_sales (28) + : : +- ReusedExchange (31) + : +- ReusedExchange (34) + +- BroadcastExchange (41) + +- * Project (40) + +- * Filter (39) + +- * ColumnarToRow (38) + +- Scan parquet default.time_dim (37) (1) Scan parquet default.web_sales @@ -274,7 +275,11 @@ Join condition: None Output [1]: [CheckOverflow((promote_precision(cast(amc#17 as decimal(15,4))) / promote_precision(cast(pmc#23 as decimal(15,4)))), DecimalType(35,20), true) AS am_pm_ratio#25] Input [2]: [amc#17, pmc#23] -(50) TakeOrderedAndProject +(50) Exchange Input [1]: [am_pm_ratio#25] -Arguments: 100, [am_pm_ratio#25 ASC NULLS FIRST], [am_pm_ratio#25] +Arguments: rangepartitioning(am_pm_ratio#25 ASC NULLS FIRST, 5), true, [id=#26] + +(51) Sort [codegen id : 12] +Input [1]: [am_pm_ratio#25] +Arguments: [am_pm_ratio#25 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/simplified.txt index 1fe0442eab13f..5b33a90675699 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/simplified.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90.sf100/simplified.txt @@ -1,74 +1,77 @@ -TakeOrderedAndProject [am_pm_ratio] - WholeStageCodegen (11) - Project [amc,pmc] - InputAdapter - BroadcastNestedLoopJoin - WholeStageCodegen (5) - HashAggregate [count] [count(1),amc,count] - InputAdapter - Exchange #1 - WholeStageCodegen (4) - HashAggregate [count,count] - Project - BroadcastHashJoin [ws_sold_time_sk,t_time_sk] - Project [ws_sold_time_sk] - BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] - Project [ws_sold_time_sk,ws_ship_hdemo_sk] - BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] - Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] - InputAdapter - BroadcastExchange #2 - WholeStageCodegen (1) - Project [wp_web_page_sk] - Filter [wp_char_count,wp_web_page_sk] +WholeStageCodegen (12) + Sort [am_pm_ratio] + InputAdapter + Exchange [am_pm_ratio] #1 + WholeStageCodegen (11) + Project [amc,pmc] + InputAdapter + BroadcastNestedLoopJoin + WholeStageCodegen (5) + HashAggregate [count] [count(1),amc,count] + InputAdapter + Exchange #2 + WholeStageCodegen (4) + HashAggregate [count,count] + Project + BroadcastHashJoin [ws_sold_time_sk,t_time_sk] + Project [ws_sold_time_sk] + BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] + Project [ws_sold_time_sk,ws_ship_hdemo_sk] + BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] + Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + Project [wp_web_page_sk] + Filter [wp_char_count,wp_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_page [wp_web_page_sk,wp_char_count] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (2) + Project [hd_demo_sk] + Filter [hd_dep_count,hd_demo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.household_demographics [hd_demo_sk,hd_dep_count] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (3) + Project [t_time_sk] + Filter [t_hour,t_time_sk] + ColumnarToRow + InputAdapter + Scan parquet default.time_dim [t_time_sk,t_hour] + BroadcastExchange #6 + WholeStageCodegen (10) + HashAggregate [count] [count(1),pmc,count] + InputAdapter + Exchange #7 + WholeStageCodegen (9) + HashAggregate [count,count] + Project + BroadcastHashJoin [ws_sold_time_sk,t_time_sk] + Project [ws_sold_time_sk] + BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] + Project [ws_sold_time_sk,ws_ship_hdemo_sk] + BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] + Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] ColumnarToRow InputAdapter - Scan parquet default.web_page [wp_web_page_sk,wp_char_count] - InputAdapter - BroadcastExchange #3 - WholeStageCodegen (2) - Project [hd_demo_sk] - Filter [hd_dep_count,hd_demo_sk] - ColumnarToRow + Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] InputAdapter - Scan parquet default.household_demographics [hd_demo_sk,hd_dep_count] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (3) - Project [t_time_sk] - Filter [t_hour,t_time_sk] - ColumnarToRow + ReusedExchange [wp_web_page_sk] #3 InputAdapter - Scan parquet default.time_dim [t_time_sk,t_hour] - BroadcastExchange #5 - WholeStageCodegen (10) - HashAggregate [count] [count(1),pmc,count] - InputAdapter - Exchange #6 - WholeStageCodegen (9) - HashAggregate 
[count,count] - Project - BroadcastHashJoin [ws_sold_time_sk,t_time_sk] - Project [ws_sold_time_sk] - BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] - Project [ws_sold_time_sk,ws_ship_hdemo_sk] - BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] - Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] - InputAdapter - ReusedExchange [wp_web_page_sk] #2 - InputAdapter - ReusedExchange [hd_demo_sk] #3 - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (8) - Project [t_time_sk] - Filter [t_hour,t_time_sk] - ColumnarToRow - InputAdapter - Scan parquet default.time_dim [t_time_sk,t_hour] + ReusedExchange [hd_demo_sk] #4 + InputAdapter + BroadcastExchange #8 + WholeStageCodegen (8) + Project [t_time_sk] + Filter [t_hour,t_time_sk] + ColumnarToRow + InputAdapter + Scan parquet default.time_dim [t_time_sk,t_hour] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt index 550bf89ce3b99..7a21808803aaa 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/explain.txt @@ -1,54 +1,55 @@ == Physical Plan == -TakeOrderedAndProject (50) -+- * Project (49) - +- BroadcastNestedLoopJoin Inner BuildRight (48) - :- * HashAggregate (27) - : +- Exchange (26) - : +- * HashAggregate (25) - : +- * Project (24) - : +- * BroadcastHashJoin Inner BuildRight (23) - : :- * Project (17) - : : +- * BroadcastHashJoin Inner BuildRight (16) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.web_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.household_demographics (4) - : : +- BroadcastExchange (15) - : : +- * Project (14) - : : +- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.time_dim (11) - : +- BroadcastExchange (22) - : +- * Project (21) - : +- * Filter (20) - : +- * ColumnarToRow (19) - : +- Scan parquet default.web_page (18) - +- BroadcastExchange (47) - +- * HashAggregate (46) - +- Exchange (45) - +- * HashAggregate (44) - +- * Project (43) - +- * BroadcastHashJoin Inner BuildRight (42) - :- * Project (40) - : +- * BroadcastHashJoin Inner BuildRight (39) - : :- * Project (33) - : : +- * BroadcastHashJoin Inner BuildRight (32) - : : :- * Filter (30) - : : : +- * ColumnarToRow (29) - : : : +- Scan parquet default.web_sales (28) - : : +- ReusedExchange (31) - : +- BroadcastExchange (38) - : +- * Project (37) - : +- * Filter (36) - : +- * ColumnarToRow (35) - : +- Scan parquet default.time_dim (34) - +- ReusedExchange (41) +* Sort (51) ++- Exchange (50) + +- * Project (49) + +- BroadcastNestedLoopJoin Inner BuildRight (48) + :- * HashAggregate (27) + : +- Exchange (26) + : +- * HashAggregate (25) + : +- * Project (24) + : +- * BroadcastHashJoin Inner BuildRight (23) + : :- * Project (17) + : : +- * BroadcastHashJoin Inner BuildRight (16) + : : :- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.web_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : 
+- * ColumnarToRow (5) + : : : +- Scan parquet default.household_demographics (4) + : : +- BroadcastExchange (15) + : : +- * Project (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.time_dim (11) + : +- BroadcastExchange (22) + : +- * Project (21) + : +- * Filter (20) + : +- * ColumnarToRow (19) + : +- Scan parquet default.web_page (18) + +- BroadcastExchange (47) + +- * HashAggregate (46) + +- Exchange (45) + +- * HashAggregate (44) + +- * Project (43) + +- * BroadcastHashJoin Inner BuildRight (42) + :- * Project (40) + : +- * BroadcastHashJoin Inner BuildRight (39) + : :- * Project (33) + : : +- * BroadcastHashJoin Inner BuildRight (32) + : : :- * Filter (30) + : : : +- * ColumnarToRow (29) + : : : +- Scan parquet default.web_sales (28) + : : +- ReusedExchange (31) + : +- BroadcastExchange (38) + : +- * Project (37) + : +- * Filter (36) + : +- * ColumnarToRow (35) + : +- Scan parquet default.time_dim (34) + +- ReusedExchange (41) (1) Scan parquet default.web_sales @@ -274,7 +275,11 @@ Join condition: None Output [1]: [CheckOverflow((promote_precision(cast(amc#17 as decimal(15,4))) / promote_precision(cast(pmc#23 as decimal(15,4)))), DecimalType(35,20), true) AS am_pm_ratio#25] Input [2]: [amc#17, pmc#23] -(50) TakeOrderedAndProject +(50) Exchange Input [1]: [am_pm_ratio#25] -Arguments: 100, [am_pm_ratio#25 ASC NULLS FIRST], [am_pm_ratio#25] +Arguments: rangepartitioning(am_pm_ratio#25 ASC NULLS FIRST, 5), true, [id=#26] + +(51) Sort [codegen id : 12] +Input [1]: [am_pm_ratio#25] +Arguments: [am_pm_ratio#25 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/simplified.txt index 121d84d9dde2f..bf3cfc9cbc037 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q90/simplified.txt @@ -1,74 +1,77 @@ -TakeOrderedAndProject [am_pm_ratio] - WholeStageCodegen (11) - Project [amc,pmc] - InputAdapter - BroadcastNestedLoopJoin - WholeStageCodegen (5) - HashAggregate [count] [count(1),amc,count] - InputAdapter - Exchange #1 - WholeStageCodegen (4) - HashAggregate [count,count] - Project - BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] - Project [ws_web_page_sk] - BroadcastHashJoin [ws_sold_time_sk,t_time_sk] - Project [ws_sold_time_sk,ws_web_page_sk] - BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] - Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] - InputAdapter - BroadcastExchange #2 - WholeStageCodegen (1) - Project [hd_demo_sk] - Filter [hd_dep_count,hd_demo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.household_demographics [hd_demo_sk,hd_dep_count] - InputAdapter - BroadcastExchange #3 - WholeStageCodegen (2) - Project [t_time_sk] - Filter [t_hour,t_time_sk] - ColumnarToRow - InputAdapter - Scan parquet default.time_dim [t_time_sk,t_hour] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (3) - Project [wp_web_page_sk] - Filter [wp_char_count,wp_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_page [wp_web_page_sk,wp_char_count] - BroadcastExchange #5 - WholeStageCodegen (10) - HashAggregate [count] [count(1),pmc,count] - InputAdapter - Exchange #6 - WholeStageCodegen (9) - HashAggregate [count,count] - Project - 
BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] - Project [ws_web_page_sk] - BroadcastHashJoin [ws_sold_time_sk,t_time_sk] - Project [ws_sold_time_sk,ws_web_page_sk] - BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] - Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] - ColumnarToRow +WholeStageCodegen (12) + Sort [am_pm_ratio] + InputAdapter + Exchange [am_pm_ratio] #1 + WholeStageCodegen (11) + Project [amc,pmc] + InputAdapter + BroadcastNestedLoopJoin + WholeStageCodegen (5) + HashAggregate [count] [count(1),amc,count] + InputAdapter + Exchange #2 + WholeStageCodegen (4) + HashAggregate [count,count] + Project + BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] + Project [ws_web_page_sk] + BroadcastHashJoin [ws_sold_time_sk,t_time_sk] + Project [ws_sold_time_sk,ws_web_page_sk] + BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] + Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] InputAdapter - Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] + BroadcastExchange #3 + WholeStageCodegen (1) + Project [hd_demo_sk] + Filter [hd_dep_count,hd_demo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.household_demographics [hd_demo_sk,hd_dep_count] InputAdapter - ReusedExchange [hd_demo_sk] #2 + BroadcastExchange #4 + WholeStageCodegen (2) + Project [t_time_sk] + Filter [t_hour,t_time_sk] + ColumnarToRow + InputAdapter + Scan parquet default.time_dim [t_time_sk,t_hour] InputAdapter - BroadcastExchange #7 - WholeStageCodegen (7) - Project [t_time_sk] - Filter [t_hour,t_time_sk] + BroadcastExchange #5 + WholeStageCodegen (3) + Project [wp_web_page_sk] + Filter [wp_char_count,wp_web_page_sk] ColumnarToRow InputAdapter - Scan parquet default.time_dim [t_time_sk,t_hour] - InputAdapter - ReusedExchange [wp_web_page_sk] #4 + Scan parquet default.web_page [wp_web_page_sk,wp_char_count] + BroadcastExchange #6 + WholeStageCodegen (10) + HashAggregate [count] [count(1),pmc,count] + InputAdapter + Exchange #7 + WholeStageCodegen (9) + HashAggregate [count,count] + Project + BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] + Project [ws_web_page_sk] + BroadcastHashJoin [ws_sold_time_sk,t_time_sk] + Project [ws_sold_time_sk,ws_web_page_sk] + BroadcastHashJoin [ws_ship_hdemo_sk,hd_demo_sk] + Filter [ws_ship_hdemo_sk,ws_sold_time_sk,ws_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_time_sk,ws_ship_hdemo_sk,ws_web_page_sk] + InputAdapter + ReusedExchange [hd_demo_sk] #3 + InputAdapter + BroadcastExchange #8 + WholeStageCodegen (7) + Project [t_time_sk] + Filter [t_hour,t_time_sk] + ColumnarToRow + InputAdapter + Scan parquet default.time_dim [t_time_sk,t_hour] + InputAdapter + ReusedExchange [wp_web_page_sk] #5 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt index 69b02557c4750..6bcbe470cec50 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/explain.txt @@ -8,206 +8,206 @@ +- * BroadcastHashJoin Inner BuildRight (41) :- * Project (36) : +- * BroadcastHashJoin Inner BuildRight (35) - : :- * Project (23) - : : +- * BroadcastHashJoin Inner BuildRight (22) - : : :- * Project (16) - : : : +- * BroadcastHashJoin Inner BuildRight (15) 
- : : : :- * Project (9) - : : : : +- * BroadcastHashJoin Inner BuildLeft (8) - : : : : :- BroadcastExchange (4) - : : : : : +- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.customer_demographics (1) - : : : : +- * Filter (7) - : : : : +- * ColumnarToRow (6) - : : : : +- Scan parquet default.customer (5) - : : : +- BroadcastExchange (14) - : : : +- * Project (13) - : : : +- * Filter (12) - : : : +- * ColumnarToRow (11) - : : : +- Scan parquet default.household_demographics (10) - : : +- BroadcastExchange (21) - : : +- * Project (20) - : : +- * Filter (19) - : : +- * ColumnarToRow (18) - : : +- Scan parquet default.customer_address (17) + : :- * Project (30) + : : +- * BroadcastHashJoin Inner BuildRight (29) + : : :- * Project (23) + : : : +- * BroadcastHashJoin Inner BuildRight (22) + : : : :- * Project (16) + : : : : +- * BroadcastHashJoin Inner BuildLeft (15) + : : : : :- BroadcastExchange (11) + : : : : : +- * Project (10) + : : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : : :- * Filter (3) + : : : : : : +- * ColumnarToRow (2) + : : : : : : +- Scan parquet default.catalog_returns (1) + : : : : : +- BroadcastExchange (8) + : : : : : +- * Project (7) + : : : : : +- * Filter (6) + : : : : : +- * ColumnarToRow (5) + : : : : : +- Scan parquet default.date_dim (4) + : : : : +- * Filter (14) + : : : : +- * ColumnarToRow (13) + : : : : +- Scan parquet default.customer (12) + : : : +- BroadcastExchange (21) + : : : +- * Project (20) + : : : +- * Filter (19) + : : : +- * ColumnarToRow (18) + : : : +- Scan parquet default.household_demographics (17) + : : +- BroadcastExchange (28) + : : +- * Project (27) + : : +- * Filter (26) + : : +- * ColumnarToRow (25) + : : +- Scan parquet default.customer_address (24) : +- BroadcastExchange (34) - : +- * Project (33) - : +- * BroadcastHashJoin Inner BuildLeft (32) - : :- BroadcastExchange (28) - : : +- * Project (27) - : : +- * Filter (26) - : : +- * ColumnarToRow (25) - : : +- Scan parquet default.date_dim (24) - : +- * Filter (31) - : +- * ColumnarToRow (30) - : +- Scan parquet default.catalog_returns (29) + : +- * Filter (33) + : +- * ColumnarToRow (32) + : +- Scan parquet default.customer_demographics (31) +- BroadcastExchange (40) +- * Filter (39) +- * ColumnarToRow (38) +- Scan parquet default.call_center (37) -(1) Scan parquet default.customer_demographics -Output [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] +(1) Scan parquet default.catalog_returns +Output [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_call_center_sk#3, cr_net_loss#4] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_demographics] -PushedFilters: [Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Unknown)),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,Advanced Degree))), IsNotNull(cd_demo_sk)] -ReadSchema: struct - -(2) ColumnarToRow [codegen id : 1] -Input [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] +Location [not included in comparison]/{warehouse_dir}/catalog_returns] +PushedFilters: [IsNotNull(cr_call_center_sk), IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_customer_sk)] +ReadSchema: struct -(3) Filter [codegen id : 1] -Input [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] -Condition : ((((cd_marital_status#2 = M) AND (cd_education_status#3 = Unknown)) OR ((cd_marital_status#2 = W) AND (cd_education_status#3 = Advanced Degree))) AND isnotnull(cd_demo_sk#1)) +(2) ColumnarToRow [codegen id : 2] 
+Input [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_call_center_sk#3, cr_net_loss#4] -(4) BroadcastExchange -Input [3]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#4] +(3) Filter [codegen id : 2] +Input [4]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_call_center_sk#3, cr_net_loss#4] +Condition : ((isnotnull(cr_call_center_sk#3) AND isnotnull(cr_returned_date_sk#1)) AND isnotnull(cr_returning_customer_sk#2)) -(5) Scan parquet default.customer -Output [4]: [c_customer_sk#5, c_current_cdemo_sk#6, c_current_hdemo_sk#7, c_current_addr_sk#8] +(4) Scan parquet default.date_dim +Output [3]: [d_date_sk#5, d_year#6, d_moy#7] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer] -PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,11), IsNotNull(d_date_sk)] +ReadSchema: struct -(6) ColumnarToRow -Input [4]: [c_customer_sk#5, c_current_cdemo_sk#6, c_current_hdemo_sk#7, c_current_addr_sk#8] +(5) ColumnarToRow [codegen id : 1] +Input [3]: [d_date_sk#5, d_year#6, d_moy#7] -(7) Filter -Input [4]: [c_customer_sk#5, c_current_cdemo_sk#6, c_current_hdemo_sk#7, c_current_addr_sk#8] -Condition : (((isnotnull(c_customer_sk#5) AND isnotnull(c_current_addr_sk#8)) AND isnotnull(c_current_cdemo_sk#6)) AND isnotnull(c_current_hdemo_sk#7)) +(6) Filter [codegen id : 1] +Input [3]: [d_date_sk#5, d_year#6, d_moy#7] +Condition : ((((isnotnull(d_year#6) AND isnotnull(d_moy#7)) AND (d_year#6 = 1998)) AND (d_moy#7 = 11)) AND isnotnull(d_date_sk#5)) -(8) BroadcastHashJoin [codegen id : 7] -Left keys [1]: [cd_demo_sk#1] -Right keys [1]: [c_current_cdemo_sk#6] -Join condition: None +(7) Project [codegen id : 1] +Output [1]: [d_date_sk#5] +Input [3]: [d_date_sk#5, d_year#6, d_moy#7] -(9) Project [codegen id : 7] -Output [5]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, c_current_hdemo_sk#7, c_current_addr_sk#8] -Input [7]: [cd_demo_sk#1, cd_marital_status#2, cd_education_status#3, c_customer_sk#5, c_current_cdemo_sk#6, c_current_hdemo_sk#7, c_current_addr_sk#8] +(8) BroadcastExchange +Input [1]: [d_date_sk#5] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#8] -(10) Scan parquet default.household_demographics -Output [2]: [hd_demo_sk#9, hd_buy_potential#10] -Batched: true -Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_buy_potential), StringStartsWith(hd_buy_potential,Unknown), IsNotNull(hd_demo_sk)] -ReadSchema: struct +(9) BroadcastHashJoin [codegen id : 2] +Left keys [1]: [cr_returned_date_sk#1] +Right keys [1]: [d_date_sk#5] +Join condition: None + +(10) Project [codegen id : 2] +Output [3]: [cr_returning_customer_sk#2, cr_call_center_sk#3, cr_net_loss#4] +Input [5]: [cr_returned_date_sk#1, cr_returning_customer_sk#2, cr_call_center_sk#3, cr_net_loss#4, d_date_sk#5] -(11) ColumnarToRow [codegen id : 2] -Input [2]: [hd_demo_sk#9, hd_buy_potential#10] +(11) BroadcastExchange +Input [3]: [cr_returning_customer_sk#2, cr_call_center_sk#3, cr_net_loss#4] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#9] -(12) Filter [codegen id : 2] -Input [2]: 
[hd_demo_sk#9, hd_buy_potential#10] -Condition : ((isnotnull(hd_buy_potential#10) AND StartsWith(hd_buy_potential#10, Unknown)) AND isnotnull(hd_demo_sk#9)) +(12) Scan parquet default.customer +Output [4]: [c_customer_sk#10, c_current_cdemo_sk#11, c_current_hdemo_sk#12, c_current_addr_sk#13] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer] +PushedFilters: [IsNotNull(c_customer_sk), IsNotNull(c_current_addr_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_hdemo_sk)] +ReadSchema: struct -(13) Project [codegen id : 2] -Output [1]: [hd_demo_sk#9] -Input [2]: [hd_demo_sk#9, hd_buy_potential#10] +(13) ColumnarToRow +Input [4]: [c_customer_sk#10, c_current_cdemo_sk#11, c_current_hdemo_sk#12, c_current_addr_sk#13] -(14) BroadcastExchange -Input [1]: [hd_demo_sk#9] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#11] +(14) Filter +Input [4]: [c_customer_sk#10, c_current_cdemo_sk#11, c_current_hdemo_sk#12, c_current_addr_sk#13] +Condition : (((isnotnull(c_customer_sk#10) AND isnotnull(c_current_addr_sk#13)) AND isnotnull(c_current_cdemo_sk#11)) AND isnotnull(c_current_hdemo_sk#12)) (15) BroadcastHashJoin [codegen id : 7] -Left keys [1]: [c_current_hdemo_sk#7] -Right keys [1]: [hd_demo_sk#9] +Left keys [1]: [cr_returning_customer_sk#2] +Right keys [1]: [c_customer_sk#10] Join condition: None (16) Project [codegen id : 7] -Output [4]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, c_current_addr_sk#8] -Input [6]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, c_current_hdemo_sk#7, c_current_addr_sk#8, hd_demo_sk#9] +Output [5]: [cr_call_center_sk#3, cr_net_loss#4, c_current_cdemo_sk#11, c_current_hdemo_sk#12, c_current_addr_sk#13] +Input [7]: [cr_returning_customer_sk#2, cr_call_center_sk#3, cr_net_loss#4, c_customer_sk#10, c_current_cdemo_sk#11, c_current_hdemo_sk#12, c_current_addr_sk#13] -(17) Scan parquet default.customer_address -Output [2]: [ca_address_sk#12, ca_gmt_offset#13] +(17) Scan parquet default.household_demographics +Output [2]: [hd_demo_sk#14, hd_buy_potential#15] Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-7.00), IsNotNull(ca_address_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/household_demographics] +PushedFilters: [IsNotNull(hd_buy_potential), StringStartsWith(hd_buy_potential,Unknown), IsNotNull(hd_demo_sk)] +ReadSchema: struct (18) ColumnarToRow [codegen id : 3] -Input [2]: [ca_address_sk#12, ca_gmt_offset#13] +Input [2]: [hd_demo_sk#14, hd_buy_potential#15] (19) Filter [codegen id : 3] -Input [2]: [ca_address_sk#12, ca_gmt_offset#13] -Condition : ((isnotnull(ca_gmt_offset#13) AND (ca_gmt_offset#13 = -7.00)) AND isnotnull(ca_address_sk#12)) +Input [2]: [hd_demo_sk#14, hd_buy_potential#15] +Condition : ((isnotnull(hd_buy_potential#15) AND StartsWith(hd_buy_potential#15, Unknown)) AND isnotnull(hd_demo_sk#14)) (20) Project [codegen id : 3] -Output [1]: [ca_address_sk#12] -Input [2]: [ca_address_sk#12, ca_gmt_offset#13] +Output [1]: [hd_demo_sk#14] +Input [2]: [hd_demo_sk#14, hd_buy_potential#15] (21) BroadcastExchange -Input [1]: [ca_address_sk#12] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#14] +Input [1]: [hd_demo_sk#14] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] (22) BroadcastHashJoin [codegen id : 7] -Left keys [1]: 
[c_current_addr_sk#8] -Right keys [1]: [ca_address_sk#12] +Left keys [1]: [c_current_hdemo_sk#12] +Right keys [1]: [hd_demo_sk#14] Join condition: None (23) Project [codegen id : 7] -Output [3]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5] -Input [5]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, c_current_addr_sk#8, ca_address_sk#12] +Output [4]: [cr_call_center_sk#3, cr_net_loss#4, c_current_cdemo_sk#11, c_current_addr_sk#13] +Input [6]: [cr_call_center_sk#3, cr_net_loss#4, c_current_cdemo_sk#11, c_current_hdemo_sk#12, c_current_addr_sk#13, hd_demo_sk#14] -(24) Scan parquet default.date_dim -Output [3]: [d_date_sk#15, d_year#16, d_moy#17] +(24) Scan parquet default.customer_address +Output [2]: [ca_address_sk#17, ca_gmt_offset#18] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,1998), EqualTo(d_moy,11), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [IsNotNull(ca_gmt_offset), EqualTo(ca_gmt_offset,-7.00), IsNotNull(ca_address_sk)] +ReadSchema: struct (25) ColumnarToRow [codegen id : 4] -Input [3]: [d_date_sk#15, d_year#16, d_moy#17] +Input [2]: [ca_address_sk#17, ca_gmt_offset#18] (26) Filter [codegen id : 4] -Input [3]: [d_date_sk#15, d_year#16, d_moy#17] -Condition : ((((isnotnull(d_year#16) AND isnotnull(d_moy#17)) AND (d_year#16 = 1998)) AND (d_moy#17 = 11)) AND isnotnull(d_date_sk#15)) +Input [2]: [ca_address_sk#17, ca_gmt_offset#18] +Condition : ((isnotnull(ca_gmt_offset#18) AND (ca_gmt_offset#18 = -7.00)) AND isnotnull(ca_address_sk#17)) (27) Project [codegen id : 4] -Output [1]: [d_date_sk#15] -Input [3]: [d_date_sk#15, d_year#16, d_moy#17] +Output [1]: [ca_address_sk#17] +Input [2]: [ca_address_sk#17, ca_gmt_offset#18] (28) BroadcastExchange -Input [1]: [d_date_sk#15] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#18] +Input [1]: [ca_address_sk#17] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#19] -(29) Scan parquet default.catalog_returns -Output [4]: [cr_returned_date_sk#19, cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22] -Batched: true -Location [not included in comparison]/{warehouse_dir}/catalog_returns] -PushedFilters: [IsNotNull(cr_call_center_sk), IsNotNull(cr_returned_date_sk), IsNotNull(cr_returning_customer_sk)] -ReadSchema: struct +(29) BroadcastHashJoin [codegen id : 7] +Left keys [1]: [c_current_addr_sk#13] +Right keys [1]: [ca_address_sk#17] +Join condition: None -(30) ColumnarToRow -Input [4]: [cr_returned_date_sk#19, cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22] +(30) Project [codegen id : 7] +Output [3]: [cr_call_center_sk#3, cr_net_loss#4, c_current_cdemo_sk#11] +Input [5]: [cr_call_center_sk#3, cr_net_loss#4, c_current_cdemo_sk#11, c_current_addr_sk#13, ca_address_sk#17] -(31) Filter -Input [4]: [cr_returned_date_sk#19, cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22] -Condition : ((isnotnull(cr_call_center_sk#21) AND isnotnull(cr_returned_date_sk#19)) AND isnotnull(cr_returning_customer_sk#20)) +(31) Scan parquet default.customer_demographics +Output [3]: [cd_demo_sk#20, cd_marital_status#21, cd_education_status#22] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer_demographics] +PushedFilters: 
[Or(And(EqualTo(cd_marital_status,M),EqualTo(cd_education_status,Unknown)),And(EqualTo(cd_marital_status,W),EqualTo(cd_education_status,Advanced Degree))), IsNotNull(cd_demo_sk)] +ReadSchema: struct -(32) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [d_date_sk#15] -Right keys [1]: [cr_returned_date_sk#19] -Join condition: None +(32) ColumnarToRow [codegen id : 5] +Input [3]: [cd_demo_sk#20, cd_marital_status#21, cd_education_status#22] -(33) Project [codegen id : 5] -Output [3]: [cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22] -Input [5]: [d_date_sk#15, cr_returned_date_sk#19, cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22] +(33) Filter [codegen id : 5] +Input [3]: [cd_demo_sk#20, cd_marital_status#21, cd_education_status#22] +Condition : ((((cd_marital_status#21 = M) AND (cd_education_status#22 = Unknown)) OR ((cd_marital_status#21 = W) AND (cd_education_status#22 = Advanced Degree))) AND isnotnull(cd_demo_sk#20)) (34) BroadcastExchange -Input [3]: [cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#23] +Input [3]: [cd_demo_sk#20, cd_marital_status#21, cd_education_status#22] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#23] (35) BroadcastHashJoin [codegen id : 7] -Left keys [1]: [c_customer_sk#5] -Right keys [1]: [cr_returning_customer_sk#20] +Left keys [1]: [c_current_cdemo_sk#11] +Right keys [1]: [cd_demo_sk#20] Join condition: None (36) Project [codegen id : 7] -Output [4]: [cd_marital_status#2, cd_education_status#3, cr_call_center_sk#21, cr_net_loss#22] -Input [6]: [cd_marital_status#2, cd_education_status#3, c_customer_sk#5, cr_returning_customer_sk#20, cr_call_center_sk#21, cr_net_loss#22] +Output [4]: [cr_call_center_sk#3, cr_net_loss#4, cd_marital_status#21, cd_education_status#22] +Input [6]: [cr_call_center_sk#3, cr_net_loss#4, c_current_cdemo_sk#11, cd_demo_sk#20, cd_marital_status#21, cd_education_status#22] (37) Scan parquet default.call_center Output [4]: [cc_call_center_sk#24, cc_call_center_id#25, cc_name#26, cc_manager#27] @@ -228,35 +228,35 @@ Input [4]: [cc_call_center_sk#24, cc_call_center_id#25, cc_name#26, cc_manager#2 Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#28] (41) BroadcastHashJoin [codegen id : 7] -Left keys [1]: [cr_call_center_sk#21] +Left keys [1]: [cr_call_center_sk#3] Right keys [1]: [cc_call_center_sk#24] Join condition: None (42) Project [codegen id : 7] -Output [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cr_net_loss#22, cd_marital_status#2, cd_education_status#3] -Input [8]: [cd_marital_status#2, cd_education_status#3, cr_call_center_sk#21, cr_net_loss#22, cc_call_center_sk#24, cc_call_center_id#25, cc_name#26, cc_manager#27] +Output [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cr_net_loss#4, cd_marital_status#21, cd_education_status#22] +Input [8]: [cr_call_center_sk#3, cr_net_loss#4, cd_marital_status#21, cd_education_status#22, cc_call_center_sk#24, cc_call_center_id#25, cc_name#26, cc_manager#27] (43) HashAggregate [codegen id : 7] -Input [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cr_net_loss#22, cd_marital_status#2, cd_education_status#3] -Keys [5]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#2, cd_education_status#3] -Functions [1]: [partial_sum(UnscaledValue(cr_net_loss#22))] +Input [6]: [cc_call_center_id#25, cc_name#26, 
cc_manager#27, cr_net_loss#4, cd_marital_status#21, cd_education_status#22] +Keys [5]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#21, cd_education_status#22] +Functions [1]: [partial_sum(UnscaledValue(cr_net_loss#4))] Aggregate Attributes [1]: [sum#29] -Results [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#2, cd_education_status#3, sum#30] +Results [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#21, cd_education_status#22, sum#30] (44) Exchange -Input [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#2, cd_education_status#3, sum#30] -Arguments: hashpartitioning(cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#2, cd_education_status#3, 5), true, [id=#31] +Input [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#21, cd_education_status#22, sum#30] +Arguments: hashpartitioning(cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#21, cd_education_status#22, 5), ENSURE_REQUIREMENTS, [id=#31] (45) HashAggregate [codegen id : 8] -Input [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#2, cd_education_status#3, sum#30] -Keys [5]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#2, cd_education_status#3] -Functions [1]: [sum(UnscaledValue(cr_net_loss#22))] -Aggregate Attributes [1]: [sum(UnscaledValue(cr_net_loss#22))#32] -Results [4]: [cc_call_center_id#25 AS Call_Center#33, cc_name#26 AS Call_Center_Name#34, cc_manager#27 AS Manager#35, MakeDecimal(sum(UnscaledValue(cr_net_loss#22))#32,17,2) AS Returns_Loss#36] +Input [6]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#21, cd_education_status#22, sum#30] +Keys [5]: [cc_call_center_id#25, cc_name#26, cc_manager#27, cd_marital_status#21, cd_education_status#22] +Functions [1]: [sum(UnscaledValue(cr_net_loss#4))] +Aggregate Attributes [1]: [sum(UnscaledValue(cr_net_loss#4))#32] +Results [4]: [cc_call_center_id#25 AS Call_Center#33, cc_name#26 AS Call_Center_Name#34, cc_manager#27 AS Manager#35, MakeDecimal(sum(UnscaledValue(cr_net_loss#4))#32,17,2) AS Returns_Loss#36] (46) Exchange Input [4]: [Call_Center#33, Call_Center_Name#34, Manager#35, Returns_Loss#36] -Arguments: rangepartitioning(Returns_Loss#36 DESC NULLS LAST, 5), true, [id=#37] +Arguments: rangepartitioning(Returns_Loss#36 DESC NULLS LAST, 5), ENSURE_REQUIREMENTS, [id=#37] (47) Sort [codegen id : 9] Input [4]: [Call_Center#33, Call_Center_Name#34, Manager#35, Returns_Loss#36] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/simplified.txt index f64791821893d..6c8d629feed3e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q91.sf100/simplified.txt @@ -10,58 +10,58 @@ WholeStageCodegen (9) HashAggregate [cc_call_center_id,cc_name,cc_manager,cd_marital_status,cd_education_status,cr_net_loss] [sum,sum] Project [cc_call_center_id,cc_name,cc_manager,cr_net_loss,cd_marital_status,cd_education_status] BroadcastHashJoin [cr_call_center_sk,cc_call_center_sk] - Project [cd_marital_status,cd_education_status,cr_call_center_sk,cr_net_loss] - BroadcastHashJoin [c_customer_sk,cr_returning_customer_sk] - Project [cd_marital_status,cd_education_status,c_customer_sk] + Project 
[cr_call_center_sk,cr_net_loss,cd_marital_status,cd_education_status] + BroadcastHashJoin [c_current_cdemo_sk,cd_demo_sk] + Project [cr_call_center_sk,cr_net_loss,c_current_cdemo_sk] BroadcastHashJoin [c_current_addr_sk,ca_address_sk] - Project [cd_marital_status,cd_education_status,c_customer_sk,c_current_addr_sk] + Project [cr_call_center_sk,cr_net_loss,c_current_cdemo_sk,c_current_addr_sk] BroadcastHashJoin [c_current_hdemo_sk,hd_demo_sk] - Project [cd_marital_status,cd_education_status,c_customer_sk,c_current_hdemo_sk,c_current_addr_sk] - BroadcastHashJoin [cd_demo_sk,c_current_cdemo_sk] + Project [cr_call_center_sk,cr_net_loss,c_current_cdemo_sk,c_current_hdemo_sk,c_current_addr_sk] + BroadcastHashJoin [cr_returning_customer_sk,c_customer_sk] InputAdapter BroadcastExchange #3 - WholeStageCodegen (1) - Filter [cd_marital_status,cd_education_status,cd_demo_sk] - ColumnarToRow + WholeStageCodegen (2) + Project [cr_returning_customer_sk,cr_call_center_sk,cr_net_loss] + BroadcastHashJoin [cr_returned_date_sk,d_date_sk] + Filter [cr_call_center_sk,cr_returned_date_sk,cr_returning_customer_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_returned_date_sk,cr_returning_customer_sk,cr_call_center_sk,cr_net_loss] InputAdapter - Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status,cd_education_status] + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_year,d_moy,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] Filter [c_customer_sk,c_current_addr_sk,c_current_cdemo_sk,c_current_hdemo_sk] ColumnarToRow InputAdapter Scan parquet default.customer [c_customer_sk,c_current_cdemo_sk,c_current_hdemo_sk,c_current_addr_sk] InputAdapter - BroadcastExchange #4 - WholeStageCodegen (2) + BroadcastExchange #5 + WholeStageCodegen (3) Project [hd_demo_sk] Filter [hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential] InputAdapter - BroadcastExchange #5 - WholeStageCodegen (3) + BroadcastExchange #6 + WholeStageCodegen (4) Project [ca_address_sk] Filter [ca_gmt_offset,ca_address_sk] ColumnarToRow InputAdapter Scan parquet default.customer_address [ca_address_sk,ca_gmt_offset] InputAdapter - BroadcastExchange #6 + BroadcastExchange #7 WholeStageCodegen (5) - Project [cr_returning_customer_sk,cr_call_center_sk,cr_net_loss] - BroadcastHashJoin [d_date_sk,cr_returned_date_sk] + Filter [cd_marital_status,cd_education_status,cd_demo_sk] + ColumnarToRow InputAdapter - BroadcastExchange #7 - WholeStageCodegen (4) - Project [d_date_sk] - Filter [d_year,d_moy,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - Filter [cr_call_center_sk,cr_returned_date_sk,cr_returning_customer_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_returned_date_sk,cr_returning_customer_sk,cr_call_center_sk,cr_net_loss] + Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status,cd_education_status] InputAdapter BroadcastExchange #8 WholeStageCodegen (6) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt index dc4665185b014..99459bfe9a049 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (34) +* Sort (34) +- * HashAggregate (33) +- Exchange (32) +- * HashAggregate (31) @@ -190,7 +190,7 @@ Functions [1]: [sum(UnscaledValue(ws_ext_discount_amt#6))] Aggregate Attributes [1]: [sum(UnscaledValue(ws_ext_discount_amt#6))#22] Results [1]: [MakeDecimal(sum(UnscaledValue(ws_ext_discount_amt#6))#22,17,2) AS Excess Discount Amount #23] -(34) TakeOrderedAndProject +(34) Sort [codegen id : 7] Input [1]: [Excess Discount Amount #23] -Arguments: 100, [Excess Discount Amount #23 ASC NULLS FIRST], [Excess Discount Amount #23] +Arguments: [Excess Discount Amount #23 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/simplified.txt index 7fd1cd3637a09..0721155286d17 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92.sf100/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [Excess Discount Amount ] - WholeStageCodegen (7) +WholeStageCodegen (7) + Sort [Excess Discount Amount ] HashAggregate [sum] [sum(UnscaledValue(ws_ext_discount_amt)),Excess Discount Amount ,sum] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt index b17a48db8baac..8a441392f4165 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (34) +* Sort (34) +- * HashAggregate (33) +- Exchange (32) +- * HashAggregate (31) @@ -190,7 +190,7 @@ Functions [1]: [sum(UnscaledValue(ws_ext_discount_amt#3))] Aggregate Attributes [1]: [sum(UnscaledValue(ws_ext_discount_amt#3))#22] Results [1]: [MakeDecimal(sum(UnscaledValue(ws_ext_discount_amt#3))#22,17,2) AS Excess Discount Amount #23] -(34) TakeOrderedAndProject +(34) Sort [codegen id : 7] Input [1]: [Excess Discount Amount #23] -Arguments: 100, [Excess Discount Amount #23 ASC NULLS FIRST], [Excess Discount Amount #23] +Arguments: [Excess Discount Amount #23 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/simplified.txt index 652b2e36cf781..1f24a7c964f20 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q92/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [Excess Discount Amount ] - WholeStageCodegen (7) +WholeStageCodegen (7) + Sort [Excess Discount Amount ] HashAggregate [sum] [sum(UnscaledValue(ws_ext_discount_amt)),Excess Discount Amount ,sum] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt index 7720d9dee4170..43390c5048a6d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (47) +* Sort (47) +- * HashAggregate (46) +- Exchange (45) +- * HashAggregate (44) @@ -259,7 +259,7 @@ Functions [3]: [sum(UnscaledValue(ws_ext_ship_cost#6)), sum(UnscaledValue(ws_net Aggregate Attributes [3]: [sum(UnscaledValue(ws_ext_ship_cost#6))#24, sum(UnscaledValue(ws_net_profit#7))#25, count(ws_order_number#5)#29] Results [3]: [count(ws_order_number#5)#29 AS order count #32, MakeDecimal(sum(UnscaledValue(ws_ext_ship_cost#6))#24,17,2) AS total shipping cost #33, MakeDecimal(sum(UnscaledValue(ws_net_profit#7))#25,17,2) AS total net profit #34] -(47) TakeOrderedAndProject +(47) Sort [codegen id : 14] Input [3]: [order count #32, total shipping cost #33, total net profit #34] -Arguments: 100, [order count #32 ASC NULLS FIRST], [order count #32, total shipping cost #33, total net profit #34] +Arguments: [order count #32 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/simplified.txt index 128a8179ac10b..7b3d461b9e80f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94.sf100/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] - WholeStageCodegen (14) +WholeStageCodegen (14) + Sort [order count ] HashAggregate [sum,sum,count] [sum(UnscaledValue(ws_ext_ship_cost)),sum(UnscaledValue(ws_net_profit)),count(ws_order_number),order count ,total shipping cost ,total net profit ,sum,sum,count] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt index a94e74f66b201..2abbe4f9b8390 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (41) +* Sort (41) +- * HashAggregate (40) +- Exchange (39) +- * HashAggregate (38) @@ -229,7 +229,7 @@ Functions [3]: [sum(UnscaledValue(ws_ext_ship_cost#6)), sum(UnscaledValue(ws_net Aggregate Attributes [3]: [sum(UnscaledValue(ws_ext_ship_cost#6))#22, sum(UnscaledValue(ws_net_profit#7))#23, count(ws_order_number#5)#27] Results [3]: [count(ws_order_number#5)#27 AS order count #30, MakeDecimal(sum(UnscaledValue(ws_ext_ship_cost#6))#22,17,2) AS total shipping cost #31, MakeDecimal(sum(UnscaledValue(ws_net_profit#7))#23,17,2) AS total net profit #32] -(41) TakeOrderedAndProject +(41) Sort [codegen id : 8] Input [3]: [order count #30, total shipping cost #31, total net profit #32] -Arguments: 100, [order count #30 ASC NULLS FIRST], [order count #30, total shipping cost #31, total net profit #32] +Arguments: [order count #30 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/simplified.txt index 9d30b998fe174..5e7d7db5c0a9e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/simplified.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q94/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] - WholeStageCodegen (8) +WholeStageCodegen (8) + Sort [order count ] HashAggregate [sum,sum,count] [sum(UnscaledValue(ws_ext_ship_cost)),sum(UnscaledValue(ws_net_profit)),count(ws_order_number),order count ,total shipping cost ,total net profit ,sum,sum,count] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt index eae118d46245d..547792f3d7ae4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/explain.txt @@ -1,67 +1,65 @@ == Physical Plan == -TakeOrderedAndProject (63) -+- * HashAggregate (62) - +- Exchange (61) - +- * HashAggregate (60) - +- * HashAggregate (59) - +- Exchange (58) - +- * HashAggregate (57) - +- * Project (56) - +- * BroadcastHashJoin Inner BuildRight (55) - :- * Project (49) - : +- * BroadcastHashJoin Inner BuildRight (48) - : :- * Project (42) - : : +- * BroadcastHashJoin Inner BuildRight (41) - : : :- SortMergeJoin LeftSemi (35) - : : : :- * Sort (19) - : : : : +- Exchange (18) - : : : : +- SortMergeJoin LeftSemi (17) +* Sort (61) ++- * HashAggregate (60) + +- Exchange (59) + +- * HashAggregate (58) + +- * HashAggregate (57) + +- Exchange (56) + +- * HashAggregate (55) + +- * Project (54) + +- * BroadcastHashJoin Inner BuildRight (53) + :- * Project (47) + : +- * BroadcastHashJoin Inner BuildRight (46) + : :- * Project (40) + : : +- * BroadcastHashJoin Inner BuildRight (39) + : : :- SortMergeJoin LeftSemi (33) + : : : :- * Sort (17) + : : : : +- Exchange (16) + : : : : +- SortMergeJoin LeftSemi (15) : : : : :- * Sort (5) : : : : : +- Exchange (4) : : : : : +- * Filter (3) : : : : : +- * ColumnarToRow (2) : : : : : +- Scan parquet default.web_sales (1) - : : : : +- * Sort (16) - : : : : +- Exchange (15) - : : : : +- * Project (14) - : : : : +- * SortMergeJoin Inner (13) - : : : : :- * Sort (10) - : : : : : +- Exchange (9) - : : : : : +- * Filter (8) - : : : : : +- * ColumnarToRow (7) - : : : : : +- Scan parquet default.web_sales (6) - : : : : +- * Sort (12) - : : : : +- ReusedExchange (11) - : : : +- * Project (34) - : : : +- * SortMergeJoin Inner (33) - : : : :- * Sort (27) - : : : : +- Exchange (26) - : : : : +- * Project (25) - : : : : +- * SortMergeJoin Inner (24) - : : : : :- * Sort (21) - : : : : : +- ReusedExchange (20) - : : : : +- * Sort (23) - : : : : +- ReusedExchange (22) - : : : +- * Sort (32) - : : : +- Exchange (31) - : : : +- * Filter (30) - : : : +- * ColumnarToRow (29) - : : : +- Scan parquet default.web_returns (28) - : : +- BroadcastExchange (40) - : : +- * Project (39) - : : +- * Filter (38) - : : +- * ColumnarToRow (37) - : : +- Scan parquet default.customer_address (36) - : +- BroadcastExchange (47) - : +- * Project (46) - : +- * Filter (45) - : +- * ColumnarToRow (44) - : +- Scan parquet default.web_site (43) - +- BroadcastExchange (54) - +- * Project (53) - +- * Filter (52) - +- * ColumnarToRow (51) - +- Scan parquet default.date_dim (50) + : : : : +- * Project (14) + : : : : +- * SortMergeJoin Inner (13) + : : : : :- * Sort (10) + : : : : : +- Exchange (9) + : : : : : +- * Filter (8) + : : : : : +- * ColumnarToRow (7) + : : : : : +- Scan parquet 
default.web_sales (6) + : : : : +- * Sort (12) + : : : : +- ReusedExchange (11) + : : : +- * Project (32) + : : : +- * SortMergeJoin Inner (31) + : : : :- * Sort (25) + : : : : +- Exchange (24) + : : : : +- * Project (23) + : : : : +- * SortMergeJoin Inner (22) + : : : : :- * Sort (19) + : : : : : +- ReusedExchange (18) + : : : : +- * Sort (21) + : : : : +- ReusedExchange (20) + : : : +- * Sort (30) + : : : +- Exchange (29) + : : : +- * Filter (28) + : : : +- * ColumnarToRow (27) + : : : +- Scan parquet default.web_returns (26) + : : +- BroadcastExchange (38) + : : +- * Project (37) + : : +- * Filter (36) + : : +- * ColumnarToRow (35) + : : +- Scan parquet default.customer_address (34) + : +- BroadcastExchange (45) + : +- * Project (44) + : +- * Filter (43) + : +- * ColumnarToRow (42) + : +- Scan parquet default.web_site (41) + +- BroadcastExchange (52) + +- * Project (51) + +- * Filter (50) + +- * ColumnarToRow (49) + +- Scan parquet default.date_dim (48) (1) Scan parquet default.web_sales @@ -124,224 +122,216 @@ Join condition: NOT (ws_warehouse_sk#8 = ws_warehouse_sk#10) Output [1]: [ws_order_number#4 AS ws_order_number#4#12] Input [4]: [ws_warehouse_sk#8, ws_order_number#4, ws_warehouse_sk#10, ws_order_number#11] -(15) Exchange -Input [1]: [ws_order_number#4#12] -Arguments: hashpartitioning(ws_order_number#4#12, 5), true, [id=#13] - -(16) Sort [codegen id : 8] -Input [1]: [ws_order_number#4#12] -Arguments: [ws_order_number#4#12 ASC NULLS FIRST], false, 0 - -(17) SortMergeJoin +(15) SortMergeJoin Left keys [1]: [ws_order_number#4] Right keys [1]: [ws_order_number#4#12] Join condition: None -(18) Exchange +(16) Exchange Input [6]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6] -Arguments: hashpartitioning(cast(ws_order_number#4 as bigint), 5), true, [id=#14] +Arguments: hashpartitioning(cast(ws_order_number#4 as bigint), 5), true, [id=#13] -(19) Sort [codegen id : 9] +(17) Sort [codegen id : 8] Input [6]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6] Arguments: [cast(ws_order_number#4 as bigint) ASC NULLS FIRST], false, 0 -(20) ReusedExchange [Reuses operator id: 9] +(18) ReusedExchange [Reuses operator id: 9] Output [2]: [ws_warehouse_sk#8, ws_order_number#4] -(21) Sort [codegen id : 11] +(19) Sort [codegen id : 10] Input [2]: [ws_warehouse_sk#8, ws_order_number#4] Arguments: [ws_order_number#4 ASC NULLS FIRST], false, 0 -(22) ReusedExchange [Reuses operator id: 9] -Output [2]: [ws_warehouse_sk#15, ws_order_number#16] +(20) ReusedExchange [Reuses operator id: 9] +Output [2]: [ws_warehouse_sk#14, ws_order_number#15] -(23) Sort [codegen id : 13] -Input [2]: [ws_warehouse_sk#15, ws_order_number#16] -Arguments: [ws_order_number#16 ASC NULLS FIRST], false, 0 +(21) Sort [codegen id : 12] +Input [2]: [ws_warehouse_sk#14, ws_order_number#15] +Arguments: [ws_order_number#15 ASC NULLS FIRST], false, 0 -(24) SortMergeJoin [codegen id : 14] +(22) SortMergeJoin [codegen id : 13] Left keys [1]: [ws_order_number#4] -Right keys [1]: [ws_order_number#16] -Join condition: NOT (ws_warehouse_sk#8 = ws_warehouse_sk#15) +Right keys [1]: [ws_order_number#15] +Join condition: NOT (ws_warehouse_sk#8 = ws_warehouse_sk#14) -(25) Project [codegen id : 14] +(23) Project [codegen id : 13] Output [1]: [ws_order_number#4] -Input [4]: [ws_warehouse_sk#8, ws_order_number#4, ws_warehouse_sk#15, ws_order_number#16] +Input [4]: [ws_warehouse_sk#8, ws_order_number#4, ws_warehouse_sk#14, 
ws_order_number#15] -(26) Exchange +(24) Exchange Input [1]: [ws_order_number#4] -Arguments: hashpartitioning(cast(ws_order_number#4 as bigint), 5), true, [id=#17] +Arguments: hashpartitioning(cast(ws_order_number#4 as bigint), 5), true, [id=#16] -(27) Sort [codegen id : 15] +(25) Sort [codegen id : 14] Input [1]: [ws_order_number#4] Arguments: [cast(ws_order_number#4 as bigint) ASC NULLS FIRST], false, 0 -(28) Scan parquet default.web_returns -Output [1]: [wr_order_number#18] +(26) Scan parquet default.web_returns +Output [1]: [wr_order_number#17] Batched: true Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number)] ReadSchema: struct -(29) ColumnarToRow [codegen id : 16] -Input [1]: [wr_order_number#18] +(27) ColumnarToRow [codegen id : 15] +Input [1]: [wr_order_number#17] -(30) Filter [codegen id : 16] -Input [1]: [wr_order_number#18] -Condition : isnotnull(wr_order_number#18) +(28) Filter [codegen id : 15] +Input [1]: [wr_order_number#17] +Condition : isnotnull(wr_order_number#17) -(31) Exchange -Input [1]: [wr_order_number#18] -Arguments: hashpartitioning(wr_order_number#18, 5), true, [id=#19] +(29) Exchange +Input [1]: [wr_order_number#17] +Arguments: hashpartitioning(wr_order_number#17, 5), true, [id=#18] -(32) Sort [codegen id : 17] -Input [1]: [wr_order_number#18] -Arguments: [wr_order_number#18 ASC NULLS FIRST], false, 0 +(30) Sort [codegen id : 16] +Input [1]: [wr_order_number#17] +Arguments: [wr_order_number#17 ASC NULLS FIRST], false, 0 -(33) SortMergeJoin [codegen id : 18] +(31) SortMergeJoin [codegen id : 17] Left keys [1]: [cast(ws_order_number#4 as bigint)] -Right keys [1]: [wr_order_number#18] +Right keys [1]: [wr_order_number#17] Join condition: None -(34) Project [codegen id : 18] -Output [1]: [wr_order_number#18] -Input [2]: [ws_order_number#4, wr_order_number#18] +(32) Project [codegen id : 17] +Output [1]: [wr_order_number#17] +Input [2]: [ws_order_number#4, wr_order_number#17] -(35) SortMergeJoin +(33) SortMergeJoin Left keys [1]: [cast(ws_order_number#4 as bigint)] -Right keys [1]: [wr_order_number#18] +Right keys [1]: [wr_order_number#17] Join condition: None -(36) Scan parquet default.customer_address -Output [2]: [ca_address_sk#20, ca_state#21] +(34) Scan parquet default.customer_address +Output [2]: [ca_address_sk#19, ca_state#20] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [IsNotNull(ca_state), EqualTo(ca_state,IL), IsNotNull(ca_address_sk)] ReadSchema: struct -(37) ColumnarToRow [codegen id : 19] -Input [2]: [ca_address_sk#20, ca_state#21] +(35) ColumnarToRow [codegen id : 18] +Input [2]: [ca_address_sk#19, ca_state#20] -(38) Filter [codegen id : 19] -Input [2]: [ca_address_sk#20, ca_state#21] -Condition : ((isnotnull(ca_state#21) AND (ca_state#21 = IL)) AND isnotnull(ca_address_sk#20)) +(36) Filter [codegen id : 18] +Input [2]: [ca_address_sk#19, ca_state#20] +Condition : ((isnotnull(ca_state#20) AND (ca_state#20 = IL)) AND isnotnull(ca_address_sk#19)) -(39) Project [codegen id : 19] -Output [1]: [ca_address_sk#20] -Input [2]: [ca_address_sk#20, ca_state#21] +(37) Project [codegen id : 18] +Output [1]: [ca_address_sk#19] +Input [2]: [ca_address_sk#19, ca_state#20] -(40) BroadcastExchange -Input [1]: [ca_address_sk#20] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#22] +(38) BroadcastExchange +Input [1]: [ca_address_sk#19] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as 
bigint)),false), [id=#21] -(41) BroadcastHashJoin [codegen id : 22] +(39) BroadcastHashJoin [codegen id : 21] Left keys [1]: [ws_ship_addr_sk#2] -Right keys [1]: [ca_address_sk#20] +Right keys [1]: [ca_address_sk#19] Join condition: None -(42) Project [codegen id : 22] +(40) Project [codegen id : 21] Output [5]: [ws_ship_date_sk#1, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6] -Input [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6, ca_address_sk#20] +Input [7]: [ws_ship_date_sk#1, ws_ship_addr_sk#2, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6, ca_address_sk#19] -(43) Scan parquet default.web_site -Output [2]: [web_site_sk#23, web_company_name#24] +(41) Scan parquet default.web_site +Output [2]: [web_site_sk#22, web_company_name#23] Batched: true Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_company_name), EqualTo(web_company_name,pri), IsNotNull(web_site_sk)] ReadSchema: struct -(44) ColumnarToRow [codegen id : 20] -Input [2]: [web_site_sk#23, web_company_name#24] +(42) ColumnarToRow [codegen id : 19] +Input [2]: [web_site_sk#22, web_company_name#23] -(45) Filter [codegen id : 20] -Input [2]: [web_site_sk#23, web_company_name#24] -Condition : ((isnotnull(web_company_name#24) AND (web_company_name#24 = pri)) AND isnotnull(web_site_sk#23)) +(43) Filter [codegen id : 19] +Input [2]: [web_site_sk#22, web_company_name#23] +Condition : ((isnotnull(web_company_name#23) AND (web_company_name#23 = pri)) AND isnotnull(web_site_sk#22)) -(46) Project [codegen id : 20] -Output [1]: [web_site_sk#23] -Input [2]: [web_site_sk#23, web_company_name#24] +(44) Project [codegen id : 19] +Output [1]: [web_site_sk#22] +Input [2]: [web_site_sk#22, web_company_name#23] -(47) BroadcastExchange -Input [1]: [web_site_sk#23] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#25] +(45) BroadcastExchange +Input [1]: [web_site_sk#22] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#24] -(48) BroadcastHashJoin [codegen id : 22] +(46) BroadcastHashJoin [codegen id : 21] Left keys [1]: [ws_web_site_sk#3] -Right keys [1]: [web_site_sk#23] +Right keys [1]: [web_site_sk#22] Join condition: None -(49) Project [codegen id : 22] +(47) Project [codegen id : 21] Output [4]: [ws_ship_date_sk#1, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6] -Input [6]: [ws_ship_date_sk#1, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6, web_site_sk#23] +Input [6]: [ws_ship_date_sk#1, ws_web_site_sk#3, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6, web_site_sk#22] -(50) Scan parquet default.date_dim -Output [2]: [d_date_sk#26, d_date#27] +(48) Scan parquet default.date_dim +Output [2]: [d_date_sk#25, d_date#26] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1999-02-01), LessThanOrEqual(d_date,1999-04-02), IsNotNull(d_date_sk)] ReadSchema: struct -(51) ColumnarToRow [codegen id : 21] -Input [2]: [d_date_sk#26, d_date#27] +(49) ColumnarToRow [codegen id : 20] +Input [2]: [d_date_sk#25, d_date#26] -(52) Filter [codegen id : 21] -Input [2]: [d_date_sk#26, d_date#27] -Condition : (((isnotnull(d_date#27) AND (d_date#27 >= 10623)) AND (d_date#27 <= 10683)) AND isnotnull(d_date_sk#26)) +(50) Filter [codegen id : 20] +Input [2]: [d_date_sk#25, d_date#26] +Condition : 
(((isnotnull(d_date#26) AND (d_date#26 >= 10623)) AND (d_date#26 <= 10683)) AND isnotnull(d_date_sk#25)) -(53) Project [codegen id : 21] -Output [1]: [d_date_sk#26] -Input [2]: [d_date_sk#26, d_date#27] +(51) Project [codegen id : 20] +Output [1]: [d_date_sk#25] +Input [2]: [d_date_sk#25, d_date#26] -(54) BroadcastExchange -Input [1]: [d_date_sk#26] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#28] +(52) BroadcastExchange +Input [1]: [d_date_sk#25] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#27] -(55) BroadcastHashJoin [codegen id : 22] +(53) BroadcastHashJoin [codegen id : 21] Left keys [1]: [ws_ship_date_sk#1] -Right keys [1]: [d_date_sk#26] +Right keys [1]: [d_date_sk#25] Join condition: None -(56) Project [codegen id : 22] +(54) Project [codegen id : 21] Output [3]: [ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6] -Input [5]: [ws_ship_date_sk#1, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6, d_date_sk#26] +Input [5]: [ws_ship_date_sk#1, ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6, d_date_sk#25] -(57) HashAggregate [codegen id : 22] +(55) HashAggregate [codegen id : 21] Input [3]: [ws_order_number#4, ws_ext_ship_cost#5, ws_net_profit#6] Keys [1]: [ws_order_number#4] Functions [2]: [partial_sum(UnscaledValue(ws_ext_ship_cost#5)), partial_sum(UnscaledValue(ws_net_profit#6))] -Aggregate Attributes [2]: [sum(UnscaledValue(ws_ext_ship_cost#5))#29, sum(UnscaledValue(ws_net_profit#6))#30] -Results [3]: [ws_order_number#4, sum#31, sum#32] +Aggregate Attributes [2]: [sum(UnscaledValue(ws_ext_ship_cost#5))#28, sum(UnscaledValue(ws_net_profit#6))#29] +Results [3]: [ws_order_number#4, sum#30, sum#31] -(58) Exchange -Input [3]: [ws_order_number#4, sum#31, sum#32] -Arguments: hashpartitioning(ws_order_number#4, 5), true, [id=#33] +(56) Exchange +Input [3]: [ws_order_number#4, sum#30, sum#31] +Arguments: hashpartitioning(ws_order_number#4, 5), true, [id=#32] -(59) HashAggregate [codegen id : 23] -Input [3]: [ws_order_number#4, sum#31, sum#32] +(57) HashAggregate [codegen id : 22] +Input [3]: [ws_order_number#4, sum#30, sum#31] Keys [1]: [ws_order_number#4] Functions [2]: [merge_sum(UnscaledValue(ws_ext_ship_cost#5)), merge_sum(UnscaledValue(ws_net_profit#6))] -Aggregate Attributes [2]: [sum(UnscaledValue(ws_ext_ship_cost#5))#29, sum(UnscaledValue(ws_net_profit#6))#30] -Results [3]: [ws_order_number#4, sum#31, sum#32] +Aggregate Attributes [2]: [sum(UnscaledValue(ws_ext_ship_cost#5))#28, sum(UnscaledValue(ws_net_profit#6))#29] +Results [3]: [ws_order_number#4, sum#30, sum#31] -(60) HashAggregate [codegen id : 23] -Input [3]: [ws_order_number#4, sum#31, sum#32] +(58) HashAggregate [codegen id : 22] +Input [3]: [ws_order_number#4, sum#30, sum#31] Keys: [] Functions [3]: [merge_sum(UnscaledValue(ws_ext_ship_cost#5)), merge_sum(UnscaledValue(ws_net_profit#6)), partial_count(distinct ws_order_number#4)] -Aggregate Attributes [3]: [sum(UnscaledValue(ws_ext_ship_cost#5))#29, sum(UnscaledValue(ws_net_profit#6))#30, count(ws_order_number#4)#34] -Results [3]: [sum#31, sum#32, count#35] +Aggregate Attributes [3]: [sum(UnscaledValue(ws_ext_ship_cost#5))#28, sum(UnscaledValue(ws_net_profit#6))#29, count(ws_order_number#4)#33] +Results [3]: [sum#30, sum#31, count#34] -(61) Exchange -Input [3]: [sum#31, sum#32, count#35] -Arguments: SinglePartition, true, [id=#36] +(59) Exchange +Input [3]: [sum#30, sum#31, count#34] +Arguments: SinglePartition, true, [id=#35] -(62) HashAggregate [codegen id 
: 24] -Input [3]: [sum#31, sum#32, count#35] +(60) HashAggregate [codegen id : 23] +Input [3]: [sum#30, sum#31, count#34] Keys: [] Functions [3]: [sum(UnscaledValue(ws_ext_ship_cost#5)), sum(UnscaledValue(ws_net_profit#6)), count(distinct ws_order_number#4)] -Aggregate Attributes [3]: [sum(UnscaledValue(ws_ext_ship_cost#5))#29, sum(UnscaledValue(ws_net_profit#6))#30, count(ws_order_number#4)#34] -Results [3]: [count(ws_order_number#4)#34 AS order count #37, MakeDecimal(sum(UnscaledValue(ws_ext_ship_cost#5))#29,17,2) AS total shipping cost #38, MakeDecimal(sum(UnscaledValue(ws_net_profit#6))#30,17,2) AS total net profit #39] +Aggregate Attributes [3]: [sum(UnscaledValue(ws_ext_ship_cost#5))#28, sum(UnscaledValue(ws_net_profit#6))#29, count(ws_order_number#4)#33] +Results [3]: [count(ws_order_number#4)#33 AS order count #36, MakeDecimal(sum(UnscaledValue(ws_ext_ship_cost#5))#28,17,2) AS total shipping cost #37, MakeDecimal(sum(UnscaledValue(ws_net_profit#6))#29,17,2) AS total net profit #38] -(63) TakeOrderedAndProject -Input [3]: [order count #37, total shipping cost #38, total net profit #39] -Arguments: 100, [order count #37 ASC NULLS FIRST], [order count #37, total shipping cost #38, total net profit #39] +(61) Sort [codegen id : 23] +Input [3]: [order count #36, total shipping cost #37, total net profit #38] +Arguments: [order count #36 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/simplified.txt index bdcbb87b372dc..7213a9f58d3f8 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95.sf100/simplified.txt @@ -1,14 +1,14 @@ -TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] - WholeStageCodegen (24) +WholeStageCodegen (23) + Sort [order count ] HashAggregate [sum,sum,count] [sum(UnscaledValue(ws_ext_ship_cost)),sum(UnscaledValue(ws_net_profit)),count(ws_order_number),order count ,total shipping cost ,total net profit ,sum,sum,count] InputAdapter Exchange #1 - WholeStageCodegen (23) + WholeStageCodegen (22) HashAggregate [ws_order_number] [sum(UnscaledValue(ws_ext_ship_cost)),sum(UnscaledValue(ws_net_profit)),count(ws_order_number),sum,sum,count,sum,sum,count] HashAggregate [ws_order_number] [sum(UnscaledValue(ws_ext_ship_cost)),sum(UnscaledValue(ws_net_profit)),sum,sum,sum,sum] InputAdapter Exchange [ws_order_number] #2 - WholeStageCodegen (22) + WholeStageCodegen (21) HashAggregate [ws_order_number,ws_ext_ship_cost,ws_net_profit] [sum(UnscaledValue(ws_ext_ship_cost)),sum(UnscaledValue(ws_net_profit)),sum,sum,sum,sum] Project [ws_order_number,ws_ext_ship_cost,ws_net_profit] BroadcastHashJoin [ws_ship_date_sk,d_date_sk] @@ -18,7 +18,7 @@ TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] BroadcastHashJoin [ws_ship_addr_sk,ca_address_sk] InputAdapter SortMergeJoin [ws_order_number,wr_order_number] - WholeStageCodegen (9) + WholeStageCodegen (8) Sort [ws_order_number] InputAdapter Exchange [ws_order_number] #3 @@ -32,78 +32,74 @@ TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] ColumnarToRow InputAdapter Scan parquet default.web_sales [ws_ship_date_sk,ws_ship_addr_sk,ws_web_site_sk,ws_order_number,ws_ext_ship_cost,ws_net_profit] - WholeStageCodegen (8) - Sort [ws_order_number] - InputAdapter - Exchange [ws_order_number] 
#5 - WholeStageCodegen (7) - Project [ws_order_number] - SortMergeJoin [ws_order_number,ws_order_number,ws_warehouse_sk,ws_warehouse_sk] - InputAdapter - WholeStageCodegen (4) - Sort [ws_order_number] - InputAdapter - Exchange [ws_order_number] #6 - WholeStageCodegen (3) - Filter [ws_order_number,ws_warehouse_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_warehouse_sk,ws_order_number] - InputAdapter - WholeStageCodegen (6) - Sort [ws_order_number] - InputAdapter - ReusedExchange [ws_warehouse_sk,ws_order_number] #6 - WholeStageCodegen (18) + WholeStageCodegen (7) + Project [ws_order_number] + SortMergeJoin [ws_order_number,ws_order_number,ws_warehouse_sk,ws_warehouse_sk] + InputAdapter + WholeStageCodegen (4) + Sort [ws_order_number] + InputAdapter + Exchange [ws_order_number] #5 + WholeStageCodegen (3) + Filter [ws_order_number,ws_warehouse_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_warehouse_sk,ws_order_number] + InputAdapter + WholeStageCodegen (6) + Sort [ws_order_number] + InputAdapter + ReusedExchange [ws_warehouse_sk,ws_order_number] #5 + WholeStageCodegen (17) Project [wr_order_number] SortMergeJoin [ws_order_number,wr_order_number] InputAdapter - WholeStageCodegen (15) + WholeStageCodegen (14) Sort [ws_order_number] InputAdapter - Exchange [ws_order_number] #7 - WholeStageCodegen (14) + Exchange [ws_order_number] #6 + WholeStageCodegen (13) Project [ws_order_number] SortMergeJoin [ws_order_number,ws_order_number,ws_warehouse_sk,ws_warehouse_sk] InputAdapter - WholeStageCodegen (11) + WholeStageCodegen (10) Sort [ws_order_number] InputAdapter - ReusedExchange [ws_warehouse_sk,ws_order_number] #6 + ReusedExchange [ws_warehouse_sk,ws_order_number] #5 InputAdapter - WholeStageCodegen (13) + WholeStageCodegen (12) Sort [ws_order_number] InputAdapter - ReusedExchange [ws_warehouse_sk,ws_order_number] #6 + ReusedExchange [ws_warehouse_sk,ws_order_number] #5 InputAdapter - WholeStageCodegen (17) + WholeStageCodegen (16) Sort [wr_order_number] InputAdapter - Exchange [wr_order_number] #8 - WholeStageCodegen (16) + Exchange [wr_order_number] #7 + WholeStageCodegen (15) Filter [wr_order_number] ColumnarToRow InputAdapter Scan parquet default.web_returns [wr_order_number] InputAdapter - BroadcastExchange #9 - WholeStageCodegen (19) + BroadcastExchange #8 + WholeStageCodegen (18) Project [ca_address_sk] Filter [ca_state,ca_address_sk] ColumnarToRow InputAdapter Scan parquet default.customer_address [ca_address_sk,ca_state] InputAdapter - BroadcastExchange #10 - WholeStageCodegen (20) + BroadcastExchange #9 + WholeStageCodegen (19) Project [web_site_sk] Filter [web_company_name,web_site_sk] ColumnarToRow InputAdapter Scan parquet default.web_site [web_site_sk,web_company_name] InputAdapter - BroadcastExchange #11 - WholeStageCodegen (21) + BroadcastExchange #10 + WholeStageCodegen (20) Project [d_date_sk] Filter [d_date,d_date_sk] ColumnarToRow diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt index 3a24e83aff256..1cc99e296383f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (56) +* Sort (56) +- * HashAggregate (55) +- Exchange (54) +- * HashAggregate (53) @@ -312,7 +312,7 @@ Functions [3]: 
[sum(UnscaledValue(ws_ext_ship_cost#5)), sum(UnscaledValue(ws_net Aggregate Attributes [3]: [sum(UnscaledValue(ws_ext_ship_cost#5))#27, sum(UnscaledValue(ws_net_profit#6))#28, count(ws_order_number#4)#32] Results [3]: [count(ws_order_number#4)#32 AS order count #35, MakeDecimal(sum(UnscaledValue(ws_ext_ship_cost#5))#27,17,2) AS total shipping cost #36, MakeDecimal(sum(UnscaledValue(ws_net_profit#6))#28,17,2) AS total net profit #37] -(56) TakeOrderedAndProject +(56) Sort [codegen id : 11] Input [3]: [order count #35, total shipping cost #36, total net profit #37] -Arguments: 100, [order count #35 ASC NULLS FIRST], [order count #35, total shipping cost #36, total net profit #37] +Arguments: [order count #35 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/simplified.txt index 6d35311c810f5..191ff22c1961f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q95/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [order count ,total shipping cost ,total net profit ] - WholeStageCodegen (11) +WholeStageCodegen (11) + Sort [order count ] HashAggregate [sum,sum,count] [sum(UnscaledValue(ws_ext_ship_cost)),sum(UnscaledValue(ws_net_profit)),count(ws_order_number),order count ,total shipping cost ,total net profit ,sum,sum,count] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt index d00029f985471..5ae0e1632f15b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (28) +* Sort (28) +- * HashAggregate (27) +- Exchange (26) +- * HashAggregate (25) @@ -154,7 +154,7 @@ Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#17] Results [1]: [count(1)#17 AS count(1)#18] -(28) TakeOrderedAndProject +(28) Sort [codegen id : 5] Input [1]: [count(1)#18] -Arguments: 100, [count(1)#18 ASC NULLS FIRST], [count(1)#18] +Arguments: [count(1)#18 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/simplified.txt index 1355caffbbfe8..d9ee3e09481ed 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96.sf100/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [count(1)] - WholeStageCodegen (5) +WholeStageCodegen (5) + Sort [count(1)] HashAggregate [count] [count(1),count(1),count] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt index 3561eff8f57ef..6729910d9cb4a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/explain.txt @@ -1,5 +1,5 @@ == Physical Plan == -TakeOrderedAndProject (28) +* Sort 
(28) +- * HashAggregate (27) +- Exchange (26) +- * HashAggregate (25) @@ -154,7 +154,7 @@ Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#17] Results [1]: [count(1)#17 AS count(1)#18] -(28) TakeOrderedAndProject +(28) Sort [codegen id : 5] Input [1]: [count(1)#18] -Arguments: 100, [count(1)#18 ASC NULLS FIRST], [count(1)#18] +Arguments: [count(1)#18 ASC NULLS FIRST], true, 0 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/simplified.txt index b13f28bf69cfd..45400b6c512f4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q96/simplified.txt @@ -1,5 +1,5 @@ -TakeOrderedAndProject [count(1)] - WholeStageCodegen (5) +WholeStageCodegen (5) + Sort [count(1)] HashAggregate [count] [count(1),count(1),count] InputAdapter Exchange #1 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt index 0a2e88b5bc160..fadad48be3d6c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/explain.txt @@ -1,34 +1,33 @@ == Physical Plan == -CollectLimit (30) -+- * HashAggregate (29) - +- Exchange (28) - +- * HashAggregate (27) - +- * Project (26) - +- SortMergeJoin FullOuter (25) - :- * Sort (14) - : +- * HashAggregate (13) - : +- Exchange (12) - : +- * HashAggregate (11) - : +- * Project (10) - : +- * BroadcastHashJoin Inner BuildRight (9) - : :- * Filter (3) - : : +- * ColumnarToRow (2) - : : +- Scan parquet default.store_sales (1) - : +- BroadcastExchange (8) - : +- * Project (7) - : +- * Filter (6) - : +- * ColumnarToRow (5) - : +- Scan parquet default.date_dim (4) - +- * Sort (24) - +- * HashAggregate (23) - +- Exchange (22) - +- * HashAggregate (21) - +- * Project (20) - +- * BroadcastHashJoin Inner BuildRight (19) - :- * Filter (17) - : +- * ColumnarToRow (16) - : +- Scan parquet default.catalog_sales (15) - +- ReusedExchange (18) +* HashAggregate (29) ++- Exchange (28) + +- * HashAggregate (27) + +- * Project (26) + +- SortMergeJoin FullOuter (25) + :- * Sort (14) + : +- * HashAggregate (13) + : +- Exchange (12) + : +- * HashAggregate (11) + : +- * Project (10) + : +- * BroadcastHashJoin Inner BuildRight (9) + : :- * Filter (3) + : : +- * ColumnarToRow (2) + : : +- Scan parquet default.store_sales (1) + : +- BroadcastExchange (8) + : +- * Project (7) + : +- * Filter (6) + : +- * ColumnarToRow (5) + : +- Scan parquet default.date_dim (4) + +- * Sort (24) + +- * HashAggregate (23) + +- Exchange (22) + +- * HashAggregate (21) + +- * Project (20) + +- * BroadcastHashJoin Inner BuildRight (19) + :- * Filter (17) + : +- * ColumnarToRow (16) + : +- Scan parquet default.catalog_sales (15) + +- ReusedExchange (18) (1) Scan parquet default.store_sales @@ -85,7 +84,7 @@ Results [2]: [ss_customer_sk#3, ss_item_sk#2] (12) Exchange Input [2]: [ss_customer_sk#3, ss_item_sk#2] -Arguments: hashpartitioning(ss_customer_sk#3, ss_item_sk#2, 5), true, [id=#7] +Arguments: hashpartitioning(ss_customer_sk#3, ss_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#7] (13) HashAggregate [codegen id : 3] Input [2]: [ss_customer_sk#3, ss_item_sk#2] @@ -133,7 +132,7 @@ Results [2]: [cs_bill_customer_sk#11, 
cs_item_sk#12] (22) Exchange Input [2]: [cs_bill_customer_sk#11, cs_item_sk#12] -Arguments: hashpartitioning(cs_bill_customer_sk#11, cs_item_sk#12, 5), true, [id=#13] +Arguments: hashpartitioning(cs_bill_customer_sk#11, cs_item_sk#12, 5), ENSURE_REQUIREMENTS, [id=#13] (23) HashAggregate [codegen id : 6] Input [2]: [cs_bill_customer_sk#11, cs_item_sk#12] @@ -158,22 +157,18 @@ Input [4]: [customer_sk#8, item_sk#9, customer_sk#14, item_sk#15] (27) HashAggregate [codegen id : 7] Input [2]: [customer_sk#8, customer_sk#14] Keys: [] -Functions [3]: [partial_sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))] +Functions [3]: [partial_sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)] Aggregate Attributes [3]: [sum#16, sum#17, sum#18] Results [3]: [sum#19, sum#20, sum#21] (28) Exchange Input [3]: [sum#19, sum#20, sum#21] -Arguments: SinglePartition, true, [id=#22] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#22] (29) HashAggregate [codegen id : 8] Input [3]: [sum#19, sum#20, sum#21] Keys: [] -Functions [3]: [sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [3]: [sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#23, sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#24, sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#25] -Results [3]: [sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#23 AS store_only#26, sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#24 AS catalog_only#27, sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#25 AS store_and_catalog#28] - -(30) CollectLimit -Input [3]: [store_only#26, catalog_only#27, store_and_catalog#28] -Arguments: 100 +Functions [3]: [sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END), sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END), sum(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)] +Aggregate Attributes [3]: [sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END)#23, sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#24, sum(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#25] +Results [3]: [sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END)#23 AS store_only#26, sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#24 AS catalog_only#27, sum(CASE WHEN 
(isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#25 AS store_and_catalog#28] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/simplified.txt index bae48ec244faa..dc149c443c20f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97.sf100/simplified.txt @@ -1,46 +1,45 @@ -CollectLimit - WholeStageCodegen (8) - HashAggregate [sum,sum,sum] [sum(cast(CASE WHEN (isnotnull(customer_sk) AND isnull(customer_sk)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (isnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (isnotnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END as bigint)),store_only,catalog_only,store_and_catalog,sum,sum,sum] - InputAdapter - Exchange #1 - WholeStageCodegen (7) - HashAggregate [customer_sk,customer_sk] [sum,sum,sum,sum,sum,sum] - Project [customer_sk,customer_sk] - InputAdapter - SortMergeJoin [customer_sk,item_sk,customer_sk,item_sk] - WholeStageCodegen (3) - Sort [customer_sk,item_sk] - HashAggregate [ss_customer_sk,ss_item_sk] [customer_sk,item_sk] - InputAdapter - Exchange [ss_customer_sk,ss_item_sk] #2 - WholeStageCodegen (2) - HashAggregate [ss_customer_sk,ss_item_sk] - Project [ss_item_sk,ss_customer_sk] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk] - InputAdapter - BroadcastExchange #3 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] - WholeStageCodegen (6) - Sort [customer_sk,item_sk] - HashAggregate [cs_bill_customer_sk,cs_item_sk] [customer_sk,item_sk] - InputAdapter - Exchange [cs_bill_customer_sk,cs_item_sk] #4 - WholeStageCodegen (5) - HashAggregate [cs_bill_customer_sk,cs_item_sk] - Project [cs_bill_customer_sk,cs_item_sk] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] - InputAdapter - ReusedExchange [d_date_sk] #3 +WholeStageCodegen (8) + HashAggregate [sum,sum,sum] [sum(CASE WHEN (isnotnull(customer_sk) AND isnull(customer_sk)) THEN 1 ELSE 0 END),sum(CASE WHEN (isnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END),sum(CASE WHEN (isnotnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END),store_only,catalog_only,store_and_catalog,sum,sum,sum] + InputAdapter + Exchange #1 + WholeStageCodegen (7) + HashAggregate [customer_sk,customer_sk] [sum,sum,sum,sum,sum,sum] + Project [customer_sk,customer_sk] + InputAdapter + SortMergeJoin [customer_sk,item_sk,customer_sk,item_sk] + WholeStageCodegen (3) + Sort [customer_sk,item_sk] + HashAggregate [ss_customer_sk,ss_item_sk] [customer_sk,item_sk] + InputAdapter + Exchange [ss_customer_sk,ss_item_sk] #2 + WholeStageCodegen (2) + HashAggregate [ss_customer_sk,ss_item_sk] + Project [ss_item_sk,ss_customer_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + 
Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_month_seq] + WholeStageCodegen (6) + Sort [customer_sk,item_sk] + HashAggregate [cs_bill_customer_sk,cs_item_sk] [customer_sk,item_sk] + InputAdapter + Exchange [cs_bill_customer_sk,cs_item_sk] #4 + WholeStageCodegen (5) + HashAggregate [cs_bill_customer_sk,cs_item_sk] + Project [cs_bill_customer_sk,cs_item_sk] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] + InputAdapter + ReusedExchange [d_date_sk] #3 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt index 0a2e88b5bc160..fadad48be3d6c 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/explain.txt @@ -1,34 +1,33 @@ == Physical Plan == -CollectLimit (30) -+- * HashAggregate (29) - +- Exchange (28) - +- * HashAggregate (27) - +- * Project (26) - +- SortMergeJoin FullOuter (25) - :- * Sort (14) - : +- * HashAggregate (13) - : +- Exchange (12) - : +- * HashAggregate (11) - : +- * Project (10) - : +- * BroadcastHashJoin Inner BuildRight (9) - : :- * Filter (3) - : : +- * ColumnarToRow (2) - : : +- Scan parquet default.store_sales (1) - : +- BroadcastExchange (8) - : +- * Project (7) - : +- * Filter (6) - : +- * ColumnarToRow (5) - : +- Scan parquet default.date_dim (4) - +- * Sort (24) - +- * HashAggregate (23) - +- Exchange (22) - +- * HashAggregate (21) - +- * Project (20) - +- * BroadcastHashJoin Inner BuildRight (19) - :- * Filter (17) - : +- * ColumnarToRow (16) - : +- Scan parquet default.catalog_sales (15) - +- ReusedExchange (18) +* HashAggregate (29) ++- Exchange (28) + +- * HashAggregate (27) + +- * Project (26) + +- SortMergeJoin FullOuter (25) + :- * Sort (14) + : +- * HashAggregate (13) + : +- Exchange (12) + : +- * HashAggregate (11) + : +- * Project (10) + : +- * BroadcastHashJoin Inner BuildRight (9) + : :- * Filter (3) + : : +- * ColumnarToRow (2) + : : +- Scan parquet default.store_sales (1) + : +- BroadcastExchange (8) + : +- * Project (7) + : +- * Filter (6) + : +- * ColumnarToRow (5) + : +- Scan parquet default.date_dim (4) + +- * Sort (24) + +- * HashAggregate (23) + +- Exchange (22) + +- * HashAggregate (21) + +- * Project (20) + +- * BroadcastHashJoin Inner BuildRight (19) + :- * Filter (17) + : +- * ColumnarToRow (16) + : +- Scan parquet default.catalog_sales (15) + +- ReusedExchange (18) (1) Scan parquet default.store_sales @@ -85,7 +84,7 @@ Results [2]: [ss_customer_sk#3, ss_item_sk#2] (12) Exchange Input [2]: [ss_customer_sk#3, ss_item_sk#2] -Arguments: hashpartitioning(ss_customer_sk#3, ss_item_sk#2, 5), true, [id=#7] +Arguments: hashpartitioning(ss_customer_sk#3, ss_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#7] (13) HashAggregate [codegen id : 3] Input [2]: [ss_customer_sk#3, ss_item_sk#2] @@ -133,7 +132,7 @@ Results [2]: [cs_bill_customer_sk#11, cs_item_sk#12] (22) Exchange Input [2]: [cs_bill_customer_sk#11, cs_item_sk#12] -Arguments: hashpartitioning(cs_bill_customer_sk#11, cs_item_sk#12, 5), true, [id=#13] +Arguments: hashpartitioning(cs_bill_customer_sk#11, cs_item_sk#12, 5), ENSURE_REQUIREMENTS, [id=#13] (23) HashAggregate [codegen id : 6] Input [2]: [cs_bill_customer_sk#11, 
cs_item_sk#12] @@ -158,22 +157,18 @@ Input [4]: [customer_sk#8, item_sk#9, customer_sk#14, item_sk#15] (27) HashAggregate [codegen id : 7] Input [2]: [customer_sk#8, customer_sk#14] Keys: [] -Functions [3]: [partial_sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))] +Functions [3]: [partial_sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)] Aggregate Attributes [3]: [sum#16, sum#17, sum#18] Results [3]: [sum#19, sum#20, sum#21] (28) Exchange Input [3]: [sum#19, sum#20, sum#21] -Arguments: SinglePartition, true, [id=#22] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#22] (29) HashAggregate [codegen id : 8] Input [3]: [sum#19, sum#20, sum#21] Keys: [] -Functions [3]: [sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [3]: [sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#23, sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#24, sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#25] -Results [3]: [sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#23 AS store_only#26, sum(cast(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#24 AS catalog_only#27, sum(cast(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END as bigint))#25 AS store_and_catalog#28] - -(30) CollectLimit -Input [3]: [store_only#26, catalog_only#27, store_and_catalog#28] -Arguments: 100 +Functions [3]: [sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END), sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END), sum(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)] +Aggregate Attributes [3]: [sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END)#23, sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#24, sum(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#25] +Results [3]: [sum(CASE WHEN (isnotnull(customer_sk#8) AND isnull(customer_sk#14)) THEN 1 ELSE 0 END)#23 AS store_only#26, sum(CASE WHEN (isnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#24 AS catalog_only#27, sum(CASE WHEN (isnotnull(customer_sk#8) AND isnotnull(customer_sk#14)) THEN 1 ELSE 0 END)#25 AS store_and_catalog#28] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/simplified.txt index bae48ec244faa..dc149c443c20f 100644 --- 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q97/simplified.txt @@ -1,46 +1,45 @@ -CollectLimit - WholeStageCodegen (8) - HashAggregate [sum,sum,sum] [sum(cast(CASE WHEN (isnotnull(customer_sk) AND isnull(customer_sk)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (isnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (isnotnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END as bigint)),store_only,catalog_only,store_and_catalog,sum,sum,sum] - InputAdapter - Exchange #1 - WholeStageCodegen (7) - HashAggregate [customer_sk,customer_sk] [sum,sum,sum,sum,sum,sum] - Project [customer_sk,customer_sk] - InputAdapter - SortMergeJoin [customer_sk,item_sk,customer_sk,item_sk] - WholeStageCodegen (3) - Sort [customer_sk,item_sk] - HashAggregate [ss_customer_sk,ss_item_sk] [customer_sk,item_sk] - InputAdapter - Exchange [ss_customer_sk,ss_item_sk] #2 - WholeStageCodegen (2) - HashAggregate [ss_customer_sk,ss_item_sk] - Project [ss_item_sk,ss_customer_sk] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk] - InputAdapter - BroadcastExchange #3 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] - WholeStageCodegen (6) - Sort [customer_sk,item_sk] - HashAggregate [cs_bill_customer_sk,cs_item_sk] [customer_sk,item_sk] - InputAdapter - Exchange [cs_bill_customer_sk,cs_item_sk] #4 - WholeStageCodegen (5) - HashAggregate [cs_bill_customer_sk,cs_item_sk] - Project [cs_bill_customer_sk,cs_item_sk] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] - InputAdapter - ReusedExchange [d_date_sk] #3 +WholeStageCodegen (8) + HashAggregate [sum,sum,sum] [sum(CASE WHEN (isnotnull(customer_sk) AND isnull(customer_sk)) THEN 1 ELSE 0 END),sum(CASE WHEN (isnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END),sum(CASE WHEN (isnotnull(customer_sk) AND isnotnull(customer_sk)) THEN 1 ELSE 0 END),store_only,catalog_only,store_and_catalog,sum,sum,sum] + InputAdapter + Exchange #1 + WholeStageCodegen (7) + HashAggregate [customer_sk,customer_sk] [sum,sum,sum,sum,sum,sum] + Project [customer_sk,customer_sk] + InputAdapter + SortMergeJoin [customer_sk,item_sk,customer_sk,item_sk] + WholeStageCodegen (3) + Sort [customer_sk,item_sk] + HashAggregate [ss_customer_sk,ss_item_sk] [customer_sk,item_sk] + InputAdapter + Exchange [ss_customer_sk,ss_item_sk] #2 + WholeStageCodegen (2) + HashAggregate [ss_customer_sk,ss_item_sk] + Project [ss_item_sk,ss_customer_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_month_seq] + WholeStageCodegen (6) + Sort [customer_sk,item_sk] + HashAggregate [cs_bill_customer_sk,cs_item_sk] [customer_sk,item_sk] + InputAdapter + Exchange [cs_bill_customer_sk,cs_item_sk] #4 + WholeStageCodegen (5) + HashAggregate 
[cs_bill_customer_sk,cs_item_sk] + Project [cs_bill_customer_sk,cs_item_sk] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_bill_customer_sk,cs_item_sk] + InputAdapter + ReusedExchange [d_date_sk] #3 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt index c547e7af5d790..5d9c5794ae33b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/explain.txt @@ -10,15 +10,15 @@ TakeOrderedAndProject (32) : :- * Project (16) : : +- * BroadcastHashJoin Inner BuildRight (15) : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : :- BroadcastExchange (5) - : : : : +- * Project (4) - : : : : +- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.date_dim (1) - : : : +- * Filter (8) - : : : +- * ColumnarToRow (7) - : : : +- Scan parquet default.catalog_sales (6) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.catalog_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) : : +- BroadcastExchange (14) : : +- * Filter (13) : : +- * ColumnarToRow (12) @@ -33,50 +33,50 @@ TakeOrderedAndProject (32) +- Scan parquet default.warehouse (23) -(1) Scan parquet default.date_dim -Output [2]: [d_date_sk#1, d_month_seq#2] +(1) Scan parquet default.catalog_sales +Output [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_mode_sk#4, cs_warehouse_sk#5] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/catalog_sales] +PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_ship_mode_sk), IsNotNull(cs_call_center_sk), IsNotNull(cs_ship_date_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [2]: [d_date_sk#1, d_month_seq#2] +(2) ColumnarToRow [codegen id : 5] +Input [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_mode_sk#4, cs_warehouse_sk#5] -(3) Filter [codegen id : 1] -Input [2]: [d_date_sk#1, d_month_seq#2] -Condition : (((isnotnull(d_month_seq#2) AND (d_month_seq#2 >= 1200)) AND (d_month_seq#2 <= 1211)) AND isnotnull(d_date_sk#1)) +(3) Filter [codegen id : 5] +Input [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_mode_sk#4, cs_warehouse_sk#5] +Condition : (((isnotnull(cs_warehouse_sk#5) AND isnotnull(cs_ship_mode_sk#4)) AND isnotnull(cs_call_center_sk#3)) AND isnotnull(cs_ship_date_sk#2)) -(4) Project [codegen id : 1] -Output [1]: [d_date_sk#1] -Input [2]: [d_date_sk#1, d_month_seq#2] +(4) Scan parquet default.date_dim +Output [2]: [d_date_sk#6, d_month_seq#7] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_month_seq), GreaterThanOrEqual(d_month_seq,1200), LessThanOrEqual(d_month_seq,1211), IsNotNull(d_date_sk)] +ReadSchema: struct -(5) BroadcastExchange -Input [1]: [d_date_sk#1] 
-Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] +(5) ColumnarToRow [codegen id : 1] +Input [2]: [d_date_sk#6, d_month_seq#7] -(6) Scan parquet default.catalog_sales -Output [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_mode_sk#7, cs_warehouse_sk#8] -Batched: true -Location [not included in comparison]/{warehouse_dir}/catalog_sales] -PushedFilters: [IsNotNull(cs_warehouse_sk), IsNotNull(cs_ship_mode_sk), IsNotNull(cs_call_center_sk), IsNotNull(cs_ship_date_sk)] -ReadSchema: struct +(6) Filter [codegen id : 1] +Input [2]: [d_date_sk#6, d_month_seq#7] +Condition : (((isnotnull(d_month_seq#7) AND (d_month_seq#7 >= 1200)) AND (d_month_seq#7 <= 1211)) AND isnotnull(d_date_sk#6)) -(7) ColumnarToRow -Input [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_mode_sk#7, cs_warehouse_sk#8] +(7) Project [codegen id : 1] +Output [1]: [d_date_sk#6] +Input [2]: [d_date_sk#6, d_month_seq#7] -(8) Filter -Input [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_mode_sk#7, cs_warehouse_sk#8] -Condition : (((isnotnull(cs_warehouse_sk#8) AND isnotnull(cs_ship_mode_sk#7)) AND isnotnull(cs_call_center_sk#6)) AND isnotnull(cs_ship_date_sk#5)) +(8) BroadcastExchange +Input [1]: [d_date_sk#6] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#8] (9) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [d_date_sk#1] -Right keys [1]: [cs_ship_date_sk#5] +Left keys [1]: [cs_ship_date_sk#2] +Right keys [1]: [d_date_sk#6] Join condition: None (10) Project [codegen id : 5] -Output [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_mode_sk#7, cs_warehouse_sk#8] -Input [6]: [d_date_sk#1, cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_mode_sk#7, cs_warehouse_sk#8] +Output [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_mode_sk#4, cs_warehouse_sk#5] +Input [6]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_mode_sk#4, cs_warehouse_sk#5, d_date_sk#6] (11) Scan parquet default.ship_mode Output [2]: [sm_ship_mode_sk#9, sm_type#10] @@ -97,13 +97,13 @@ Input [2]: [sm_ship_mode_sk#9, sm_type#10] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#11] (15) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [cs_ship_mode_sk#7] +Left keys [1]: [cs_ship_mode_sk#4] Right keys [1]: [sm_ship_mode_sk#9] Join condition: None (16) Project [codegen id : 5] -Output [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_warehouse_sk#8, sm_type#10] -Input [7]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_ship_mode_sk#7, cs_warehouse_sk#8, sm_ship_mode_sk#9, sm_type#10] +Output [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_warehouse_sk#5, sm_type#10] +Input [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_ship_mode_sk#4, cs_warehouse_sk#5, sm_ship_mode_sk#9, sm_type#10] (17) Scan parquet default.call_center Output [2]: [cc_call_center_sk#12, cc_name#13] @@ -124,13 +124,13 @@ Input [2]: [cc_call_center_sk#12, cc_name#13] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#14] (21) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [cs_call_center_sk#6] +Left keys [1]: [cs_call_center_sk#3] Right keys [1]: [cc_call_center_sk#12] Join condition: None (22) Project [codegen id : 5] -Output [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, 
cs_warehouse_sk#8, sm_type#10, cc_name#13] -Input [7]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_call_center_sk#6, cs_warehouse_sk#8, sm_type#10, cc_call_center_sk#12, cc_name#13] +Output [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_warehouse_sk#5, sm_type#10, cc_name#13] +Input [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_call_center_sk#3, cs_warehouse_sk#5, sm_type#10, cc_call_center_sk#12, cc_name#13] (23) Scan parquet default.warehouse Output [2]: [w_warehouse_sk#15, w_warehouse_name#16] @@ -151,31 +151,31 @@ Input [2]: [w_warehouse_sk#15, w_warehouse_name#16] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#17] (27) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [cs_warehouse_sk#8] +Left keys [1]: [cs_warehouse_sk#5] Right keys [1]: [w_warehouse_sk#15] Join condition: None (28) Project [codegen id : 5] -Output [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, w_warehouse_name#16, sm_type#10, cc_name#13] -Input [7]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_warehouse_sk#8, sm_type#10, cc_name#13, w_warehouse_sk#15, w_warehouse_name#16] +Output [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, w_warehouse_name#16, sm_type#10, cc_name#13] +Input [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_warehouse_sk#5, sm_type#10, cc_name#13, w_warehouse_sk#15, w_warehouse_name#16] (29) HashAggregate [codegen id : 5] -Input [5]: [cs_sold_date_sk#4, cs_ship_date_sk#5, w_warehouse_name#16, sm_type#10, cc_name#13] +Input [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, w_warehouse_name#16, sm_type#10, cc_name#13] Keys [3]: [substr(w_warehouse_name#16, 1, 20) AS substr(w_warehouse_name#16, 1, 20)#18, sm_type#10, cc_name#13] -Functions [5]: [partial_sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 30) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 60) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 90) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))] +Functions [5]: [partial_sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] Aggregate Attributes [5]: [sum#19, sum#20, sum#21, sum#22, sum#23] Results [8]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#10, cc_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] (30) Exchange Input [8]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#10, cc_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] -Arguments: hashpartitioning(substr(w_warehouse_name#16, 1, 20)#18, sm_type#10, cc_name#13, 5), true, [id=#29] +Arguments: hashpartitioning(substr(w_warehouse_name#16, 1, 20)#18, sm_type#10, cc_name#13, 5), ENSURE_REQUIREMENTS, 
[id=#29] (31) HashAggregate [codegen id : 6] Input [8]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#10, cc_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] Keys [3]: [substr(w_warehouse_name#16, 1, 20)#18, sm_type#10, cc_name#13] -Functions [5]: [sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 30) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 60) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 90) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [5]: [sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint))#30, sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 30) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint))#31, sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 60) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint))#32, sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 90) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint))#33, sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))#34] -Results [8]: [substr(w_warehouse_name#16, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#10, cc_name#13, sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 30) THEN 1 ELSE 0 END as bigint))#30 AS 30 days #36, sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 30) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 60)) THEN 1 ELSE 0 END as bigint))#31 AS 31 - 60 days #37, sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 60) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 90)) THEN 1 ELSE 0 END as bigint))#32 AS 61 - 90 days #38, sum(cast(CASE WHEN (((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 90) AND ((cs_ship_date_sk#5 - cs_sold_date_sk#4) <= 120)) THEN 1 ELSE 0 END as bigint))#33 AS 91 - 120 days #39, sum(cast(CASE WHEN ((cs_ship_date_sk#5 - cs_sold_date_sk#4) > 120) THEN 1 ELSE 0 END as bigint))#34 AS >120 days #40] +Functions [5]: [sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END), sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END), sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] +Aggregate Attributes [5]: [sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 
120)) THEN 1 ELSE 0 END)#33, sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34] +Results [8]: [substr(w_warehouse_name#16, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#10, cc_name#13, sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30 AS 30 days #36, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31 AS 31 - 60 days #37, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32 AS 61 - 90 days #38, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END)#33 AS 91 - 120 days #39, sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34 AS >120 days #40] (32) TakeOrderedAndProject Input [8]: [substr(w_warehouse_name, 1, 20)#35, sm_type#10, cc_name#13, 30 days #36, 31 - 60 days #37, 61 - 90 days #38, 91 - 120 days #39, >120 days #40] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/simplified.txt index de3b1913ae25c..3526a87fad82e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99.sf100/simplified.txt @@ -1,6 +1,6 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,cc_name,30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ] WholeStageCodegen (6) - HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,cc_name,sum,sum,sum,sum,sum] [sum(cast(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) <= 30) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 30) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 60)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 60) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 90)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 90) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 120)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) > 120) THEN 1 ELSE 0 END as bigint)),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] + HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,cc_name,sum,sum,sum,sum,sum] [sum(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) <= 30) THEN 1 ELSE 0 END),sum(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 30) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 60)) THEN 1 ELSE 0 END),sum(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 60) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 90)) THEN 1 ELSE 0 END),sum(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 90) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 120)) THEN 1 ELSE 0 END),sum(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) > 120) THEN 1 ELSE 0 END),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] InputAdapter Exchange [substr(w_warehouse_name, 1, 20),sm_type,cc_name] #1 WholeStageCodegen (5) @@ -12,7 +12,11 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,cc_name,30 days , Project [cs_sold_date_sk,cs_ship_date_sk,cs_call_center_sk,cs_warehouse_sk,sm_type] 
BroadcastHashJoin [cs_ship_mode_sk,sm_ship_mode_sk] Project [cs_sold_date_sk,cs_ship_date_sk,cs_call_center_sk,cs_ship_mode_sk,cs_warehouse_sk] - BroadcastHashJoin [d_date_sk,cs_ship_date_sk] + BroadcastHashJoin [cs_ship_date_sk,d_date_sk] + Filter [cs_warehouse_sk,cs_ship_mode_sk,cs_call_center_sk,cs_ship_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ship_date_sk,cs_call_center_sk,cs_ship_mode_sk,cs_warehouse_sk] InputAdapter BroadcastExchange #2 WholeStageCodegen (1) @@ -21,10 +25,6 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,cc_name,30 days , ColumnarToRow InputAdapter Scan parquet default.date_dim [d_date_sk,d_month_seq] - Filter [cs_warehouse_sk,cs_ship_mode_sk,cs_call_center_sk,cs_ship_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ship_date_sk,cs_call_center_sk,cs_ship_mode_sk,cs_warehouse_sk] InputAdapter BroadcastExchange #3 WholeStageCodegen (2) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt index 595cb2984ab75..b7dcf12fb7166 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/explain.txt @@ -162,20 +162,20 @@ Input [6]: [cs_sold_date_sk#1, cs_ship_date_sk#2, w_warehouse_name#7, sm_type#10 (29) HashAggregate [codegen id : 5] Input [5]: [cs_sold_date_sk#1, cs_ship_date_sk#2, w_warehouse_name#7, sm_type#10, cc_name#13] Keys [3]: [substr(w_warehouse_name#7, 1, 20) AS substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, cc_name#13] -Functions [5]: [partial_sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint)), partial_sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))] +Functions [5]: [partial_sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), partial_sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] Aggregate Attributes [5]: [sum#19, sum#20, sum#21, sum#22, sum#23] Results [8]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, cc_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] (30) Exchange Input [8]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, cc_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] -Arguments: hashpartitioning(substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, cc_name#13, 5), true, [id=#29] +Arguments: hashpartitioning(substr(w_warehouse_name#7, 
1, 20)#18, sm_type#10, cc_name#13, 5), ENSURE_REQUIREMENTS, [id=#29] (31) HashAggregate [codegen id : 6] Input [8]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, cc_name#13, sum#24, sum#25, sum#26, sum#27, sum#28] Keys [3]: [substr(w_warehouse_name#7, 1, 20)#18, sm_type#10, cc_name#13] -Functions [5]: [sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint)), sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))] -Aggregate Attributes [5]: [sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint))#30, sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint))#31, sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint))#32, sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint))#33, sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))#34] -Results [8]: [substr(w_warehouse_name#7, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#10, cc_name#13, sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END as bigint))#30 AS 30 days #36, sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END as bigint))#31 AS 31 - 60 days #37, sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END as bigint))#32 AS 61 - 90 days #38, sum(cast(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END as bigint))#33 AS 91 - 120 days #39, sum(cast(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END as bigint))#34 AS >120 days #40] +Functions [5]: [sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END), sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END), sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END), sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END), sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)] +Aggregate Attributes [5]: [sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32, sum(CASE WHEN (((cs_ship_date_sk#2 - 
cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END)#33, sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34] +Results [8]: [substr(w_warehouse_name#7, 1, 20)#18 AS substr(w_warehouse_name, 1, 20)#35, sm_type#10, cc_name#13, sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 30) THEN 1 ELSE 0 END)#30 AS 30 days #36, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 30) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 60)) THEN 1 ELSE 0 END)#31 AS 31 - 60 days #37, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 60) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 90)) THEN 1 ELSE 0 END)#32 AS 61 - 90 days #38, sum(CASE WHEN (((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 90) AND ((cs_ship_date_sk#2 - cs_sold_date_sk#1) <= 120)) THEN 1 ELSE 0 END)#33 AS 91 - 120 days #39, sum(CASE WHEN ((cs_ship_date_sk#2 - cs_sold_date_sk#1) > 120) THEN 1 ELSE 0 END)#34 AS >120 days #40] (32) TakeOrderedAndProject Input [8]: [substr(w_warehouse_name, 1, 20)#35, sm_type#10, cc_name#13, 30 days #36, 31 - 60 days #37, 61 - 90 days #38, 91 - 120 days #39, >120 days #40] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/simplified.txt index 9ebaaac52930a..79f7b4f13350d 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v1_4/q99/simplified.txt @@ -1,6 +1,6 @@ TakeOrderedAndProject [substr(w_warehouse_name, 1, 20),sm_type,cc_name,30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ] WholeStageCodegen (6) - HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,cc_name,sum,sum,sum,sum,sum] [sum(cast(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) <= 30) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 30) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 60)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 60) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 90)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 90) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 120)) THEN 1 ELSE 0 END as bigint)),sum(cast(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) > 120) THEN 1 ELSE 0 END as bigint)),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] + HashAggregate [substr(w_warehouse_name, 1, 20),sm_type,cc_name,sum,sum,sum,sum,sum] [sum(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) <= 30) THEN 1 ELSE 0 END),sum(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 30) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 60)) THEN 1 ELSE 0 END),sum(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 60) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 90)) THEN 1 ELSE 0 END),sum(CASE WHEN (((cs_ship_date_sk - cs_sold_date_sk) > 90) AND ((cs_ship_date_sk - cs_sold_date_sk) <= 120)) THEN 1 ELSE 0 END),sum(CASE WHEN ((cs_ship_date_sk - cs_sold_date_sk) > 120) THEN 1 ELSE 0 END),substr(w_warehouse_name, 1, 20),30 days ,31 - 60 days ,61 - 90 days ,91 - 120 days ,>120 days ,sum,sum,sum,sum,sum] InputAdapter Exchange [substr(w_warehouse_name, 1, 20),sm_type,cc_name] #1 WholeStageCodegen (5) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt index 25a1ca79cc500..dad6098ce4685 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/explain.txt @@ -496,15 +496,15 @@ Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#38, isEmpty#39, cou Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42, count(1)#43] -Results [7]: [store AS channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sales#45, count(1)#43 AS number_sales#46, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sales#44, count(1)#43 AS number_sales#45, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46] (86) Filter [codegen id : 78] -Input [7]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47 as decimal(32,6)) > cast(Subquery scalar-subquery#48, [id=#49] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 
as decimal(12,2)))), DecimalType(18,2), true))#46) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46 as decimal(32,6)) > cast(Subquery scalar-subquery#47, [id=#48] as decimal(32,6)))) (87) Project [codegen id : 78] -Output [6]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46] -Input [7]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#47] +Output [6]: [store AS channel#49, i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#46] (88) ReusedExchange [Reuses operator id: 4] Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] @@ -584,18 +584,18 @@ Input [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sum#60, isEmpty#61, Keys [3]: [i_brand_id#54, i_class_id#55, i_category_id#56] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64, count(1)#65] -Results [7]: [store AS channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sales#67, count(1)#65 AS number_sales#68, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69] +Results [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sales#66, count(1)#65 AS number_sales#67, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#64 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68] (106) Filter [codegen id : 77] -Input [7]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), 
true))#69] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#48, [id=#49] as decimal(32,6)))) +Input [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#47, [id=#48] as decimal(32,6)))) (107) Project [codegen id : 77] -Output [6]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] -Input [7]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#69] +Output [6]: [store AS channel#69, i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67] +Input [6]: [i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#68] (108) BroadcastExchange -Input [6]: [channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] +Input [6]: [channel#69, i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67] Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true]),false), [id=#70] (109) BroadcastHashJoin [codegen id : 78] @@ -604,12 +604,12 @@ Right keys [3]: [i_brand_id#54, i_class_id#55, i_category_id#56] Join condition: None (110) TakeOrderedAndProject -Input [12]: [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] -Arguments: 100, [channel#44 ASC NULLS FIRST, i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST], [channel#44, i_brand_id#7, i_class_id#8, i_category_id#9, sales#45, number_sales#46, channel#66, i_brand_id#54, i_class_id#55, i_category_id#56, sales#67, number_sales#68] +Input [12]: [channel#49, i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, channel#69, i_brand_id#54, i_class_id#55, i_category_id#56, sales#66, number_sales#67] +Arguments: 100, [i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST], [channel#49, i_brand_id#7, i_class_id#8, i_category_id#9, sales#44, number_sales#45, channel#69, i_brand_id#54, 
i_class_id#55, i_category_id#56, sales#66, number_sales#67] ===== Subqueries ===== -Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#48, [id=#49] +Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#47, [id=#48] * HashAggregate (136) +- Exchange (135) +- * HashAggregate (134) @@ -780,7 +780,7 @@ Condition : (((((isnotnull(d_year#11) AND isnotnull(d_moy#89)) AND isnotnull(d_d Output [1]: [d_week_seq#29] Input [4]: [d_week_seq#29, d_year#11, d_moy#89, d_dom#90] -Subquery:3 Hosting operator id = 106 Hosting Expression = ReusedSubquery Subquery scalar-subquery#48, [id=#49] +Subquery:3 Hosting operator id = 106 Hosting Expression = ReusedSubquery Subquery scalar-subquery#47, [id=#48] Subquery:4 Hosting operator id = 95 Hosting Expression = Subquery scalar-subquery#50, [id=#51] * Project (144) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/simplified.txt index 37186560cb3b8..d6b8ba4395d2e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14.sf100/simplified.txt @@ -1,7 +1,7 @@ -TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] +TakeOrderedAndProject [i_brand_id,i_class_id,i_category_id,channel,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] WholeStageCodegen (78) BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,i_brand_id,i_class_id,i_category_id] - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] Subquery #2 WholeStageCodegen (8) @@ -45,7 +45,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] InputAdapter ReusedExchange [d_date_sk] #16 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #1 WholeStageCodegen (38) @@ -190,10 +190,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ InputAdapter BroadcastExchange #17 WholeStageCodegen (77) - 
Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #2 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #18 WholeStageCodegen (76) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt index ea0e8319f3fe0..1af2e69d57338 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/explain.txt @@ -446,15 +446,15 @@ Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#35, isEmpty#36, cou Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39, count(1)#40] -Results [7]: [store AS channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sales#42, count(1)#40 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sales#41, count(1)#40 AS number_sales#42, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#39 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43] (76) Filter [codegen id : 52] -Input [7]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44 as decimal(32,6)) > cast(Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43 as decimal(32,6)) > cast(Subquery scalar-subquery#44, [id=#45] as decimal(32,6)))) (77) Project [codegen id : 52] -Output [6]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43] -Input [7]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] +Output [6]: [store AS channel#46, i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#43] (78) Scan parquet default.store_sales Output [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] @@ -537,18 +537,18 @@ Input [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sum#57, isEmpty#58, Keys [3]: [i_brand_id#48, i_class_id#49, i_category_id#50] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61, count(1)#62] -Results [7]: [store AS channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as 
decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sales#64, count(1)#62 AS number_sales#65, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66] +Results [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sales#63, count(1)#62 AS number_sales#64, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#61 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65] (96) Filter [codegen id : 51] -Input [7]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) +Input [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#44, [id=#45] as decimal(32,6)))) (97) Project [codegen id : 51] -Output [6]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] -Input [7]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#66] +Output [6]: [store AS channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] +Input [6]: [i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#65] (98) BroadcastExchange -Input [6]: [channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] +Input [6]: [channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true]),false), [id=#67] (99) BroadcastHashJoin [codegen id : 52] @@ -557,12 +557,12 @@ Right keys [3]: [i_brand_id#48, i_class_id#49, i_category_id#50] Join condition: None (100) TakeOrderedAndProject -Input [12]: [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] -Arguments: 100, [channel#41 ASC NULLS FIRST, i_brand_id#6 ASC NULLS FIRST, i_class_id#7 ASC NULLS FIRST, i_category_id#8 ASC NULLS FIRST], [channel#41, i_brand_id#6, i_class_id#7, i_category_id#8, sales#42, number_sales#43, channel#63, i_brand_id#48, i_class_id#49, i_category_id#50, sales#64, number_sales#65] +Input [12]: [channel#46, i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] +Arguments: 100, [i_brand_id#6 ASC NULLS FIRST, i_class_id#7 ASC NULLS FIRST, i_category_id#8 ASC NULLS FIRST], [channel#46, i_brand_id#6, i_class_id#7, i_category_id#8, sales#41, number_sales#42, channel#66, i_brand_id#48, i_class_id#49, i_category_id#50, sales#63, number_sales#64] ===== Subqueries ===== -Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#45, [id=#46] +Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#44, [id=#45] * HashAggregate (126) +- Exchange (125) +- * HashAggregate (124) @@ -733,7 +733,7 @@ Condition : (((((isnotnull(d_year#11) AND isnotnull(d_moy#86)) AND isnotnull(d_d Output [1]: [d_week_seq#28] Input [4]: [d_week_seq#28, d_year#11, d_moy#86, d_dom#87] -Subquery:3 Hosting operator id = 96 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] +Subquery:3 Hosting operator id = 96 Hosting Expression = ReusedSubquery Subquery scalar-subquery#44, [id=#45] Subquery:4 Hosting operator id = 88 Hosting Expression = Subquery scalar-subquery#51, [id=#52] * Project (134) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/simplified.txt index 6e6950d4cb33a..7bbf83e3de707 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14/simplified.txt @@ -1,7 +1,7 @@ -TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] +TakeOrderedAndProject [i_brand_id,i_class_id,i_category_id,channel,sales,number_sales,channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] WholeStageCodegen (52) BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,i_brand_id,i_class_id,i_category_id] - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), 
DecimalType(18,2), true))] Subquery #2 WholeStageCodegen (8) @@ -45,7 +45,7 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] InputAdapter ReusedExchange [d_date_sk] #13 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #1 WholeStageCodegen (25) @@ -166,10 +166,10 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sales,number_ InputAdapter BroadcastExchange #14 WholeStageCodegen (51) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #2 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id] #15 WholeStageCodegen (50) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt index 8c697ff080952..7be9447d16b45 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/explain.txt @@ -1,226 +1,214 @@ == Physical Plan == -TakeOrderedAndProject (222) -+- * HashAggregate (221) - +- Exchange (220) - +- * HashAggregate (219) - +- Union (218) - :- * HashAggregate (198) 
- : +- Exchange (197) - : +- * HashAggregate (196) - : +- Union (195) - : :- * HashAggregate (175) - : : +- Exchange (174) - : : +- * HashAggregate (173) - : : +- Union (172) - : : :- * HashAggregate (152) - : : : +- Exchange (151) - : : : +- * HashAggregate (150) - : : : +- Union (149) - : : : :- * HashAggregate (129) - : : : : +- Exchange (128) - : : : : +- * HashAggregate (127) - : : : : +- Union (126) - : : : : :- * Project (87) - : : : : : +- * Filter (86) - : : : : : +- * HashAggregate (85) - : : : : : +- Exchange (84) - : : : : : +- * HashAggregate (83) - : : : : : +- * Project (82) - : : : : : +- * BroadcastHashJoin Inner BuildRight (81) - : : : : : :- * Project (71) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (70) - : : : : : : :- SortMergeJoin LeftSemi (64) - : : : : : : : :- * Sort (5) - : : : : : : : : +- Exchange (4) - : : : : : : : : +- * Filter (3) - : : : : : : : : +- * ColumnarToRow (2) - : : : : : : : : +- Scan parquet default.store_sales (1) - : : : : : : : +- * Sort (63) - : : : : : : : +- Exchange (62) - : : : : : : : +- * Project (61) - : : : : : : : +- * BroadcastHashJoin Inner BuildRight (60) - : : : : : : : :- * Filter (8) - : : : : : : : : +- * ColumnarToRow (7) - : : : : : : : : +- Scan parquet default.item (6) - : : : : : : : +- BroadcastExchange (59) - : : : : : : : +- * HashAggregate (58) - : : : : : : : +- * HashAggregate (57) - : : : : : : : +- * HashAggregate (56) - : : : : : : : +- Exchange (55) - : : : : : : : +- * HashAggregate (54) - : : : : : : : +- SortMergeJoin LeftSemi (53) - : : : : : : : :- SortMergeJoin LeftSemi (41) - : : : : : : : : :- * Sort (26) - : : : : : : : : : +- Exchange (25) - : : : : : : : : : +- * Project (24) - : : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (23) - : : : : : : : : : :- * Project (18) - : : : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (17) - : : : : : : : : : : :- * Filter (11) - : : : : : : : : : : : +- * ColumnarToRow (10) - : : : : : : : : : : : +- Scan parquet default.store_sales (9) - : : : : : : : : : : +- BroadcastExchange (16) - : : : : : : : : : : +- * Project (15) - : : : : : : : : : : +- * Filter (14) - : : : : : : : : : : +- * ColumnarToRow (13) - : : : : : : : : : : +- Scan parquet default.date_dim (12) - : : : : : : : : : +- BroadcastExchange (22) - : : : : : : : : : +- * Filter (21) - : : : : : : : : : +- * ColumnarToRow (20) - : : : : : : : : : +- Scan parquet default.item (19) - : : : : : : : : +- * Sort (40) - : : : : : : : : +- Exchange (39) - : : : : : : : : +- * Project (38) - : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (37) - : : : : : : : : :- * Project (32) - : : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (31) - : : : : : : : : : :- * Filter (29) - : : : : : : : : : : +- * ColumnarToRow (28) - : : : : : : : : : : +- Scan parquet default.catalog_sales (27) - : : : : : : : : : +- ReusedExchange (30) - : : : : : : : : +- BroadcastExchange (36) - : : : : : : : : +- * Filter (35) - : : : : : : : : +- * ColumnarToRow (34) - : : : : : : : : +- Scan parquet default.item (33) - : : : : : : : +- * Sort (52) - : : : : : : : +- Exchange (51) - : : : : : : : +- * Project (50) - : : : : : : : +- * BroadcastHashJoin Inner BuildRight (49) - : : : : : : : :- * Project (47) - : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (46) - : : : : : : : : :- * Filter (44) - : : : : : : : : : +- * ColumnarToRow (43) - : : : : : : : : : +- Scan parquet default.web_sales (42) - : : : : : : : : +- ReusedExchange (45) - : : : : : : : +- ReusedExchange (48) - : : : 
: : : +- BroadcastExchange (69) - : : : : : : +- * Project (68) - : : : : : : +- * Filter (67) - : : : : : : +- * ColumnarToRow (66) - : : : : : : +- Scan parquet default.date_dim (65) - : : : : : +- BroadcastExchange (80) - : : : : : +- SortMergeJoin LeftSemi (79) - : : : : : :- * Sort (76) - : : : : : : +- Exchange (75) - : : : : : : +- * Filter (74) - : : : : : : +- * ColumnarToRow (73) - : : : : : : +- Scan parquet default.item (72) - : : : : : +- * Sort (78) - : : : : : +- ReusedExchange (77) - : : : : :- * Project (106) - : : : : : +- * Filter (105) - : : : : : +- * HashAggregate (104) - : : : : : +- Exchange (103) - : : : : : +- * HashAggregate (102) - : : : : : +- * Project (101) - : : : : : +- * BroadcastHashJoin Inner BuildRight (100) - : : : : : :- * Project (98) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (97) - : : : : : : :- SortMergeJoin LeftSemi (95) - : : : : : : : :- * Sort (92) - : : : : : : : : +- Exchange (91) - : : : : : : : : +- * Filter (90) - : : : : : : : : +- * ColumnarToRow (89) - : : : : : : : : +- Scan parquet default.catalog_sales (88) - : : : : : : : +- * Sort (94) - : : : : : : : +- ReusedExchange (93) - : : : : : : +- ReusedExchange (96) - : : : : : +- ReusedExchange (99) - : : : : +- * Project (125) - : : : : +- * Filter (124) - : : : : +- * HashAggregate (123) - : : : : +- Exchange (122) - : : : : +- * HashAggregate (121) - : : : : +- * Project (120) - : : : : +- * BroadcastHashJoin Inner BuildRight (119) - : : : : :- * Project (117) - : : : : : +- * BroadcastHashJoin Inner BuildRight (116) - : : : : : :- SortMergeJoin LeftSemi (114) - : : : : : : :- * Sort (111) - : : : : : : : +- Exchange (110) - : : : : : : : +- * Filter (109) - : : : : : : : +- * ColumnarToRow (108) - : : : : : : : +- Scan parquet default.web_sales (107) - : : : : : : +- * Sort (113) - : : : : : : +- ReusedExchange (112) - : : : : : +- ReusedExchange (115) - : : : : +- ReusedExchange (118) - : : : +- * HashAggregate (148) - : : : +- Exchange (147) - : : : +- * HashAggregate (146) - : : : +- * HashAggregate (145) - : : : +- Exchange (144) - : : : +- * HashAggregate (143) - : : : +- Union (142) - : : : :- * Project (133) - : : : : +- * Filter (132) - : : : : +- * HashAggregate (131) - : : : : +- ReusedExchange (130) - : : : :- * Project (137) - : : : : +- * Filter (136) - : : : : +- * HashAggregate (135) - : : : : +- ReusedExchange (134) - : : : +- * Project (141) - : : : +- * Filter (140) - : : : +- * HashAggregate (139) - : : : +- ReusedExchange (138) - : : +- * HashAggregate (171) - : : +- Exchange (170) - : : +- * HashAggregate (169) - : : +- * HashAggregate (168) - : : +- Exchange (167) - : : +- * HashAggregate (166) - : : +- Union (165) - : : :- * Project (156) - : : : +- * Filter (155) - : : : +- * HashAggregate (154) - : : : +- ReusedExchange (153) - : : :- * Project (160) - : : : +- * Filter (159) - : : : +- * HashAggregate (158) - : : : +- ReusedExchange (157) - : : +- * Project (164) - : : +- * Filter (163) - : : +- * HashAggregate (162) - : : +- ReusedExchange (161) - : +- * HashAggregate (194) - : +- Exchange (193) - : +- * HashAggregate (192) - : +- * HashAggregate (191) - : +- Exchange (190) - : +- * HashAggregate (189) - : +- Union (188) - : :- * Project (179) - : : +- * Filter (178) - : : +- * HashAggregate (177) - : : +- ReusedExchange (176) - : :- * Project (183) - : : +- * Filter (182) - : : +- * HashAggregate (181) - : : +- ReusedExchange (180) - : +- * Project (187) - : +- * Filter (186) - : +- * HashAggregate (185) - : +- ReusedExchange (184) - +- * 
HashAggregate (217) - +- Exchange (216) - +- * HashAggregate (215) - +- * HashAggregate (214) - +- Exchange (213) - +- * HashAggregate (212) - +- Union (211) - :- * Project (202) - : +- * Filter (201) - : +- * HashAggregate (200) - : +- ReusedExchange (199) - :- * Project (206) - : +- * Filter (205) - : +- * HashAggregate (204) - : +- ReusedExchange (203) - +- * Project (210) - +- * Filter (209) - +- * HashAggregate (208) - +- ReusedExchange (207) +TakeOrderedAndProject (210) ++- * HashAggregate (209) + +- Exchange (208) + +- * HashAggregate (207) + +- Union (206) + :- * HashAggregate (129) + : +- Exchange (128) + : +- * HashAggregate (127) + : +- Union (126) + : :- * Project (87) + : : +- * Filter (86) + : : +- * HashAggregate (85) + : : +- Exchange (84) + : : +- * HashAggregate (83) + : : +- * Project (82) + : : +- * BroadcastHashJoin Inner BuildRight (81) + : : :- * Project (71) + : : : +- * BroadcastHashJoin Inner BuildRight (70) + : : : :- SortMergeJoin LeftSemi (64) + : : : : :- * Sort (5) + : : : : : +- Exchange (4) + : : : : : +- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- * Sort (63) + : : : : +- Exchange (62) + : : : : +- * Project (61) + : : : : +- * BroadcastHashJoin Inner BuildRight (60) + : : : : :- * Filter (8) + : : : : : +- * ColumnarToRow (7) + : : : : : +- Scan parquet default.item (6) + : : : : +- BroadcastExchange (59) + : : : : +- * HashAggregate (58) + : : : : +- * HashAggregate (57) + : : : : +- * HashAggregate (56) + : : : : +- Exchange (55) + : : : : +- * HashAggregate (54) + : : : : +- SortMergeJoin LeftSemi (53) + : : : : :- SortMergeJoin LeftSemi (41) + : : : : : :- * Sort (26) + : : : : : : +- Exchange (25) + : : : : : : +- * Project (24) + : : : : : : +- * BroadcastHashJoin Inner BuildRight (23) + : : : : : : :- * Project (18) + : : : : : : : +- * BroadcastHashJoin Inner BuildRight (17) + : : : : : : : :- * Filter (11) + : : : : : : : : +- * ColumnarToRow (10) + : : : : : : : : +- Scan parquet default.store_sales (9) + : : : : : : : +- BroadcastExchange (16) + : : : : : : : +- * Project (15) + : : : : : : : +- * Filter (14) + : : : : : : : +- * ColumnarToRow (13) + : : : : : : : +- Scan parquet default.date_dim (12) + : : : : : : +- BroadcastExchange (22) + : : : : : : +- * Filter (21) + : : : : : : +- * ColumnarToRow (20) + : : : : : : +- Scan parquet default.item (19) + : : : : : +- * Sort (40) + : : : : : +- Exchange (39) + : : : : : +- * Project (38) + : : : : : +- * BroadcastHashJoin Inner BuildRight (37) + : : : : : :- * Project (32) + : : : : : : +- * BroadcastHashJoin Inner BuildRight (31) + : : : : : : :- * Filter (29) + : : : : : : : +- * ColumnarToRow (28) + : : : : : : : +- Scan parquet default.catalog_sales (27) + : : : : : : +- ReusedExchange (30) + : : : : : +- BroadcastExchange (36) + : : : : : +- * Filter (35) + : : : : : +- * ColumnarToRow (34) + : : : : : +- Scan parquet default.item (33) + : : : : +- * Sort (52) + : : : : +- Exchange (51) + : : : : +- * Project (50) + : : : : +- * BroadcastHashJoin Inner BuildRight (49) + : : : : :- * Project (47) + : : : : : +- * BroadcastHashJoin Inner BuildRight (46) + : : : : : :- * Filter (44) + : : : : : : +- * ColumnarToRow (43) + : : : : : : +- Scan parquet default.web_sales (42) + : : : : : +- ReusedExchange (45) + : : : : +- ReusedExchange (48) + : : : +- BroadcastExchange (69) + : : : +- * Project (68) + : : : +- * Filter (67) + : : : +- * ColumnarToRow (66) + : : : +- Scan parquet default.date_dim (65) + : : +- 
BroadcastExchange (80) + : : +- SortMergeJoin LeftSemi (79) + : : :- * Sort (76) + : : : +- Exchange (75) + : : : +- * Filter (74) + : : : +- * ColumnarToRow (73) + : : : +- Scan parquet default.item (72) + : : +- * Sort (78) + : : +- ReusedExchange (77) + : :- * Project (106) + : : +- * Filter (105) + : : +- * HashAggregate (104) + : : +- Exchange (103) + : : +- * HashAggregate (102) + : : +- * Project (101) + : : +- * BroadcastHashJoin Inner BuildRight (100) + : : :- * Project (98) + : : : +- * BroadcastHashJoin Inner BuildRight (97) + : : : :- SortMergeJoin LeftSemi (95) + : : : : :- * Sort (92) + : : : : : +- Exchange (91) + : : : : : +- * Filter (90) + : : : : : +- * ColumnarToRow (89) + : : : : : +- Scan parquet default.catalog_sales (88) + : : : : +- * Sort (94) + : : : : +- ReusedExchange (93) + : : : +- ReusedExchange (96) + : : +- ReusedExchange (99) + : +- * Project (125) + : +- * Filter (124) + : +- * HashAggregate (123) + : +- Exchange (122) + : +- * HashAggregate (121) + : +- * Project (120) + : +- * BroadcastHashJoin Inner BuildRight (119) + : :- * Project (117) + : : +- * BroadcastHashJoin Inner BuildRight (116) + : : :- SortMergeJoin LeftSemi (114) + : : : :- * Sort (111) + : : : : +- Exchange (110) + : : : : +- * Filter (109) + : : : : +- * ColumnarToRow (108) + : : : : +- Scan parquet default.web_sales (107) + : : : +- * Sort (113) + : : : +- ReusedExchange (112) + : : +- ReusedExchange (115) + : +- ReusedExchange (118) + :- * HashAggregate (148) + : +- Exchange (147) + : +- * HashAggregate (146) + : +- * HashAggregate (145) + : +- Exchange (144) + : +- * HashAggregate (143) + : +- Union (142) + : :- * Project (133) + : : +- * Filter (132) + : : +- * HashAggregate (131) + : : +- ReusedExchange (130) + : :- * Project (137) + : : +- * Filter (136) + : : +- * HashAggregate (135) + : : +- ReusedExchange (134) + : +- * Project (141) + : +- * Filter (140) + : +- * HashAggregate (139) + : +- ReusedExchange (138) + :- * HashAggregate (167) + : +- Exchange (166) + : +- * HashAggregate (165) + : +- * HashAggregate (164) + : +- Exchange (163) + : +- * HashAggregate (162) + : +- Union (161) + : :- * Project (152) + : : +- * Filter (151) + : : +- * HashAggregate (150) + : : +- ReusedExchange (149) + : :- * Project (156) + : : +- * Filter (155) + : : +- * HashAggregate (154) + : : +- ReusedExchange (153) + : +- * Project (160) + : +- * Filter (159) + : +- * HashAggregate (158) + : +- ReusedExchange (157) + :- * HashAggregate (186) + : +- Exchange (185) + : +- * HashAggregate (184) + : +- * HashAggregate (183) + : +- Exchange (182) + : +- * HashAggregate (181) + : +- Union (180) + : :- * Project (171) + : : +- * Filter (170) + : : +- * HashAggregate (169) + : : +- ReusedExchange (168) + : :- * Project (175) + : : +- * Filter (174) + : : +- * HashAggregate (173) + : : +- ReusedExchange (172) + : +- * Project (179) + : +- * Filter (178) + : +- * HashAggregate (177) + : +- ReusedExchange (176) + +- * HashAggregate (205) + +- Exchange (204) + +- * HashAggregate (203) + +- * HashAggregate (202) + +- Exchange (201) + +- * HashAggregate (200) + +- Union (199) + :- * Project (190) + : +- * Filter (189) + : +- * HashAggregate (188) + : +- ReusedExchange (187) + :- * Project (194) + : +- * Filter (193) + : +- * HashAggregate (192) + : +- ReusedExchange (191) + +- * Project (198) + +- * Filter (197) + +- * HashAggregate (196) + +- ReusedExchange (195) (1) Scan parquet default.store_sales @@ -239,7 +227,7 @@ Condition : (isnotnull(ss_item_sk#2) AND isnotnull(ss_sold_date_sk#1)) (4) Exchange Input 
[4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] -Arguments: hashpartitioning(ss_item_sk#2, 5), true, [id=#5] +Arguments: hashpartitioning(ss_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#5] (5) Sort [codegen id : 2] Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_quantity#3, ss_list_price#4] @@ -333,7 +321,7 @@ Input [5]: [ss_item_sk#2, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id (25) Exchange Input [3]: [brand_id#14, class_id#15, category_id#16] -Arguments: hashpartitioning(coalesce(brand_id#14, 0), isnull(brand_id#14), coalesce(class_id#15, 0), isnull(class_id#15), coalesce(category_id#16, 0), isnull(category_id#16), 5), true, [id=#17] +Arguments: hashpartitioning(coalesce(brand_id#14, 0), isnull(brand_id#14), coalesce(class_id#15, 0), isnull(class_id#15), coalesce(category_id#16, 0), isnull(category_id#16), 5), ENSURE_REQUIREMENTS, [id=#17] (26) Sort [codegen id : 6] Input [3]: [brand_id#14, class_id#15, category_id#16] @@ -394,7 +382,7 @@ Input [5]: [cs_item_sk#19, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_i (39) Exchange Input [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] -Arguments: hashpartitioning(coalesce(i_brand_id#7, 0), isnull(i_brand_id#7), coalesce(i_class_id#8, 0), isnull(i_class_id#8), coalesce(i_category_id#9, 0), isnull(i_category_id#9), 5), true, [id=#21] +Arguments: hashpartitioning(coalesce(i_brand_id#7, 0), isnull(i_brand_id#7), coalesce(i_class_id#8, 0), isnull(i_class_id#8), coalesce(i_category_id#9, 0), isnull(i_category_id#9), 5), ENSURE_REQUIREMENTS, [id=#21] (40) Sort [codegen id : 10] Input [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] @@ -445,7 +433,7 @@ Input [5]: [ws_item_sk#23, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_i (51) Exchange Input [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] -Arguments: hashpartitioning(coalesce(i_brand_id#7, 0), isnull(i_brand_id#7), coalesce(i_class_id#8, 0), isnull(i_class_id#8), coalesce(i_category_id#9, 0), isnull(i_category_id#9), 5), true, [id=#24] +Arguments: hashpartitioning(coalesce(i_brand_id#7, 0), isnull(i_brand_id#7), coalesce(i_class_id#8, 0), isnull(i_class_id#8), coalesce(i_category_id#9, 0), isnull(i_category_id#9), 5), ENSURE_REQUIREMENTS, [id=#24] (52) Sort [codegen id : 14] Input [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] @@ -465,7 +453,7 @@ Results [3]: [brand_id#14, class_id#15, category_id#16] (55) Exchange Input [3]: [brand_id#14, class_id#15, category_id#16] -Arguments: hashpartitioning(brand_id#14, class_id#15, category_id#16, 5), true, [id=#25] +Arguments: hashpartitioning(brand_id#14, class_id#15, category_id#16, 5), ENSURE_REQUIREMENTS, [id=#25] (56) HashAggregate [codegen id : 16] Input [3]: [brand_id#14, class_id#15, category_id#16] @@ -503,7 +491,7 @@ Input [7]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, brand_id#1 (62) Exchange Input [1]: [ss_item_sk#27] -Arguments: hashpartitioning(ss_item_sk#27, 5), true, [id=#28] +Arguments: hashpartitioning(ss_item_sk#27, 5), ENSURE_REQUIREMENTS, [id=#28] (63) Sort [codegen id : 18] Input [1]: [ss_item_sk#27] @@ -561,7 +549,7 @@ Condition : isnotnull(i_item_sk#6) (75) Exchange Input [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] -Arguments: hashpartitioning(i_item_sk#6, 5), true, [id=#31] +Arguments: hashpartitioning(i_item_sk#6, 5), ENSURE_REQUIREMENTS, [id=#31] (76) Sort [codegen id : 21] Input [4]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9] @@ -601,22 +589,22 @@ Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#36, 
isEmpty#37, c (84) Exchange Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#36, isEmpty#37, count#38] -Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#39] +Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#39] (85) HashAggregate [codegen id : 39] Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#36, isEmpty#37, count#38] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40, count(1)#41] -Results [7]: [store AS channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sales#43, count(1)#41 AS number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sales#42, count(1)#41 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#40 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] (86) Filter [codegen id : 39] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45 as decimal(32,6)) > cast(Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44 as decimal(32,6)) > cast(Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (87) Project [codegen id : 39] -Output [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#45] +Output [6]: [store AS channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#44] (88) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#18, cs_item_sk#19, cs_quantity#48, cs_list_price#49] @@ -634,7 +622,7 @@ Condition : (isnotnull(cs_item_sk#19) AND isnotnull(cs_sold_date_sk#18)) (91) Exchange Input [4]: [cs_sold_date_sk#18, cs_item_sk#19, cs_quantity#48, cs_list_price#49] -Arguments: hashpartitioning(cs_item_sk#19, 5), true, [id=#50] +Arguments: hashpartitioning(cs_item_sk#19, 5), ENSURE_REQUIREMENTS, [id=#50] (92) Sort [codegen id : 41] Input [4]: [cs_sold_date_sk#18, cs_item_sk#19, cs_quantity#48, cs_list_price#49] @@ -685,22 +673,22 @@ Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#54, isEmpty#55, c (103) Exchange Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#54, isEmpty#55, count#56] -Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#57] +Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#57] (104) HashAggregate [codegen id : 78] Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#54, isEmpty#55, count#56] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58, count(1)#59] -Results [7]: [catalog AS channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sales#61, count(1)#59 AS number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, 
sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sales#60, count(1)#59 AS number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#58 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62] (105) Filter [codegen id : 78] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (106) Project [codegen id : 78] -Output [6]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#63] +Output [6]: [catalog AS channel#63, i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#62] (107) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#22, ws_item_sk#23, ws_quantity#64, ws_list_price#65] @@ -718,7 +706,7 @@ Condition : (isnotnull(ws_item_sk#23) AND isnotnull(ws_sold_date_sk#22)) (110) Exchange Input [4]: [ws_sold_date_sk#22, ws_item_sk#23, ws_quantity#64, ws_list_price#65] -Arguments: hashpartitioning(ws_item_sk#23, 5), true, [id=#66] +Arguments: hashpartitioning(ws_item_sk#23, 5), ENSURE_REQUIREMENTS, [id=#66] (111) Sort [codegen id : 80] Input [4]: 
[ws_sold_date_sk#22, ws_item_sk#23, ws_quantity#64, ws_list_price#65] @@ -769,42 +757,42 @@ Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#70, isEmpty#71, c (122) Exchange Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#70, isEmpty#71, count#72] -Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#73] +Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#73] (123) HashAggregate [codegen id : 117] Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#70, isEmpty#71, count#72] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74, count(1)#75] -Results [7]: [web AS channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sales#77, count(1)#75 AS number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sales#76, count(1)#75 AS number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#74 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78] (124) Filter [codegen id : 117] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as 
decimal(12,2)))), DecimalType(18,2), true))#78] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (125) Project [codegen id : 117] -Output [6]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#79] +Output [6]: [web AS channel#79, i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#78] (126) Union (127) HashAggregate [codegen id : 118] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [partial_sum(sales#43), partial_sum(number_sales#44)] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [partial_sum(sales#42), partial_sum(number_sales#43)] Aggregate Attributes [3]: [sum#80, isEmpty#81, sum#82] -Results [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#83, isEmpty#84, sum#85] +Results [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#83, isEmpty#84, sum#85] (128) Exchange -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#83, isEmpty#84, sum#85] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#86] +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#83, isEmpty#84, sum#85] +Arguments: hashpartitioning(channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#86] (129) HashAggregate [codegen id : 119] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#83, isEmpty#84, sum#85] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [sum(sales#43), sum(number_sales#44)] -Aggregate Attributes [2]: [sum(sales#43)#87, sum(number_sales#44)#88] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum(sales#43)#87 AS sum_sales#89, sum(number_sales#44)#88 AS number_sales#90] +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#83, isEmpty#84, sum#85] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [sum(sales#42), sum(number_sales#43)] +Aggregate Attributes [2]: [sum(sales#42)#87, sum(number_sales#43)#88] +Results [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum(sales#42)#87 AS sum_sales#89, sum(number_sales#43)#88 AS number_sales#90] (130) ReusedExchange [Reuses operator id: 84] Output 
[6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#91, isEmpty#92, count#93] @@ -814,15 +802,15 @@ Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#91, isEmpty#92, cou Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#94, count(1)#95] -Results [7]: [store AS channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#94 AS sales#43, count(1)#95 AS number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#94 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#94 AS sales#42, count(1)#95 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#94 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96] (132) Filter [codegen id : 158] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), 
DecimalType(18,2), true))#96 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (133) Project [codegen id : 158] -Output [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96] +Output [6]: [store AS channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#96] (134) ReusedExchange [Reuses operator id: 103] Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#97, isEmpty#98, count#99] @@ -832,629 +820,569 @@ Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#97, isEmpty#98, cou Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#100, count(1)#101] -Results [7]: [catalog AS channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#100 AS sales#61, count(1)#101 AS number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#100 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#100 AS sales#60, count(1)#101 AS number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#100 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102] (136) Filter [codegen id : 197] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 
as decimal(12,2)))), DecimalType(18,2), true))#102) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (137) Project [codegen id : 197] -Output [6]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102] +Output [6]: [catalog AS channel#103, i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#102] (138) ReusedExchange [Reuses operator id: 122] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#103, isEmpty#104, count#105] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#104, isEmpty#105, count#106] (139) HashAggregate [codegen id : 236] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#103, isEmpty#104, count#105] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#104, isEmpty#105, count#106] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#106, count(1)#107] -Results [7]: [web AS channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#106 AS sales#77, count(1)#107 AS number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#106 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#108] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#107, count(1)#108] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#107 AS sales#76, count(1)#108 AS number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#107 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#109] (140) Filter [codegen id : 236] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#108] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#108) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#108 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#109] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#109) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#109 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) (141) Project [codegen id : 236] -Output [6]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#108] +Output [6]: [web AS channel#110, i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#109] (142) Union (143) HashAggregate [codegen id : 237] -Input [6]: [channel#42, i_brand_id#7, 
i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [partial_sum(sales#43), partial_sum(number_sales#44)] -Aggregate Attributes [3]: [sum#109, isEmpty#110, sum#111] -Results [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#112, isEmpty#113, sum#114] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [partial_sum(sales#42), partial_sum(number_sales#43)] +Aggregate Attributes [3]: [sum#111, isEmpty#112, sum#113] +Results [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#114, isEmpty#115, sum#116] (144) Exchange -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#112, isEmpty#113, sum#114] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#115] +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#114, isEmpty#115, sum#116] +Arguments: hashpartitioning(channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#117] (145) HashAggregate [codegen id : 238] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#112, isEmpty#113, sum#114] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [sum(sales#43), sum(number_sales#44)] -Aggregate Attributes [2]: [sum(sales#43)#116, sum(number_sales#44)#117] -Results [5]: [channel#42, i_brand_id#7, i_class_id#8, sum(sales#43)#116 AS sum_sales#89, sum(number_sales#44)#117 AS number_sales#90] +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#114, isEmpty#115, sum#116] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [sum(sales#42), sum(number_sales#43)] +Aggregate Attributes [2]: [sum(sales#42)#118, sum(number_sales#43)#119] +Results [5]: [channel#47, i_brand_id#7, i_class_id#8, sum(sales#42)#118 AS sum_sales#89, sum(number_sales#43)#119 AS number_sales#90] (146) HashAggregate [codegen id : 238] -Input [5]: [channel#42, i_brand_id#7, i_class_id#8, sum_sales#89, number_sales#90] -Keys [3]: [channel#42, i_brand_id#7, i_class_id#8] +Input [5]: [channel#47, i_brand_id#7, i_class_id#8, sum_sales#89, number_sales#90] +Keys [3]: [channel#47, i_brand_id#7, i_class_id#8] Functions [2]: [partial_sum(sum_sales#89), partial_sum(number_sales#90)] -Aggregate Attributes [3]: [sum#118, isEmpty#119, sum#120] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, sum#121, isEmpty#122, sum#123] +Aggregate Attributes [3]: [sum#120, isEmpty#121, sum#122] +Results [6]: [channel#47, i_brand_id#7, i_class_id#8, sum#123, isEmpty#124, sum#125] (147) Exchange -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, sum#121, isEmpty#122, sum#123] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, 5), true, [id=#124] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, sum#123, isEmpty#124, sum#125] +Arguments: hashpartitioning(channel#47, i_brand_id#7, i_class_id#8, 5), ENSURE_REQUIREMENTS, [id=#126] (148) HashAggregate [codegen id : 239] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, sum#121, isEmpty#122, sum#123] -Keys [3]: [channel#42, i_brand_id#7, i_class_id#8] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, sum#123, isEmpty#124, sum#125] +Keys [3]: [channel#47, i_brand_id#7, i_class_id#8] Functions [2]: [sum(sum_sales#89), sum(number_sales#90)] -Aggregate Attributes [2]: 
[sum(sum_sales#89)#125, sum(number_sales#90)#126] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, null AS i_category_id#127, sum(sum_sales#89)#125 AS sum(sum_sales)#128, sum(number_sales#90)#126 AS sum(number_sales)#129] +Aggregate Attributes [2]: [sum(sum_sales#89)#127, sum(number_sales#90)#128] +Results [6]: [channel#47, i_brand_id#7, i_class_id#8, null AS i_category_id#129, sum(sum_sales#89)#127 AS sum(sum_sales)#130, sum(number_sales#90)#128 AS sum(number_sales)#131] -(149) Union +(149) ReusedExchange [Reuses operator id: 84] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#132, isEmpty#133, count#134] -(150) HashAggregate [codegen id : 240] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] - -(151) Exchange -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90, 5), true, [id=#130] - -(152) HashAggregate [codegen id : 241] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] - -(153) ReusedExchange [Reuses operator id: 84] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#131, isEmpty#132, count#133] - -(154) HashAggregate [codegen id : 280] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#131, isEmpty#132, count#133] +(150) HashAggregate [codegen id : 278] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#132, isEmpty#133, count#134] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#134, count(1)#135] -Results [7]: [store AS channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#134 AS sales#43, count(1)#135 AS number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#134 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#136] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#135, count(1)#136] +Results [6]: [i_brand_id#7, i_class_id#8, 
i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#135 AS sales#42, count(1)#136 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#135 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#137] -(155) Filter [codegen id : 280] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#136] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#136) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#136 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +(151) Filter [codegen id : 278] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#137] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#137) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#137 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) -(156) Project [codegen id : 280] -Output [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#136] +(152) Project [codegen id : 278] +Output [6]: [store AS channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#137] -(157) ReusedExchange [Reuses operator id: 103] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#137, isEmpty#138, count#139] +(153) ReusedExchange [Reuses operator id: 103] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#138, isEmpty#139, count#140] -(158) HashAggregate [codegen id : 319] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#137, isEmpty#138, count#139] +(154) 
HashAggregate [codegen id : 317] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#138, isEmpty#139, count#140] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#140, count(1)#141] -Results [7]: [catalog AS channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#140 AS sales#61, count(1)#141 AS number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#140 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#142] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#141, count(1)#142] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#141 AS sales#60, count(1)#142 AS number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#141 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#143] -(159) Filter [codegen id : 319] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#142] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#142) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#142 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +(155) Filter [codegen id : 317] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#143] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#143) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#143 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) -(160) Project [codegen id : 319] -Output [6]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#142] +(156) Project [codegen id : 317] +Output [6]: [catalog AS channel#144, i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#143] -(161) ReusedExchange [Reuses operator id: 122] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#143, isEmpty#144, count#145] +(157) ReusedExchange [Reuses operator id: 122] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#145, isEmpty#146, count#147] -(162) HashAggregate [codegen id : 358] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#143, isEmpty#144, count#145] +(158) HashAggregate [codegen id : 356] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#145, isEmpty#146, count#147] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#146, count(1)#147] -Results [7]: [web AS channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#146 AS sales#77, count(1)#147 AS number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#146 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148] - -(163) Filter [codegen id : 358] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148) AND 
(cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) - -(164) Project [codegen id : 358] -Output [6]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148] - -(165) Union - -(166) HashAggregate [codegen id : 359] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [partial_sum(sales#43), partial_sum(number_sales#44)] -Aggregate Attributes [3]: [sum#149, isEmpty#150, sum#151] -Results [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#152, isEmpty#153, sum#154] - -(167) Exchange -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#152, isEmpty#153, sum#154] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#155] - -(168) HashAggregate [codegen id : 360] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#152, isEmpty#153, sum#154] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [sum(sales#43), sum(number_sales#44)] -Aggregate Attributes [2]: [sum(sales#43)#156, sum(number_sales#44)#157] -Results [4]: [channel#42, i_brand_id#7, sum(sales#43)#156 AS sum_sales#89, sum(number_sales#44)#157 AS number_sales#90] - -(169) HashAggregate [codegen id : 360] -Input [4]: [channel#42, i_brand_id#7, sum_sales#89, number_sales#90] -Keys [2]: [channel#42, i_brand_id#7] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148, count(1)#149] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148 AS sales#76, count(1)#149 AS number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#148 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#150] + +(159) Filter [codegen id : 356] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#150] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#150) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as 
decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#150 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) + +(160) Project [codegen id : 356] +Output [6]: [web AS channel#151, i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#150] + +(161) Union + +(162) HashAggregate [codegen id : 357] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [partial_sum(sales#42), partial_sum(number_sales#43)] +Aggregate Attributes [3]: [sum#152, isEmpty#153, sum#154] +Results [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#155, isEmpty#156, sum#157] + +(163) Exchange +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#155, isEmpty#156, sum#157] +Arguments: hashpartitioning(channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#158] + +(164) HashAggregate [codegen id : 358] +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#155, isEmpty#156, sum#157] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [sum(sales#42), sum(number_sales#43)] +Aggregate Attributes [2]: [sum(sales#42)#159, sum(number_sales#43)#160] +Results [4]: [channel#47, i_brand_id#7, sum(sales#42)#159 AS sum_sales#89, sum(number_sales#43)#160 AS number_sales#90] + +(165) HashAggregate [codegen id : 358] +Input [4]: [channel#47, i_brand_id#7, sum_sales#89, number_sales#90] +Keys [2]: [channel#47, i_brand_id#7] Functions [2]: [partial_sum(sum_sales#89), partial_sum(number_sales#90)] -Aggregate Attributes [3]: [sum#158, isEmpty#159, sum#160] -Results [5]: [channel#42, i_brand_id#7, sum#161, isEmpty#162, sum#163] +Aggregate Attributes [3]: [sum#161, isEmpty#162, sum#163] +Results [5]: [channel#47, i_brand_id#7, sum#164, isEmpty#165, sum#166] -(170) Exchange -Input [5]: [channel#42, i_brand_id#7, sum#161, isEmpty#162, sum#163] -Arguments: hashpartitioning(channel#42, i_brand_id#7, 5), true, [id=#164] +(166) Exchange +Input [5]: [channel#47, i_brand_id#7, sum#164, isEmpty#165, sum#166] +Arguments: hashpartitioning(channel#47, i_brand_id#7, 5), ENSURE_REQUIREMENTS, [id=#167] -(171) HashAggregate [codegen id : 361] -Input [5]: [channel#42, i_brand_id#7, sum#161, isEmpty#162, sum#163] -Keys [2]: [channel#42, i_brand_id#7] +(167) HashAggregate [codegen id : 359] +Input [5]: [channel#47, i_brand_id#7, sum#164, isEmpty#165, sum#166] +Keys [2]: [channel#47, i_brand_id#7] Functions [2]: [sum(sum_sales#89), sum(number_sales#90)] -Aggregate Attributes [2]: [sum(sum_sales#89)#165, sum(number_sales#90)#166] -Results [6]: [channel#42, i_brand_id#7, null AS i_class_id#167, null AS i_category_id#168, sum(sum_sales#89)#165 AS sum(sum_sales)#169, sum(number_sales#90)#166 AS sum(number_sales)#170] - -(172) Union - -(173) HashAggregate [codegen id : 362] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#42, 
i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +Aggregate Attributes [2]: [sum(sum_sales#89)#168, sum(number_sales#90)#169] +Results [6]: [channel#47, i_brand_id#7, null AS i_class_id#170, null AS i_category_id#171, sum(sum_sales#89)#168 AS sum(sum_sales)#172, sum(number_sales#90)#169 AS sum(number_sales)#173] -(174) Exchange -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90, 5), true, [id=#171] +(168) ReusedExchange [Reuses operator id: 84] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#174, isEmpty#175, count#176] -(175) HashAggregate [codegen id : 363] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] - -(176) ReusedExchange [Reuses operator id: 84] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#172, isEmpty#173, count#174] - -(177) HashAggregate [codegen id : 402] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#172, isEmpty#173, count#174] +(169) HashAggregate [codegen id : 398] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#174, isEmpty#175, count#176] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#175, count(1)#176] -Results [7]: [store AS channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#175 AS sales#43, count(1)#176 AS number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#175 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177, count(1)#178] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177 AS sales#42, count(1)#178 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#179] -(178) Filter [codegen id : 402] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +(170) Filter [codegen id : 398] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#179] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#179) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#179 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) -(179) Project [codegen id : 402] -Output [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#177] +(171) Project [codegen id : 398] +Output [6]: [store AS channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#179] -(180) ReusedExchange [Reuses operator id: 103] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#178, isEmpty#179, count#180] +(172) ReusedExchange [Reuses operator id: 103] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#180, isEmpty#181, count#182] -(181) HashAggregate [codegen id : 441] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#178, isEmpty#179, count#180] +(173) HashAggregate [codegen id : 437] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#180, isEmpty#181, count#182] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#181, count(1)#182] -Results [7]: [catalog AS channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#181 AS sales#61, count(1)#182 AS number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#181 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183, count(1)#184] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183 AS sales#60, count(1)#184 AS number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#185] -(182) Filter [codegen id : 441] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +(174) Filter [codegen id : 437] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#185] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#185) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#185 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) -(183) Project [codegen id : 441] -Output [6]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62] -Input [7]: 
[channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#183] +(175) Project [codegen id : 437] +Output [6]: [catalog AS channel#186, i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#185] -(184) ReusedExchange [Reuses operator id: 122] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#184, isEmpty#185, count#186] +(176) ReusedExchange [Reuses operator id: 122] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#187, isEmpty#188, count#189] -(185) HashAggregate [codegen id : 480] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#184, isEmpty#185, count#186] +(177) HashAggregate [codegen id : 476] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#187, isEmpty#188, count#189] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#187, count(1)#188] -Results [7]: [web AS channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#187 AS sales#77, count(1)#188 AS number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#187 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#189] - -(186) Filter [codegen id : 480] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#189] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#189) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#189 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) - -(187) Project [codegen id : 480] -Output [6]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, 
sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#189] - -(188) Union - -(189) HashAggregate [codegen id : 481] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [partial_sum(sales#43), partial_sum(number_sales#44)] -Aggregate Attributes [3]: [sum#190, isEmpty#191, sum#192] -Results [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#193, isEmpty#194, sum#195] - -(190) Exchange -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#193, isEmpty#194, sum#195] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#196] - -(191) HashAggregate [codegen id : 482] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#193, isEmpty#194, sum#195] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [sum(sales#43), sum(number_sales#44)] -Aggregate Attributes [2]: [sum(sales#43)#197, sum(number_sales#44)#198] -Results [3]: [channel#42, sum(sales#43)#197 AS sum_sales#89, sum(number_sales#44)#198 AS number_sales#90] - -(192) HashAggregate [codegen id : 482] -Input [3]: [channel#42, sum_sales#89, number_sales#90] -Keys [1]: [channel#42] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#190, count(1)#191] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#190 AS sales#76, count(1)#191 AS number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#190 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#192] + +(178) Filter [codegen id : 476] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#192] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#192) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#192 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) + +(179) Project [codegen id : 476] +Output [6]: [web AS channel#193, i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#192] + +(180) Union + +(181) HashAggregate [codegen id : 477] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [partial_sum(sales#42), partial_sum(number_sales#43)] +Aggregate Attributes [3]: [sum#194, isEmpty#195, sum#196] +Results [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#197, isEmpty#198, sum#199] + +(182) Exchange +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#197, isEmpty#198, sum#199] +Arguments: hashpartitioning(channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#200] + +(183) HashAggregate [codegen id : 478] +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#197, isEmpty#198, sum#199] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [sum(sales#42), sum(number_sales#43)] +Aggregate Attributes [2]: [sum(sales#42)#201, sum(number_sales#43)#202] +Results [3]: [channel#47, sum(sales#42)#201 AS sum_sales#89, sum(number_sales#43)#202 AS number_sales#90] + +(184) HashAggregate [codegen id : 478] +Input [3]: [channel#47, sum_sales#89, number_sales#90] +Keys [1]: [channel#47] Functions [2]: [partial_sum(sum_sales#89), partial_sum(number_sales#90)] -Aggregate Attributes [3]: [sum#199, isEmpty#200, sum#201] -Results [4]: [channel#42, sum#202, isEmpty#203, sum#204] +Aggregate Attributes [3]: [sum#203, isEmpty#204, sum#205] +Results [4]: [channel#47, sum#206, isEmpty#207, sum#208] -(193) Exchange -Input [4]: [channel#42, sum#202, isEmpty#203, sum#204] -Arguments: hashpartitioning(channel#42, 5), true, [id=#205] +(185) Exchange +Input [4]: [channel#47, sum#206, isEmpty#207, sum#208] +Arguments: hashpartitioning(channel#47, 5), ENSURE_REQUIREMENTS, [id=#209] -(194) HashAggregate [codegen id : 483] -Input [4]: [channel#42, sum#202, isEmpty#203, sum#204] -Keys [1]: [channel#42] +(186) HashAggregate [codegen id : 479] +Input [4]: [channel#47, sum#206, isEmpty#207, sum#208] +Keys [1]: [channel#47] Functions [2]: [sum(sum_sales#89), sum(number_sales#90)] -Aggregate Attributes [2]: [sum(sum_sales#89)#206, sum(number_sales#90)#207] -Results [6]: [channel#42, null AS i_brand_id#208, null AS i_class_id#209, null AS i_category_id#210, sum(sum_sales#89)#206 AS sum(sum_sales)#211, sum(number_sales#90)#207 AS sum(number_sales)#212] - -(195) Union - -(196) HashAggregate [codegen id : 484] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] - -(197) Exchange -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90, 5), true, [id=#213] - -(198) HashAggregate [codegen id : 485] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, 
number_sales#90] +Aggregate Attributes [2]: [sum(sum_sales#89)#210, sum(number_sales#90)#211] +Results [6]: [channel#47, null AS i_brand_id#212, null AS i_class_id#213, null AS i_category_id#214, sum(sum_sales#89)#210 AS sum(sum_sales)#215, sum(number_sales#90)#211 AS sum(number_sales)#216] -(199) ReusedExchange [Reuses operator id: 84] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#214, isEmpty#215, count#216] +(187) ReusedExchange [Reuses operator id: 84] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#217, isEmpty#218, count#219] -(200) HashAggregate [codegen id : 524] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#214, isEmpty#215, count#216] +(188) HashAggregate [codegen id : 518] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#217, isEmpty#218, count#219] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217, count(1)#218] -Results [7]: [store AS channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217 AS sales#43, count(1)#218 AS number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#219] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#220, count(1)#221] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#220 AS sales#42, count(1)#221 AS number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#220 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#222] -(201) Filter [codegen id : 524] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#219] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#219) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 
as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#219 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +(189) Filter [codegen id : 518] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#222] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#222) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#222 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) -(202) Project [codegen id : 524] -Output [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#219] +(190) Project [codegen id : 518] +Output [6]: [store AS channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#222] -(203) ReusedExchange [Reuses operator id: 103] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#220, isEmpty#221, count#222] +(191) ReusedExchange [Reuses operator id: 103] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#223, isEmpty#224, count#225] -(204) HashAggregate [codegen id : 563] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#220, isEmpty#221, count#222] +(192) HashAggregate [codegen id : 557] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#223, isEmpty#224, count#225] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#223, count(1)#224] -Results [7]: [catalog AS channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#223 AS sales#61, count(1)#224 AS number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#223 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#225] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#226, count(1)#227] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#226 AS sales#60, count(1)#227 AS number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#226 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#228] -(205) Filter [codegen id : 563] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#225] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#225) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#225 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) +(193) Filter [codegen id : 557] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#228] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#228) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#228 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) -(206) Project [codegen id : 563] -Output [6]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62] -Input [7]: [channel#60, i_brand_id#7, i_class_id#8, i_category_id#9, sales#61, number_sales#62, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#225] +(194) Project [codegen id : 557] +Output [6]: [catalog AS channel#229, i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#60, number_sales#61, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#48 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#49 as decimal(12,2)))), DecimalType(18,2), true))#228] -(207) ReusedExchange [Reuses 
operator id: 122] -Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#226, isEmpty#227, count#228] +(195) ReusedExchange [Reuses operator id: 122] +Output [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#230, isEmpty#231, count#232] -(208) HashAggregate [codegen id : 602] -Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#226, isEmpty#227, count#228] +(196) HashAggregate [codegen id : 596] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum#230, isEmpty#231, count#232] Keys [3]: [i_brand_id#7, i_class_id#8, i_category_id#9] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#229, count(1)#230] -Results [7]: [web AS channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#229 AS sales#77, count(1)#230 AS number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#229 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#231] - -(209) Filter [codegen id : 602] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#231] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#231) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#231 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#46, [id=#47] as decimal(32,6)))) - -(210) Project [codegen id : 602] -Output [6]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78] -Input [7]: [channel#76, i_brand_id#7, i_class_id#8, i_category_id#9, sales#77, number_sales#78, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#231] - -(211) Union - -(212) HashAggregate [codegen id : 603] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sales#43, number_sales#44] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [partial_sum(sales#43), partial_sum(number_sales#44)] -Aggregate Attributes [3]: [sum#232, isEmpty#233, sum#234] -Results [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#235, isEmpty#236, sum#237] - -(213) Exchange -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#235, isEmpty#236, 
sum#237] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, 5), true, [id=#238] - -(214) HashAggregate [codegen id : 604] -Input [7]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum#235, isEmpty#236, sum#237] -Keys [4]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9] -Functions [2]: [sum(sales#43), sum(number_sales#44)] -Aggregate Attributes [2]: [sum(sales#43)#239, sum(number_sales#44)#240] -Results [2]: [sum(sales#43)#239 AS sum_sales#89, sum(number_sales#44)#240 AS number_sales#90] - -(215) HashAggregate [codegen id : 604] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#233, count(1)#234] +Results [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#233 AS sales#76, count(1)#234 AS number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#233 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#235] + +(197) Filter [codegen id : 596] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#235] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#235) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#235 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#45, [id=#46] as decimal(32,6)))) + +(198) Project [codegen id : 596] +Output [6]: [web AS channel#236, i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77] +Input [6]: [i_brand_id#7, i_class_id#8, i_category_id#9, sales#76, number_sales#77, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#64 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#65 as decimal(12,2)))), DecimalType(18,2), true))#235] + +(199) Union + +(200) HashAggregate [codegen id : 597] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sales#42, number_sales#43] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [partial_sum(sales#42), partial_sum(number_sales#43)] +Aggregate Attributes [3]: [sum#237, isEmpty#238, sum#239] +Results [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#240, isEmpty#241, sum#242] + +(201) Exchange +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum#240, isEmpty#241, sum#242] +Arguments: hashpartitioning(channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, 5), ENSURE_REQUIREMENTS, [id=#243] + +(202) HashAggregate [codegen id : 598] +Input [7]: [channel#47, i_brand_id#7, i_class_id#8, 
i_category_id#9, sum#240, isEmpty#241, sum#242] +Keys [4]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9] +Functions [2]: [sum(sales#42), sum(number_sales#43)] +Aggregate Attributes [2]: [sum(sales#42)#244, sum(number_sales#43)#245] +Results [2]: [sum(sales#42)#244 AS sum_sales#89, sum(number_sales#43)#245 AS number_sales#90] + +(203) HashAggregate [codegen id : 598] Input [2]: [sum_sales#89, number_sales#90] Keys: [] Functions [2]: [partial_sum(sum_sales#89), partial_sum(number_sales#90)] -Aggregate Attributes [3]: [sum#241, isEmpty#242, sum#243] -Results [3]: [sum#244, isEmpty#245, sum#246] +Aggregate Attributes [3]: [sum#246, isEmpty#247, sum#248] +Results [3]: [sum#249, isEmpty#250, sum#251] -(216) Exchange -Input [3]: [sum#244, isEmpty#245, sum#246] -Arguments: SinglePartition, true, [id=#247] +(204) Exchange +Input [3]: [sum#249, isEmpty#250, sum#251] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#252] -(217) HashAggregate [codegen id : 605] -Input [3]: [sum#244, isEmpty#245, sum#246] +(205) HashAggregate [codegen id : 599] +Input [3]: [sum#249, isEmpty#250, sum#251] Keys: [] Functions [2]: [sum(sum_sales#89), sum(number_sales#90)] -Aggregate Attributes [2]: [sum(sum_sales#89)#248, sum(number_sales#90)#249] -Results [6]: [null AS channel#250, null AS i_brand_id#251, null AS i_class_id#252, null AS i_category_id#253, sum(sum_sales#89)#248 AS sum(sum_sales)#254, sum(number_sales#90)#249 AS sum(number_sales)#255] +Aggregate Attributes [2]: [sum(sum_sales#89)#253, sum(number_sales#90)#254] +Results [6]: [null AS channel#255, null AS i_brand_id#256, null AS i_class_id#257, null AS i_category_id#258, sum(sum_sales#89)#253 AS sum(sum_sales)#259, sum(number_sales#90)#254 AS sum(number_sales)#260] -(218) Union +(206) Union -(219) HashAggregate [codegen id : 606] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +(207) HashAggregate [codegen id : 600] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +Keys [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] Functions: [] Aggregate Attributes: [] -Results [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +Results [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -(220) Exchange -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Arguments: hashpartitioning(channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90, 5), true, [id=#256] +(208) Exchange +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +Arguments: hashpartitioning(channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90, 5), ENSURE_REQUIREMENTS, [id=#261] -(221) HashAggregate [codegen id : 607] -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Keys [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +(209) HashAggregate [codegen id : 601] +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +Keys [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] Functions: [] Aggregate Attributes: [] -Results [6]: 
[channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +Results [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -(222) TakeOrderedAndProject -Input [6]: [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] -Arguments: 100, [channel#42 ASC NULLS FIRST, i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST], [channel#42, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +(210) TakeOrderedAndProject +Input [6]: [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] +Arguments: 100, [channel#47 ASC NULLS FIRST, i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST], [channel#47, i_brand_id#7, i_class_id#8, i_category_id#9, sum_sales#89, number_sales#90] ===== Subqueries ===== -Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#46, [id=#47] -* HashAggregate (252) -+- Exchange (251) - +- * HashAggregate (250) - +- Union (249) - :- * Project (232) - : +- * BroadcastHashJoin Inner BuildRight (231) - : :- * Filter (225) - : : +- * ColumnarToRow (224) - : : +- Scan parquet default.store_sales (223) - : +- BroadcastExchange (230) - : +- * Project (229) - : +- * Filter (228) - : +- * ColumnarToRow (227) - : +- Scan parquet default.date_dim (226) - :- * Project (242) - : +- * BroadcastHashJoin Inner BuildRight (241) - : :- * Filter (235) - : : +- * ColumnarToRow (234) - : : +- Scan parquet default.catalog_sales (233) - : +- BroadcastExchange (240) - : +- * Project (239) - : +- * Filter (238) - : +- * ColumnarToRow (237) - : +- Scan parquet default.date_dim (236) - +- * Project (248) - +- * BroadcastHashJoin Inner BuildRight (247) - :- * Filter (245) - : +- * ColumnarToRow (244) - : +- Scan parquet default.web_sales (243) - +- ReusedExchange (246) - - -(223) Scan parquet default.store_sales +Subquery:1 Hosting operator id = 86 Hosting Expression = Subquery scalar-subquery#45, [id=#46] +* HashAggregate (240) ++- Exchange (239) + +- * HashAggregate (238) + +- Union (237) + :- * Project (220) + : +- * BroadcastHashJoin Inner BuildRight (219) + : :- * Filter (213) + : : +- * ColumnarToRow (212) + : : +- Scan parquet default.store_sales (211) + : +- BroadcastExchange (218) + : +- * Project (217) + : +- * Filter (216) + : +- * ColumnarToRow (215) + : +- Scan parquet default.date_dim (214) + :- * Project (230) + : +- * BroadcastHashJoin Inner BuildRight (229) + : :- * Filter (223) + : : +- * ColumnarToRow (222) + : : +- Scan parquet default.catalog_sales (221) + : +- BroadcastExchange (228) + : +- * Project (227) + : +- * Filter (226) + : +- * ColumnarToRow (225) + : +- Scan parquet default.date_dim (224) + +- * Project (236) + +- * BroadcastHashJoin Inner BuildRight (235) + :- * Filter (233) + : +- * ColumnarToRow (232) + : +- Scan parquet default.web_sales (231) + +- ReusedExchange (234) + + +(211) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(224) ColumnarToRow [codegen id : 2] +(212) ColumnarToRow [codegen id : 2] Input [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] -(225) Filter [codegen id : 2] +(213) Filter [codegen id : 2] Input [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Condition : 
isnotnull(ss_sold_date_sk#1) -(226) Scan parquet default.date_dim +(214) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct -(227) ColumnarToRow [codegen id : 1] +(215) ColumnarToRow [codegen id : 1] Input [2]: [d_date_sk#10, d_year#11] -(228) Filter [codegen id : 1] +(216) Filter [codegen id : 1] Input [2]: [d_date_sk#10, d_year#11] Condition : (((isnotnull(d_year#11) AND (d_year#11 >= 1999)) AND (d_year#11 <= 2001)) AND isnotnull(d_date_sk#10)) -(229) Project [codegen id : 1] +(217) Project [codegen id : 1] Output [1]: [d_date_sk#10] Input [2]: [d_date_sk#10, d_year#11] -(230) BroadcastExchange +(218) BroadcastExchange Input [1]: [d_date_sk#10] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#257] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#262] -(231) BroadcastHashJoin [codegen id : 2] +(219) BroadcastHashJoin [codegen id : 2] Left keys [1]: [ss_sold_date_sk#1] Right keys [1]: [d_date_sk#10] Join condition: None -(232) Project [codegen id : 2] -Output [2]: [ss_quantity#3 AS quantity#258, ss_list_price#4 AS list_price#259] +(220) Project [codegen id : 2] +Output [2]: [ss_quantity#3 AS quantity#263, ss_list_price#4 AS list_price#264] Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] -(233) Scan parquet default.catalog_sales +(221) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(234) ColumnarToRow [codegen id : 4] +(222) ColumnarToRow [codegen id : 4] Input [3]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49] -(235) Filter [codegen id : 4] +(223) Filter [codegen id : 4] Input [3]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49] Condition : isnotnull(cs_sold_date_sk#18) -(236) Scan parquet default.date_dim +(224) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct -(237) ColumnarToRow [codegen id : 3] +(225) ColumnarToRow [codegen id : 3] Input [2]: [d_date_sk#10, d_year#11] -(238) Filter [codegen id : 3] +(226) Filter [codegen id : 3] Input [2]: [d_date_sk#10, d_year#11] Condition : (((isnotnull(d_year#11) AND (d_year#11 >= 1998)) AND (d_year#11 <= 2000)) AND isnotnull(d_date_sk#10)) -(239) Project [codegen id : 3] +(227) Project [codegen id : 3] Output [1]: [d_date_sk#10] Input [2]: [d_date_sk#10, d_year#11] -(240) BroadcastExchange +(228) BroadcastExchange Input [1]: [d_date_sk#10] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#260] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#265] -(241) BroadcastHashJoin [codegen id : 4] +(229) BroadcastHashJoin [codegen id : 4] Left keys [1]: [cs_sold_date_sk#18] Right keys [1]: [d_date_sk#10] Join condition: None -(242) Project [codegen id : 4] -Output [2]: [cs_quantity#48 AS quantity#261, cs_list_price#49 AS list_price#262] +(230) Project [codegen id : 4] 
+Output [2]: [cs_quantity#48 AS quantity#266, cs_list_price#49 AS list_price#267] Input [4]: [cs_sold_date_sk#18, cs_quantity#48, cs_list_price#49, d_date_sk#10] -(243) Scan parquet default.web_sales +(231) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#22, ws_quantity#64, ws_list_price#65] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(244) ColumnarToRow [codegen id : 6] +(232) ColumnarToRow [codegen id : 6] Input [3]: [ws_sold_date_sk#22, ws_quantity#64, ws_list_price#65] -(245) Filter [codegen id : 6] +(233) Filter [codegen id : 6] Input [3]: [ws_sold_date_sk#22, ws_quantity#64, ws_list_price#65] Condition : isnotnull(ws_sold_date_sk#22) -(246) ReusedExchange [Reuses operator id: 240] +(234) ReusedExchange [Reuses operator id: 228] Output [1]: [d_date_sk#10] -(247) BroadcastHashJoin [codegen id : 6] +(235) BroadcastHashJoin [codegen id : 6] Left keys [1]: [ws_sold_date_sk#22] Right keys [1]: [d_date_sk#10] Join condition: None -(248) Project [codegen id : 6] -Output [2]: [ws_quantity#64 AS quantity#263, ws_list_price#65 AS list_price#264] +(236) Project [codegen id : 6] +Output [2]: [ws_quantity#64 AS quantity#268, ws_list_price#65 AS list_price#269] Input [4]: [ws_sold_date_sk#22, ws_quantity#64, ws_list_price#65, d_date_sk#10] -(249) Union +(237) Union -(250) HashAggregate [codegen id : 7] -Input [2]: [quantity#258, list_price#259] +(238) HashAggregate [codegen id : 7] +Input [2]: [quantity#263, list_price#264] Keys: [] -Functions [1]: [partial_avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#265, count#266] -Results [2]: [sum#267, count#268] +Functions [1]: [partial_avg(CheckOverflow((promote_precision(cast(cast(quantity#263 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#264 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#270, count#271] +Results [2]: [sum#272, count#273] -(251) Exchange -Input [2]: [sum#267, count#268] -Arguments: SinglePartition, true, [id=#269] +(239) Exchange +Input [2]: [sum#272, count#273] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#274] -(252) HashAggregate [codegen id : 8] -Input [2]: [sum#267, count#268] +(240) HashAggregate [codegen id : 8] +Input [2]: [sum#272, count#273] Keys: [] -Functions [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))#270] -Results [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))#270 AS average_sales#271] +Functions [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#263 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#264 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#263 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#264 as decimal(12,2)))), DecimalType(18,2), 
true))#275] +Results [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#263 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#264 as decimal(12,2)))), DecimalType(18,2), true))#275 AS average_sales#276] -Subquery:2 Hosting operator id = 105 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:2 Hosting operator id = 105 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:3 Hosting operator id = 124 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:3 Hosting operator id = 124 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:4 Hosting operator id = 132 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:4 Hosting operator id = 132 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:5 Hosting operator id = 136 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:5 Hosting operator id = 136 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:6 Hosting operator id = 140 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:6 Hosting operator id = 140 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:7 Hosting operator id = 155 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:7 Hosting operator id = 151 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:8 Hosting operator id = 159 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:8 Hosting operator id = 155 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:9 Hosting operator id = 163 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:9 Hosting operator id = 159 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:10 Hosting operator id = 178 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:10 Hosting operator id = 170 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:11 Hosting operator id = 182 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:11 Hosting operator id = 174 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:12 Hosting operator id = 186 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:12 Hosting operator id = 178 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:13 Hosting operator id = 201 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:13 Hosting operator id = 189 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:14 Hosting operator id = 205 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:14 Hosting operator id = 193 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] -Subquery:15 Hosting operator id = 209 Hosting Expression = ReusedSubquery Subquery scalar-subquery#46, [id=#47] +Subquery:15 Hosting operator id = 197 Hosting Expression = ReusedSubquery Subquery scalar-subquery#45, [id=#46] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/simplified.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/simplified.txt index e4a9b46cf741d..c63f1b8a75643 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a.sf100/simplified.txt @@ -1,427 +1,403 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] - WholeStageCodegen (607) + WholeStageCodegen (601) HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] InputAdapter Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #1 - WholeStageCodegen (606) + WholeStageCodegen (600) HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] InputAdapter Union - WholeStageCodegen (485) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + WholeStageCodegen (119) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #2 - WholeStageCodegen (484) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + Exchange [channel,i_brand_id,i_class_id,i_category_id] #2 + WholeStageCodegen (118) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] InputAdapter Union - WholeStageCodegen (363) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] - InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #3 - WholeStageCodegen (362) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + WholeStageCodegen (39) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + Subquery #1 + WholeStageCodegen (8) + HashAggregate [sum,count] [avg(CheckOverflow((promote_precision(cast(cast(quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price as decimal(12,2)))), DecimalType(18,2), true)),average_sales,sum,count] InputAdapter - Union - WholeStageCodegen (241) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + Exchange #17 + WholeStageCodegen (7) + HashAggregate [quantity,list_price] [sum,count,sum,count] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #4 - WholeStageCodegen (240) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] - InputAdapter - Union - WholeStageCodegen (119) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] - InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #5 - WholeStageCodegen (118) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (39) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as 
decimal(12,2)))), DecimalType(18,2), true))] - Subquery #1 - WholeStageCodegen (8) - HashAggregate [sum,count] [avg(CheckOverflow((promote_precision(cast(cast(quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price as decimal(12,2)))), DecimalType(18,2), true)),average_sales,sum,count] - InputAdapter - Exchange #20 - WholeStageCodegen (7) - HashAggregate [quantity,list_price] [sum,count,sum,count] - InputAdapter - Union - WholeStageCodegen (2) - Project [ss_quantity,ss_list_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_quantity,ss_list_price] - InputAdapter - BroadcastExchange #21 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (4) - Project [cs_quantity,cs_list_price] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_quantity,cs_list_price] - InputAdapter - BroadcastExchange #22 - WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (6) - Project [ws_quantity,ws_list_price] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] - InputAdapter - ReusedExchange [d_date_sk] #22 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #6 - WholeStageCodegen (38) - HashAggregate [i_brand_id,i_class_id,i_category_id,ss_quantity,ss_list_price] [sum,isEmpty,count,sum,isEmpty,count] - Project [ss_quantity,ss_list_price,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_quantity,ss_list_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Union + WholeStageCodegen (2) + Project [ss_quantity,ss_list_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_quantity,ss_list_price] + InputAdapter + BroadcastExchange #18 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (4) + Project [cs_quantity,cs_list_price] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_quantity,cs_list_price] + InputAdapter + BroadcastExchange #19 + WholeStageCodegen (3) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (6) + Project [ws_quantity,ws_list_price] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + 
Filter [ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] + InputAdapter + ReusedExchange [d_date_sk] #19 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + Exchange [i_brand_id,i_class_id,i_category_id] #3 + WholeStageCodegen (38) + HashAggregate [i_brand_id,i_class_id,i_category_id,ss_quantity,ss_list_price] [sum,isEmpty,count,sum,isEmpty,count] + Project [ss_quantity,ss_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_quantity,ss_list_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + InputAdapter + SortMergeJoin [ss_item_sk,ss_item_sk] + WholeStageCodegen (2) + Sort [ss_item_sk] + InputAdapter + Exchange [ss_item_sk] #4 + WholeStageCodegen (1) + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_quantity,ss_list_price] + WholeStageCodegen (18) + Sort [ss_item_sk] + InputAdapter + Exchange [ss_item_sk] #5 + WholeStageCodegen (17) + Project [i_item_sk] + BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,brand_id,class_id,category_id] + Filter [i_brand_id,i_class_id,i_category_id] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (16) + HashAggregate [brand_id,class_id,category_id] + HashAggregate [brand_id,class_id,category_id] + HashAggregate [brand_id,class_id,category_id] + InputAdapter + Exchange [brand_id,class_id,category_id] #7 + WholeStageCodegen (15) + HashAggregate [brand_id,class_id,category_id] + InputAdapter + SortMergeJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] + SortMergeJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] + WholeStageCodegen (6) + Sort [brand_id,class_id,category_id] InputAdapter - SortMergeJoin [ss_item_sk,ss_item_sk] - WholeStageCodegen (2) - Sort [ss_item_sk] - InputAdapter - Exchange [ss_item_sk] #7 - WholeStageCodegen (1) + Exchange [brand_id,class_id,category_id] #8 + WholeStageCodegen (5) + Project [i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] Filter [ss_item_sk,ss_sold_date_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_quantity,ss_list_price] - WholeStageCodegen (18) - Sort [ss_item_sk] - InputAdapter - Exchange [ss_item_sk] #8 - WholeStageCodegen (17) - Project [i_item_sk] - BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,brand_id,class_id,category_id] - Filter [i_brand_id,i_class_id,i_category_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - InputAdapter - BroadcastExchange #9 - WholeStageCodegen (16) - HashAggregate [brand_id,class_id,category_id] - HashAggregate [brand_id,class_id,category_id] - HashAggregate [brand_id,class_id,category_id] - InputAdapter - Exchange 
[brand_id,class_id,category_id] #10 - WholeStageCodegen (15) - HashAggregate [brand_id,class_id,category_id] - InputAdapter - SortMergeJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] - SortMergeJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] - WholeStageCodegen (6) - Sort [brand_id,class_id,category_id] - InputAdapter - Exchange [brand_id,class_id,category_id] #11 - WholeStageCodegen (5) - Project [i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] - InputAdapter - BroadcastExchange #12 - WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - InputAdapter - BroadcastExchange #13 - WholeStageCodegen (4) - Filter [i_item_sk,i_brand_id,i_class_id,i_category_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - WholeStageCodegen (10) - Sort [i_brand_id,i_class_id,i_category_id] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #14 - WholeStageCodegen (9) - Project [i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Project [cs_item_sk] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk] - InputAdapter - ReusedExchange [d_date_sk] #12 - InputAdapter - BroadcastExchange #15 - WholeStageCodegen (8) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - WholeStageCodegen (14) - Sort [i_brand_id,i_class_id,i_category_id] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #16 - WholeStageCodegen (13) - Project [i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ws_item_sk,i_item_sk] - Project [ws_item_sk] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_item_sk,ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk] - InputAdapter - ReusedExchange [d_date_sk] #12 - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #15 - InputAdapter - BroadcastExchange #17 - WholeStageCodegen (19) - Project [d_date_sk] - Filter [d_year,d_moy,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - InputAdapter - BroadcastExchange #18 - SortMergeJoin [i_item_sk,ss_item_sk] - WholeStageCodegen (21) - Sort [i_item_sk] - InputAdapter - Exchange [i_item_sk] #19 - WholeStageCodegen (20) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - WholeStageCodegen (37) - Sort [ss_item_sk] - InputAdapter - ReusedExchange [ss_item_sk] #8 - WholeStageCodegen (78) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as 
decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #23 - WholeStageCodegen (77) - HashAggregate [i_brand_id,i_class_id,i_category_id,cs_quantity,cs_list_price] [sum,isEmpty,count,sum,isEmpty,count] - Project [cs_quantity,cs_list_price,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Project [cs_item_sk,cs_quantity,cs_list_price] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] + InputAdapter + BroadcastExchange #9 + WholeStageCodegen (3) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + InputAdapter + BroadcastExchange #10 + WholeStageCodegen (4) + Filter [i_item_sk,i_brand_id,i_class_id,i_category_id] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] + WholeStageCodegen (10) + Sort [i_brand_id,i_class_id,i_category_id] InputAdapter - SortMergeJoin [cs_item_sk,ss_item_sk] - WholeStageCodegen (41) - Sort [cs_item_sk] - InputAdapter - Exchange [cs_item_sk] #24 - WholeStageCodegen (40) + Exchange [i_brand_id,i_class_id,i_category_id] #11 + WholeStageCodegen (9) + Project [i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Project [cs_item_sk] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] Filter [cs_item_sk,cs_sold_date_sk] ColumnarToRow InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_quantity,cs_list_price] - WholeStageCodegen (57) - Sort [ss_item_sk] - InputAdapter - ReusedExchange [ss_item_sk] #8 - InputAdapter - ReusedExchange [d_date_sk] #17 - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #18 - WholeStageCodegen (117) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #25 - WholeStageCodegen (116) - HashAggregate [i_brand_id,i_class_id,i_category_id,ws_quantity,ws_list_price] [sum,isEmpty,count,sum,isEmpty,count] - Project [ws_quantity,ws_list_price,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ws_item_sk,i_item_sk] - Project [ws_item_sk,ws_quantity,ws_list_price] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - InputAdapter - SortMergeJoin [ws_item_sk,ss_item_sk] - WholeStageCodegen (80) - Sort [ws_item_sk] - InputAdapter - Exchange [ws_item_sk] #26 - WholeStageCodegen (79) - Filter 
[ws_item_sk,ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_quantity,ws_list_price] - WholeStageCodegen (96) - Sort [ss_item_sk] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk] + InputAdapter + ReusedExchange [d_date_sk] #9 + InputAdapter + BroadcastExchange #12 + WholeStageCodegen (8) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] + WholeStageCodegen (14) + Sort [i_brand_id,i_class_id,i_category_id] + InputAdapter + Exchange [i_brand_id,i_class_id,i_category_id] #13 + WholeStageCodegen (13) + Project [i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ws_item_sk,i_item_sk] + Project [ws_item_sk] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_item_sk,ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk] + InputAdapter + ReusedExchange [d_date_sk] #9 InputAdapter - ReusedExchange [ss_item_sk] #8 - InputAdapter - ReusedExchange [d_date_sk] #17 - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #18 - WholeStageCodegen (239) - HashAggregate [channel,i_brand_id,i_class_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #12 + InputAdapter + BroadcastExchange #14 + WholeStageCodegen (19) + Project [d_date_sk] + Filter [d_year,d_moy,d_date_sk] + ColumnarToRow InputAdapter - Exchange [channel,i_brand_id,i_class_id] #27 - WholeStageCodegen (238) - HashAggregate [channel,i_brand_id,i_class_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] - InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #28 - WholeStageCodegen (237) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (158) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (197) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as 
decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #23 - WholeStageCodegen (236) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #25 - WholeStageCodegen (361) - HashAggregate [channel,i_brand_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] InputAdapter - Exchange [channel,i_brand_id] #29 - WholeStageCodegen (360) - HashAggregate [channel,i_brand_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + BroadcastExchange #15 + SortMergeJoin [i_item_sk,ss_item_sk] + WholeStageCodegen (21) + Sort [i_item_sk] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #30 - WholeStageCodegen (359) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (280) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (319) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate 
[i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #23 - WholeStageCodegen (358) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #25 - WholeStageCodegen (483) - HashAggregate [channel,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_brand_id,i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] - InputAdapter - Exchange [channel] #31 - WholeStageCodegen (482) - HashAggregate [channel,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] - InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #32 - WholeStageCodegen (481) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (402) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (441) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] 
[sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #23 - WholeStageCodegen (480) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #25 - WholeStageCodegen (605) + Exchange [i_item_sk] #16 + WholeStageCodegen (20) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] + WholeStageCodegen (37) + Sort [ss_item_sk] + InputAdapter + ReusedExchange [ss_item_sk] #5 + WholeStageCodegen (78) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + Exchange [i_brand_id,i_class_id,i_category_id] #20 + WholeStageCodegen (77) + HashAggregate [i_brand_id,i_class_id,i_category_id,cs_quantity,cs_list_price] [sum,isEmpty,count,sum,isEmpty,count] + Project [cs_quantity,cs_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Project [cs_item_sk,cs_quantity,cs_list_price] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + InputAdapter + SortMergeJoin [cs_item_sk,ss_item_sk] + WholeStageCodegen (41) + Sort [cs_item_sk] + InputAdapter + Exchange [cs_item_sk] #21 + WholeStageCodegen (40) + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_quantity,cs_list_price] + WholeStageCodegen (57) + Sort [ss_item_sk] + InputAdapter + ReusedExchange [ss_item_sk] #5 + InputAdapter + ReusedExchange [d_date_sk] #14 + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #15 + WholeStageCodegen (117) + Project 
[i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + Exchange [i_brand_id,i_class_id,i_category_id] #22 + WholeStageCodegen (116) + HashAggregate [i_brand_id,i_class_id,i_category_id,ws_quantity,ws_list_price] [sum,isEmpty,count,sum,isEmpty,count] + Project [ws_quantity,ws_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ws_item_sk,i_item_sk] + Project [ws_item_sk,ws_quantity,ws_list_price] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + InputAdapter + SortMergeJoin [ws_item_sk,ss_item_sk] + WholeStageCodegen (80) + Sort [ws_item_sk] + InputAdapter + Exchange [ws_item_sk] #23 + WholeStageCodegen (79) + Filter [ws_item_sk,ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_quantity,ws_list_price] + WholeStageCodegen (96) + Sort [ss_item_sk] + InputAdapter + ReusedExchange [ss_item_sk] #5 + InputAdapter + ReusedExchange [d_date_sk] #14 + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #15 + WholeStageCodegen (239) + HashAggregate [channel,i_brand_id,i_class_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id] #24 + WholeStageCodegen (238) + HashAggregate [channel,i_brand_id,i_class_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id,i_category_id] #25 + WholeStageCodegen (237) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + InputAdapter + Union + WholeStageCodegen (158) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (197) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter 
[sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 + WholeStageCodegen (236) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #22 + WholeStageCodegen (359) + HashAggregate [channel,i_brand_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id] #26 + WholeStageCodegen (358) + HashAggregate [channel,i_brand_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id,i_category_id] #27 + WholeStageCodegen (357) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + InputAdapter + Union + WholeStageCodegen (278) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (317) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 + WholeStageCodegen (356) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #22 + WholeStageCodegen (479) + HashAggregate [channel,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_brand_id,i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + InputAdapter + Exchange [channel] #28 + WholeStageCodegen (478) + HashAggregate [channel,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id,i_category_id] #29 + WholeStageCodegen (477) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + InputAdapter + Union + WholeStageCodegen (398) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (437) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + 
HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 + WholeStageCodegen (476) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #22 + WholeStageCodegen (599) HashAggregate [sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),channel,i_brand_id,i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] InputAdapter - Exchange #33 - WholeStageCodegen (604) + Exchange #30 + WholeStageCodegen (598) HashAggregate [sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #34 - WholeStageCodegen (603) + Exchange [channel,i_brand_id,i_class_id,i_category_id] #31 + WholeStageCodegen (597) HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] InputAdapter Union - WholeStageCodegen (524) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + WholeStageCodegen (518) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as 
decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (563) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (557) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #23 - WholeStageCodegen (602) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 + WholeStageCodegen (596) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #25 + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #22 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt index c54ad0e36216d..4e60a9b6b1547 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/explain.txt @@ -1,210 +1,198 @@ == Physical Plan == -TakeOrderedAndProject (206) -+- * HashAggregate (205) - +- Exchange (204) - +- * HashAggregate (203) - +- Union (202) - :- * HashAggregate (182) - : +- Exchange (181) - : +- * HashAggregate (180) - : +- Union (179) - : :- * HashAggregate (159) - : : +- Exchange (158) - : : +- * HashAggregate (157) - : : +- Union (156) - : : :- * HashAggregate (136) - : : : +- Exchange (135) - : : : +- * HashAggregate (134) - : : : +- Union (133) - : : : :- * HashAggregate (113) - : : : : +- Exchange (112) - : : : : +- * HashAggregate (111) - : : : : +- Union (110) - : : : : :- * Project (77) - : : : : : +- * Filter (76) - : : : : : +- * HashAggregate (75) - : : : : : +- Exchange (74) - : : : : : +- * HashAggregate (73) - : : : : : +- * Project (72) - : : : : : +- * BroadcastHashJoin Inner BuildRight (71) - : : : : : :- * Project (65) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (64) - : : : : : : :- * BroadcastHashJoin LeftSemi BuildRight (57) - : : : : : : : :- * Filter (3) - : : : : : : : : +- * ColumnarToRow (2) - : : : : : : : : +- Scan parquet default.store_sales (1) - : : : : : : : +- BroadcastExchange (56) - : : : : : : : +- * Project (55) - : : : : : : : +- * BroadcastHashJoin Inner BuildRight (54) - : : : : : : : :- * Filter (6) - : : : : : : : : +- * ColumnarToRow (5) - : : : : : : : : +- Scan parquet default.item (4) - : : : : : : : +- BroadcastExchange (53) - : : : : : : : +- * HashAggregate (52) - : : : : : : : +- * HashAggregate (51) - : : : : : : : +- * HashAggregate (50) - : : : : : : : +- Exchange (49) - : : : : : : : +- * HashAggregate (48) - : : : : : : : +- * BroadcastHashJoin LeftSemi BuildRight (47) - : : : : : : : :- * BroadcastHashJoin LeftSemi BuildRight (36) - : : : : : : : : :- * Project (22) - : : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (21) - : : : : : : : : : :- * Project (15) - : : : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (14) - : : : : : : : : : : :- * Filter (9) - : : : : : : : : : : : +- * ColumnarToRow (8) - : : : : : : : : : : : +- Scan parquet default.store_sales (7) - : : : : : : : : : : +- BroadcastExchange (13) - : : : : : : : : : : +- * Filter (12) - : : : : : : : : : : +- * ColumnarToRow (11) - : : : : : : : : : : +- Scan parquet default.item (10) - : : : : : : : : : +- BroadcastExchange (20) - : : : : : : : : : +- * Project (19) - : : : : : : : : : +- * Filter (18) - : : : : : : : : : +- * ColumnarToRow (17) - : : : : : : : : : +- Scan parquet default.date_dim (16) - : : : : : : : : +- BroadcastExchange (35) - : : : : : : : : +- * Project (34) - : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (33) - : : : : : : : : :- * Project (31) - : : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (30) - : : : : : : : : : :- * Filter (25) - : : : : : : : : : : +- * ColumnarToRow (24) - : : : : : : : : : : +- Scan parquet default.catalog_sales (23) - : : : : : : : : : +- BroadcastExchange (29) - : : : : : : : : : +- * Filter (28) - : : : : : : : : : +- * ColumnarToRow (27) - : : : : : : : : : +- Scan parquet default.item (26) - : : : : : : : : +- ReusedExchange (32) - : : : : : : : +- BroadcastExchange (46) - : : : : : : : +- * Project (45) - : : : : : : : +- * 
BroadcastHashJoin Inner BuildRight (44) - : : : : : : : :- * Project (42) - : : : : : : : : +- * BroadcastHashJoin Inner BuildRight (41) - : : : : : : : : :- * Filter (39) - : : : : : : : : : +- * ColumnarToRow (38) - : : : : : : : : : +- Scan parquet default.web_sales (37) - : : : : : : : : +- ReusedExchange (40) - : : : : : : : +- ReusedExchange (43) - : : : : : : +- BroadcastExchange (63) - : : : : : : +- * BroadcastHashJoin LeftSemi BuildRight (62) - : : : : : : :- * Filter (60) - : : : : : : : +- * ColumnarToRow (59) - : : : : : : : +- Scan parquet default.item (58) - : : : : : : +- ReusedExchange (61) - : : : : : +- BroadcastExchange (70) - : : : : : +- * Project (69) - : : : : : +- * Filter (68) - : : : : : +- * ColumnarToRow (67) - : : : : : +- Scan parquet default.date_dim (66) - : : : : :- * Project (93) - : : : : : +- * Filter (92) - : : : : : +- * HashAggregate (91) - : : : : : +- Exchange (90) - : : : : : +- * HashAggregate (89) - : : : : : +- * Project (88) - : : : : : +- * BroadcastHashJoin Inner BuildRight (87) - : : : : : :- * Project (85) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (84) - : : : : : : :- * BroadcastHashJoin LeftSemi BuildRight (82) - : : : : : : : :- * Filter (80) - : : : : : : : : +- * ColumnarToRow (79) - : : : : : : : : +- Scan parquet default.catalog_sales (78) - : : : : : : : +- ReusedExchange (81) - : : : : : : +- ReusedExchange (83) - : : : : : +- ReusedExchange (86) - : : : : +- * Project (109) - : : : : +- * Filter (108) - : : : : +- * HashAggregate (107) - : : : : +- Exchange (106) - : : : : +- * HashAggregate (105) - : : : : +- * Project (104) - : : : : +- * BroadcastHashJoin Inner BuildRight (103) - : : : : :- * Project (101) - : : : : : +- * BroadcastHashJoin Inner BuildRight (100) - : : : : : :- * BroadcastHashJoin LeftSemi BuildRight (98) - : : : : : : :- * Filter (96) - : : : : : : : +- * ColumnarToRow (95) - : : : : : : : +- Scan parquet default.web_sales (94) - : : : : : : +- ReusedExchange (97) - : : : : : +- ReusedExchange (99) - : : : : +- ReusedExchange (102) - : : : +- * HashAggregate (132) - : : : +- Exchange (131) - : : : +- * HashAggregate (130) - : : : +- * HashAggregate (129) - : : : +- Exchange (128) - : : : +- * HashAggregate (127) - : : : +- Union (126) - : : : :- * Project (117) - : : : : +- * Filter (116) - : : : : +- * HashAggregate (115) - : : : : +- ReusedExchange (114) - : : : :- * Project (121) - : : : : +- * Filter (120) - : : : : +- * HashAggregate (119) - : : : : +- ReusedExchange (118) - : : : +- * Project (125) - : : : +- * Filter (124) - : : : +- * HashAggregate (123) - : : : +- ReusedExchange (122) - : : +- * HashAggregate (155) - : : +- Exchange (154) - : : +- * HashAggregate (153) - : : +- * HashAggregate (152) - : : +- Exchange (151) - : : +- * HashAggregate (150) - : : +- Union (149) - : : :- * Project (140) - : : : +- * Filter (139) - : : : +- * HashAggregate (138) - : : : +- ReusedExchange (137) - : : :- * Project (144) - : : : +- * Filter (143) - : : : +- * HashAggregate (142) - : : : +- ReusedExchange (141) - : : +- * Project (148) - : : +- * Filter (147) - : : +- * HashAggregate (146) - : : +- ReusedExchange (145) - : +- * HashAggregate (178) - : +- Exchange (177) - : +- * HashAggregate (176) - : +- * HashAggregate (175) - : +- Exchange (174) - : +- * HashAggregate (173) - : +- Union (172) - : :- * Project (163) - : : +- * Filter (162) - : : +- * HashAggregate (161) - : : +- ReusedExchange (160) - : :- * Project (167) - : : +- * Filter (166) - : : +- * HashAggregate (165) - : : +- 
ReusedExchange (164) - : +- * Project (171) - : +- * Filter (170) - : +- * HashAggregate (169) - : +- ReusedExchange (168) - +- * HashAggregate (201) - +- Exchange (200) - +- * HashAggregate (199) - +- * HashAggregate (198) - +- Exchange (197) - +- * HashAggregate (196) - +- Union (195) - :- * Project (186) - : +- * Filter (185) - : +- * HashAggregate (184) - : +- ReusedExchange (183) - :- * Project (190) - : +- * Filter (189) - : +- * HashAggregate (188) - : +- ReusedExchange (187) - +- * Project (194) - +- * Filter (193) - +- * HashAggregate (192) - +- ReusedExchange (191) +TakeOrderedAndProject (194) ++- * HashAggregate (193) + +- Exchange (192) + +- * HashAggregate (191) + +- Union (190) + :- * HashAggregate (113) + : +- Exchange (112) + : +- * HashAggregate (111) + : +- Union (110) + : :- * Project (77) + : : +- * Filter (76) + : : +- * HashAggregate (75) + : : +- Exchange (74) + : : +- * HashAggregate (73) + : : +- * Project (72) + : : +- * BroadcastHashJoin Inner BuildRight (71) + : : :- * Project (65) + : : : +- * BroadcastHashJoin Inner BuildRight (64) + : : : :- * BroadcastHashJoin LeftSemi BuildRight (57) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- BroadcastExchange (56) + : : : : +- * Project (55) + : : : : +- * BroadcastHashJoin Inner BuildRight (54) + : : : : :- * Filter (6) + : : : : : +- * ColumnarToRow (5) + : : : : : +- Scan parquet default.item (4) + : : : : +- BroadcastExchange (53) + : : : : +- * HashAggregate (52) + : : : : +- * HashAggregate (51) + : : : : +- * HashAggregate (50) + : : : : +- Exchange (49) + : : : : +- * HashAggregate (48) + : : : : +- * BroadcastHashJoin LeftSemi BuildRight (47) + : : : : :- * BroadcastHashJoin LeftSemi BuildRight (36) + : : : : : :- * Project (22) + : : : : : : +- * BroadcastHashJoin Inner BuildRight (21) + : : : : : : :- * Project (15) + : : : : : : : +- * BroadcastHashJoin Inner BuildRight (14) + : : : : : : : :- * Filter (9) + : : : : : : : : +- * ColumnarToRow (8) + : : : : : : : : +- Scan parquet default.store_sales (7) + : : : : : : : +- BroadcastExchange (13) + : : : : : : : +- * Filter (12) + : : : : : : : +- * ColumnarToRow (11) + : : : : : : : +- Scan parquet default.item (10) + : : : : : : +- BroadcastExchange (20) + : : : : : : +- * Project (19) + : : : : : : +- * Filter (18) + : : : : : : +- * ColumnarToRow (17) + : : : : : : +- Scan parquet default.date_dim (16) + : : : : : +- BroadcastExchange (35) + : : : : : +- * Project (34) + : : : : : +- * BroadcastHashJoin Inner BuildRight (33) + : : : : : :- * Project (31) + : : : : : : +- * BroadcastHashJoin Inner BuildRight (30) + : : : : : : :- * Filter (25) + : : : : : : : +- * ColumnarToRow (24) + : : : : : : : +- Scan parquet default.catalog_sales (23) + : : : : : : +- BroadcastExchange (29) + : : : : : : +- * Filter (28) + : : : : : : +- * ColumnarToRow (27) + : : : : : : +- Scan parquet default.item (26) + : : : : : +- ReusedExchange (32) + : : : : +- BroadcastExchange (46) + : : : : +- * Project (45) + : : : : +- * BroadcastHashJoin Inner BuildRight (44) + : : : : :- * Project (42) + : : : : : +- * BroadcastHashJoin Inner BuildRight (41) + : : : : : :- * Filter (39) + : : : : : : +- * ColumnarToRow (38) + : : : : : : +- Scan parquet default.web_sales (37) + : : : : : +- ReusedExchange (40) + : : : : +- ReusedExchange (43) + : : : +- BroadcastExchange (63) + : : : +- * BroadcastHashJoin LeftSemi BuildRight (62) + : : : :- * Filter (60) + : : : : +- * ColumnarToRow (59) + : : : : +- Scan 
parquet default.item (58) + : : : +- ReusedExchange (61) + : : +- BroadcastExchange (70) + : : +- * Project (69) + : : +- * Filter (68) + : : +- * ColumnarToRow (67) + : : +- Scan parquet default.date_dim (66) + : :- * Project (93) + : : +- * Filter (92) + : : +- * HashAggregate (91) + : : +- Exchange (90) + : : +- * HashAggregate (89) + : : +- * Project (88) + : : +- * BroadcastHashJoin Inner BuildRight (87) + : : :- * Project (85) + : : : +- * BroadcastHashJoin Inner BuildRight (84) + : : : :- * BroadcastHashJoin LeftSemi BuildRight (82) + : : : : :- * Filter (80) + : : : : : +- * ColumnarToRow (79) + : : : : : +- Scan parquet default.catalog_sales (78) + : : : : +- ReusedExchange (81) + : : : +- ReusedExchange (83) + : : +- ReusedExchange (86) + : +- * Project (109) + : +- * Filter (108) + : +- * HashAggregate (107) + : +- Exchange (106) + : +- * HashAggregate (105) + : +- * Project (104) + : +- * BroadcastHashJoin Inner BuildRight (103) + : :- * Project (101) + : : +- * BroadcastHashJoin Inner BuildRight (100) + : : :- * BroadcastHashJoin LeftSemi BuildRight (98) + : : : :- * Filter (96) + : : : : +- * ColumnarToRow (95) + : : : : +- Scan parquet default.web_sales (94) + : : : +- ReusedExchange (97) + : : +- ReusedExchange (99) + : +- ReusedExchange (102) + :- * HashAggregate (132) + : +- Exchange (131) + : +- * HashAggregate (130) + : +- * HashAggregate (129) + : +- Exchange (128) + : +- * HashAggregate (127) + : +- Union (126) + : :- * Project (117) + : : +- * Filter (116) + : : +- * HashAggregate (115) + : : +- ReusedExchange (114) + : :- * Project (121) + : : +- * Filter (120) + : : +- * HashAggregate (119) + : : +- ReusedExchange (118) + : +- * Project (125) + : +- * Filter (124) + : +- * HashAggregate (123) + : +- ReusedExchange (122) + :- * HashAggregate (151) + : +- Exchange (150) + : +- * HashAggregate (149) + : +- * HashAggregate (148) + : +- Exchange (147) + : +- * HashAggregate (146) + : +- Union (145) + : :- * Project (136) + : : +- * Filter (135) + : : +- * HashAggregate (134) + : : +- ReusedExchange (133) + : :- * Project (140) + : : +- * Filter (139) + : : +- * HashAggregate (138) + : : +- ReusedExchange (137) + : +- * Project (144) + : +- * Filter (143) + : +- * HashAggregate (142) + : +- ReusedExchange (141) + :- * HashAggregate (170) + : +- Exchange (169) + : +- * HashAggregate (168) + : +- * HashAggregate (167) + : +- Exchange (166) + : +- * HashAggregate (165) + : +- Union (164) + : :- * Project (155) + : : +- * Filter (154) + : : +- * HashAggregate (153) + : : +- ReusedExchange (152) + : :- * Project (159) + : : +- * Filter (158) + : : +- * HashAggregate (157) + : : +- ReusedExchange (156) + : +- * Project (163) + : +- * Filter (162) + : +- * HashAggregate (161) + : +- ReusedExchange (160) + +- * HashAggregate (189) + +- Exchange (188) + +- * HashAggregate (187) + +- * HashAggregate (186) + +- Exchange (185) + +- * HashAggregate (184) + +- Union (183) + :- * Project (174) + : +- * Filter (173) + : +- * HashAggregate (172) + : +- ReusedExchange (171) + :- * Project (178) + : +- * Filter (177) + : +- * HashAggregate (176) + : +- ReusedExchange (175) + +- * Project (182) + +- * Filter (181) + +- * HashAggregate (180) + +- ReusedExchange (179) (1) Scan parquet default.store_sales @@ -425,7 +413,7 @@ Results [3]: [brand_id#13, class_id#14, category_id#15] (49) Exchange Input [3]: [brand_id#13, class_id#14, category_id#15] -Arguments: hashpartitioning(brand_id#13, class_id#14, category_id#15, 5), true, [id=#23] +Arguments: hashpartitioning(brand_id#13, class_id#14, 
category_id#15, 5), ENSURE_REQUIREMENTS, [id=#23] (50) HashAggregate [codegen id : 10] Input [3]: [brand_id#13, class_id#14, category_id#15] @@ -545,22 +533,22 @@ Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#33, isEmpty#34, c (74) Exchange Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#33, isEmpty#34, count#35] -Arguments: hashpartitioning(i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#36] +Arguments: hashpartitioning(i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#36] (75) HashAggregate [codegen id : 26] Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#33, isEmpty#34, count#35] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37, count(1)#38] -Results [7]: [store AS channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sales#40, count(1)#38 AS number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sales#39, count(1)#38 AS number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#37 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41] (76) Filter [codegen id : 26] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42 as decimal(32,6)) > cast(Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41 as decimal(32,6)) > cast(Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (77) Project [codegen id : 26] -Output [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#42] +Output [6]: [store AS channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#41] (78) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#16, cs_item_sk#17, cs_quantity#45, cs_list_price#46] @@ -617,22 +605,22 @@ Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#50, isEmpty#51, c (90) Exchange Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#50, isEmpty#51, count#52] -Arguments: hashpartitioning(i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#53] +Arguments: hashpartitioning(i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#53] (91) HashAggregate [codegen id : 52] Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#50, isEmpty#51, count#52] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54, count(1)#55] -Results [7]: [catalog AS channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sales#57, count(1)#55 AS number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sales#56, count(1)#55 AS number_sales#57, 
sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#54 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58] (92) Filter [codegen id : 52] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (93) Project [codegen id : 52] -Output [6]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#59] +Output [6]: [catalog AS channel#59, i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#58] (94) Scan parquet default.web_sales Output [4]: [ws_sold_date_sk#20, ws_item_sk#21, ws_quantity#60, ws_list_price#61] @@ -689,42 +677,42 @@ Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#65, isEmpty#66, c (106) Exchange Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#65, isEmpty#66, count#67] -Arguments: hashpartitioning(i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#68] +Arguments: hashpartitioning(i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#68] (107) HashAggregate [codegen id : 78] Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#65, isEmpty#66, count#67] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: 
[sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69, count(1)#70] -Results [7]: [web AS channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sales#72, count(1)#70 AS number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sales#71, count(1)#70 AS number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#69 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73] (108) Filter [codegen id : 78] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (109) Project [codegen id : 78] -Output [6]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, 
number_sales#73] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#74] +Output [6]: [web AS channel#74, i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#73] (110) Union (111) HashAggregate [codegen id : 79] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [partial_sum(sales#40), partial_sum(number_sales#41)] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [partial_sum(sales#39), partial_sum(number_sales#40)] Aggregate Attributes [3]: [sum#75, isEmpty#76, sum#77] -Results [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#78, isEmpty#79, sum#80] +Results [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#78, isEmpty#79, sum#80] (112) Exchange -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#78, isEmpty#79, sum#80] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#81] +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#78, isEmpty#79, sum#80] +Arguments: hashpartitioning(channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#81] (113) HashAggregate [codegen id : 80] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#78, isEmpty#79, sum#80] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [sum(sales#40), sum(number_sales#41)] -Aggregate Attributes [2]: [sum(sales#40)#82, sum(number_sales#41)#83] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum(sales#40)#82 AS sum_sales#84, sum(number_sales#41)#83 AS number_sales#85] +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#78, isEmpty#79, sum#80] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [sum(sales#39), sum(number_sales#40)] +Aggregate Attributes [2]: [sum(sales#39)#82, sum(number_sales#40)#83] +Results [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum(sales#39)#82 AS sum_sales#84, sum(number_sales#40)#83 AS number_sales#85] (114) ReusedExchange [Reuses operator id: 74] Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#86, isEmpty#87, count#88] @@ -734,15 +722,15 @@ Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#86, isEmpty#87, cou Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#89, 
count(1)#90] -Results [7]: [store AS channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#89 AS sales#40, count(1)#90 AS number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#89 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#89 AS sales#39, count(1)#90 AS number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#89 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91] (116) Filter [codegen id : 106] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (117) Project [codegen id : 106] -Output [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91] +Output [6]: [store AS channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Input [6]: [i_brand_id#6, 
i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#91] (118) ReusedExchange [Reuses operator id: 90] Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#92, isEmpty#93, count#94] @@ -752,629 +740,569 @@ Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#92, isEmpty#93, cou Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#95, count(1)#96] -Results [7]: [catalog AS channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#95 AS sales#57, count(1)#96 AS number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#95 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#95 AS sales#56, count(1)#96 AS number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#95 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97] (120) Filter [codegen id : 132] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97] +Condition : 
(isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (121) Project [codegen id : 132] -Output [6]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97] +Output [6]: [catalog AS channel#98, i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#97] (122) ReusedExchange [Reuses operator id: 106] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#98, isEmpty#99, count#100] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#99, isEmpty#100, count#101] (123) HashAggregate [codegen id : 158] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#98, isEmpty#99, count#100] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#99, isEmpty#100, count#101] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#101, count(1)#102] -Results [7]: [web AS channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#101 AS sales#72, count(1)#102 AS number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#101 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#103] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#102, count(1)#103] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#102 AS sales#71, count(1)#103 AS number_sales#72, 
sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#102 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#104] (124) Filter [codegen id : 158] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#103] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#103) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#103 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#104] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#104) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#104 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) (125) Project [codegen id : 158] -Output [6]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#103] +Output [6]: [web AS channel#105, i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#104] (126) Union (127) HashAggregate [codegen id : 159] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [partial_sum(sales#40), partial_sum(number_sales#41)] -Aggregate Attributes [3]: [sum#104, isEmpty#105, sum#106] -Results [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#107, isEmpty#108, sum#109] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [partial_sum(sales#39), partial_sum(number_sales#40)] +Aggregate Attributes [3]: [sum#106, 
isEmpty#107, sum#108] +Results [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#109, isEmpty#110, sum#111] (128) Exchange -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#107, isEmpty#108, sum#109] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#110] +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#109, isEmpty#110, sum#111] +Arguments: hashpartitioning(channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#112] (129) HashAggregate [codegen id : 160] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#107, isEmpty#108, sum#109] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [sum(sales#40), sum(number_sales#41)] -Aggregate Attributes [2]: [sum(sales#40)#111, sum(number_sales#41)#112] -Results [5]: [channel#39, i_brand_id#6, i_class_id#7, sum(sales#40)#111 AS sum_sales#84, sum(number_sales#41)#112 AS number_sales#85] +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#109, isEmpty#110, sum#111] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [sum(sales#39), sum(number_sales#40)] +Aggregate Attributes [2]: [sum(sales#39)#113, sum(number_sales#40)#114] +Results [5]: [channel#44, i_brand_id#6, i_class_id#7, sum(sales#39)#113 AS sum_sales#84, sum(number_sales#40)#114 AS number_sales#85] (130) HashAggregate [codegen id : 160] -Input [5]: [channel#39, i_brand_id#6, i_class_id#7, sum_sales#84, number_sales#85] -Keys [3]: [channel#39, i_brand_id#6, i_class_id#7] +Input [5]: [channel#44, i_brand_id#6, i_class_id#7, sum_sales#84, number_sales#85] +Keys [3]: [channel#44, i_brand_id#6, i_class_id#7] Functions [2]: [partial_sum(sum_sales#84), partial_sum(number_sales#85)] -Aggregate Attributes [3]: [sum#113, isEmpty#114, sum#115] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, sum#116, isEmpty#117, sum#118] +Aggregate Attributes [3]: [sum#115, isEmpty#116, sum#117] +Results [6]: [channel#44, i_brand_id#6, i_class_id#7, sum#118, isEmpty#119, sum#120] (131) Exchange -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, sum#116, isEmpty#117, sum#118] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, 5), true, [id=#119] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, sum#118, isEmpty#119, sum#120] +Arguments: hashpartitioning(channel#44, i_brand_id#6, i_class_id#7, 5), ENSURE_REQUIREMENTS, [id=#121] (132) HashAggregate [codegen id : 161] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, sum#116, isEmpty#117, sum#118] -Keys [3]: [channel#39, i_brand_id#6, i_class_id#7] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, sum#118, isEmpty#119, sum#120] +Keys [3]: [channel#44, i_brand_id#6, i_class_id#7] Functions [2]: [sum(sum_sales#84), sum(number_sales#85)] -Aggregate Attributes [2]: [sum(sum_sales#84)#120, sum(number_sales#85)#121] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, null AS i_category_id#122, sum(sum_sales#84)#120 AS sum(sum_sales)#123, sum(number_sales#85)#121 AS sum(number_sales)#124] +Aggregate Attributes [2]: [sum(sum_sales#84)#122, sum(number_sales#85)#123] +Results [6]: [channel#44, i_brand_id#6, i_class_id#7, null AS i_category_id#124, sum(sum_sales#84)#122 AS sum(sum_sales)#125, sum(number_sales#85)#123 AS sum(number_sales)#126] -(133) Union +(133) ReusedExchange [Reuses operator id: 74] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#127, isEmpty#128, count#129] 
-(134) HashAggregate [codegen id : 162] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] - -(135) Exchange -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85, 5), true, [id=#125] - -(136) HashAggregate [codegen id : 163] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] - -(137) ReusedExchange [Reuses operator id: 74] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#126, isEmpty#127, count#128] - -(138) HashAggregate [codegen id : 189] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#126, isEmpty#127, count#128] +(134) HashAggregate [codegen id : 187] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#127, isEmpty#128, count#129] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#129, count(1)#130] -Results [7]: [store AS channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#129 AS sales#40, count(1)#130 AS number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#129 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#131] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#130, count(1)#131] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#130 AS sales#39, count(1)#131 AS number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#130 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#132] 
-(139) Filter [codegen id : 189] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#131] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#131) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#131 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +(135) Filter [codegen id : 187] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#132] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#132) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#132 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) -(140) Project [codegen id : 189] -Output [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#131] +(136) Project [codegen id : 187] +Output [6]: [store AS channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#132] -(141) ReusedExchange [Reuses operator id: 90] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#132, isEmpty#133, count#134] +(137) ReusedExchange [Reuses operator id: 90] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#133, isEmpty#134, count#135] -(142) HashAggregate [codegen id : 215] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#132, isEmpty#133, count#134] +(138) HashAggregate [codegen id : 213] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#133, isEmpty#134, count#135] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#135, count(1)#136] -Results [7]: 
[catalog AS channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#135 AS sales#57, count(1)#136 AS number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#135 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#137] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#136, count(1)#137] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#136 AS sales#56, count(1)#137 AS number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#136 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#138] -(143) Filter [codegen id : 215] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#137] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#137) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#137 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +(139) Filter [codegen id : 213] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#138] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#138) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#138 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) -(144) Project [codegen id : 215] -Output [6]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, 
sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#137] +(140) Project [codegen id : 213] +Output [6]: [catalog AS channel#139, i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#138] -(145) ReusedExchange [Reuses operator id: 106] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#138, isEmpty#139, count#140] +(141) ReusedExchange [Reuses operator id: 106] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#140, isEmpty#141, count#142] -(146) HashAggregate [codegen id : 241] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#138, isEmpty#139, count#140] +(142) HashAggregate [codegen id : 239] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#140, isEmpty#141, count#142] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#141, count(1)#142] -Results [7]: [web AS channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#141 AS sales#72, count(1)#142 AS number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#141 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143] - -(147) Filter [codegen id : 241] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) - -(148) Project [codegen id : 241] -Output [6]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143] - -(149) Union - -(150) HashAggregate [codegen id : 242] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [partial_sum(sales#40), partial_sum(number_sales#41)] -Aggregate Attributes [3]: [sum#144, isEmpty#145, sum#146] -Results [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#147, isEmpty#148, sum#149] - -(151) Exchange -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#147, isEmpty#148, sum#149] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#150] - -(152) HashAggregate [codegen id : 243] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#147, isEmpty#148, sum#149] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [sum(sales#40), sum(number_sales#41)] -Aggregate Attributes [2]: [sum(sales#40)#151, sum(number_sales#41)#152] -Results [4]: [channel#39, i_brand_id#6, sum(sales#40)#151 AS sum_sales#84, sum(number_sales#41)#152 AS number_sales#85] - -(153) HashAggregate [codegen id : 243] -Input [4]: [channel#39, i_brand_id#6, sum_sales#84, number_sales#85] -Keys [2]: [channel#39, i_brand_id#6] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143, count(1)#144] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143 AS sales#71, count(1)#144 AS number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#143 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#145] + +(143) Filter [codegen id : 239] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#145] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#145) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#145 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) + +(144) Project [codegen id : 239] +Output [6]: [web AS channel#146, i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#145] + 
+(145) Union + +(146) HashAggregate [codegen id : 240] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [partial_sum(sales#39), partial_sum(number_sales#40)] +Aggregate Attributes [3]: [sum#147, isEmpty#148, sum#149] +Results [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#150, isEmpty#151, sum#152] + +(147) Exchange +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#150, isEmpty#151, sum#152] +Arguments: hashpartitioning(channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#153] + +(148) HashAggregate [codegen id : 241] +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#150, isEmpty#151, sum#152] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [sum(sales#39), sum(number_sales#40)] +Aggregate Attributes [2]: [sum(sales#39)#154, sum(number_sales#40)#155] +Results [4]: [channel#44, i_brand_id#6, sum(sales#39)#154 AS sum_sales#84, sum(number_sales#40)#155 AS number_sales#85] + +(149) HashAggregate [codegen id : 241] +Input [4]: [channel#44, i_brand_id#6, sum_sales#84, number_sales#85] +Keys [2]: [channel#44, i_brand_id#6] Functions [2]: [partial_sum(sum_sales#84), partial_sum(number_sales#85)] -Aggregate Attributes [3]: [sum#153, isEmpty#154, sum#155] -Results [5]: [channel#39, i_brand_id#6, sum#156, isEmpty#157, sum#158] +Aggregate Attributes [3]: [sum#156, isEmpty#157, sum#158] +Results [5]: [channel#44, i_brand_id#6, sum#159, isEmpty#160, sum#161] -(154) Exchange -Input [5]: [channel#39, i_brand_id#6, sum#156, isEmpty#157, sum#158] -Arguments: hashpartitioning(channel#39, i_brand_id#6, 5), true, [id=#159] +(150) Exchange +Input [5]: [channel#44, i_brand_id#6, sum#159, isEmpty#160, sum#161] +Arguments: hashpartitioning(channel#44, i_brand_id#6, 5), ENSURE_REQUIREMENTS, [id=#162] -(155) HashAggregate [codegen id : 244] -Input [5]: [channel#39, i_brand_id#6, sum#156, isEmpty#157, sum#158] -Keys [2]: [channel#39, i_brand_id#6] +(151) HashAggregate [codegen id : 242] +Input [5]: [channel#44, i_brand_id#6, sum#159, isEmpty#160, sum#161] +Keys [2]: [channel#44, i_brand_id#6] Functions [2]: [sum(sum_sales#84), sum(number_sales#85)] -Aggregate Attributes [2]: [sum(sum_sales#84)#160, sum(number_sales#85)#161] -Results [6]: [channel#39, i_brand_id#6, null AS i_class_id#162, null AS i_category_id#163, sum(sum_sales#84)#160 AS sum(sum_sales)#164, sum(number_sales#85)#161 AS sum(number_sales)#165] - -(156) Union - -(157) HashAggregate [codegen id : 245] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Aggregate Attributes [2]: [sum(sum_sales#84)#163, sum(number_sales#85)#164] +Results [6]: [channel#44, i_brand_id#6, null AS i_class_id#165, null AS i_category_id#166, sum(sum_sales#84)#163 AS sum(sum_sales)#167, sum(number_sales#85)#164 AS sum(number_sales)#168] -(158) Exchange -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85, 5), true, [id=#166] +(152) ReusedExchange [Reuses operator 
id: 74] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#169, isEmpty#170, count#171] -(159) HashAggregate [codegen id : 246] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] - -(160) ReusedExchange [Reuses operator id: 74] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#167, isEmpty#168, count#169] - -(161) HashAggregate [codegen id : 272] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#167, isEmpty#168, count#169] +(153) HashAggregate [codegen id : 268] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#169, isEmpty#170, count#171] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#170, count(1)#171] -Results [7]: [store AS channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#170 AS sales#40, count(1)#171 AS number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#170 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172, count(1)#173] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172 AS sales#39, count(1)#173 AS number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#174] -(162) Filter [codegen id : 272] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172) AND 
(cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +(154) Filter [codegen id : 268] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#174] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#174) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#174 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) -(163) Project [codegen id : 272] -Output [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#172] +(155) Project [codegen id : 268] +Output [6]: [store AS channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#174] -(164) ReusedExchange [Reuses operator id: 90] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#173, isEmpty#174, count#175] +(156) ReusedExchange [Reuses operator id: 90] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#175, isEmpty#176, count#177] -(165) HashAggregate [codegen id : 298] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#173, isEmpty#174, count#175] +(157) HashAggregate [codegen id : 294] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#175, isEmpty#176, count#177] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#176, count(1)#177] -Results [7]: [catalog AS channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#176 AS sales#57, count(1)#177 AS number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#176 AS 
sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178, count(1)#179] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178 AS sales#56, count(1)#179 AS number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#180] -(166) Filter [codegen id : 298] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +(158) Filter [codegen id : 294] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#180] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#180) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#180 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) -(167) Project [codegen id : 298] -Output [6]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#178] +(159) Project [codegen id : 294] +Output [6]: [catalog AS channel#181, i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#180] -(168) ReusedExchange [Reuses operator id: 106] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#179, isEmpty#180, count#181] +(160) ReusedExchange [Reuses operator id: 106] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#182, isEmpty#183, count#184] -(169) HashAggregate [codegen id : 324] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#179, isEmpty#180, count#181] +(161) HashAggregate [codegen id : 320] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#182, isEmpty#183, count#184] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#182, count(1)#183] -Results [7]: [web AS channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#182 AS sales#72, count(1)#183 AS number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#182 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#184] - -(170) Filter [codegen id : 324] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#184] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#184) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#184 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) - -(171) Project [codegen id : 324] -Output [6]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#184] - -(172) Union - -(173) HashAggregate [codegen id : 325] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [partial_sum(sales#40), partial_sum(number_sales#41)] -Aggregate Attributes [3]: [sum#185, isEmpty#186, sum#187] -Results [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#188, 
isEmpty#189, sum#190] - -(174) Exchange -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#188, isEmpty#189, sum#190] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#191] - -(175) HashAggregate [codegen id : 326] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#188, isEmpty#189, sum#190] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [sum(sales#40), sum(number_sales#41)] -Aggregate Attributes [2]: [sum(sales#40)#192, sum(number_sales#41)#193] -Results [3]: [channel#39, sum(sales#40)#192 AS sum_sales#84, sum(number_sales#41)#193 AS number_sales#85] - -(176) HashAggregate [codegen id : 326] -Input [3]: [channel#39, sum_sales#84, number_sales#85] -Keys [1]: [channel#39] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#185, count(1)#186] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#185 AS sales#71, count(1)#186 AS number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#185 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#187] + +(162) Filter [codegen id : 320] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#187] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#187) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#187 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) + +(163) Project [codegen id : 320] +Output [6]: [web AS channel#188, i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#187] + +(164) Union + +(165) HashAggregate [codegen id : 321] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [partial_sum(sales#39), partial_sum(number_sales#40)] +Aggregate Attributes [3]: [sum#189, isEmpty#190, sum#191] +Results [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#192, isEmpty#193, sum#194] + +(166) Exchange +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#192, isEmpty#193, 
sum#194] +Arguments: hashpartitioning(channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#195] + +(167) HashAggregate [codegen id : 322] +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#192, isEmpty#193, sum#194] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [sum(sales#39), sum(number_sales#40)] +Aggregate Attributes [2]: [sum(sales#39)#196, sum(number_sales#40)#197] +Results [3]: [channel#44, sum(sales#39)#196 AS sum_sales#84, sum(number_sales#40)#197 AS number_sales#85] + +(168) HashAggregate [codegen id : 322] +Input [3]: [channel#44, sum_sales#84, number_sales#85] +Keys [1]: [channel#44] Functions [2]: [partial_sum(sum_sales#84), partial_sum(number_sales#85)] -Aggregate Attributes [3]: [sum#194, isEmpty#195, sum#196] -Results [4]: [channel#39, sum#197, isEmpty#198, sum#199] +Aggregate Attributes [3]: [sum#198, isEmpty#199, sum#200] +Results [4]: [channel#44, sum#201, isEmpty#202, sum#203] -(177) Exchange -Input [4]: [channel#39, sum#197, isEmpty#198, sum#199] -Arguments: hashpartitioning(channel#39, 5), true, [id=#200] +(169) Exchange +Input [4]: [channel#44, sum#201, isEmpty#202, sum#203] +Arguments: hashpartitioning(channel#44, 5), ENSURE_REQUIREMENTS, [id=#204] -(178) HashAggregate [codegen id : 327] -Input [4]: [channel#39, sum#197, isEmpty#198, sum#199] -Keys [1]: [channel#39] +(170) HashAggregate [codegen id : 323] +Input [4]: [channel#44, sum#201, isEmpty#202, sum#203] +Keys [1]: [channel#44] Functions [2]: [sum(sum_sales#84), sum(number_sales#85)] -Aggregate Attributes [2]: [sum(sum_sales#84)#201, sum(number_sales#85)#202] -Results [6]: [channel#39, null AS i_brand_id#203, null AS i_class_id#204, null AS i_category_id#205, sum(sum_sales#84)#201 AS sum(sum_sales)#206, sum(number_sales#85)#202 AS sum(number_sales)#207] - -(179) Union - -(180) HashAggregate [codegen id : 328] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] - -(181) Exchange -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85, 5), true, [id=#208] - -(182) HashAggregate [codegen id : 329] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Functions: [] -Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Aggregate Attributes [2]: [sum(sum_sales#84)#205, sum(number_sales#85)#206] +Results [6]: [channel#44, null AS i_brand_id#207, null AS i_class_id#208, null AS i_category_id#209, sum(sum_sales#84)#205 AS sum(sum_sales)#210, sum(number_sales#85)#206 AS sum(number_sales)#211] -(183) ReusedExchange [Reuses operator id: 74] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#209, isEmpty#210, count#211] +(171) ReusedExchange [Reuses operator id: 74] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#212, isEmpty#213, count#214] -(184) HashAggregate [codegen id : 355] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, 
sum#209, isEmpty#210, count#211] +(172) HashAggregate [codegen id : 349] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#212, isEmpty#213, count#214] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#212, count(1)#213] -Results [7]: [store AS channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#212 AS sales#40, count(1)#213 AS number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#212 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#214] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#215, count(1)#216] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#215 AS sales#39, count(1)#216 AS number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#215 AS sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217] -(185) Filter [codegen id : 355] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#214] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#214) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#214 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +(173) Filter [codegen id : 349] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * 
promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) -(186) Project [codegen id : 355] -Output [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#214] +(174) Project [codegen id : 349] +Output [6]: [store AS channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40, sum(CheckOverflow((promote_precision(cast(cast(ss_quantity#3 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price#4 as decimal(12,2)))), DecimalType(18,2), true))#217] -(187) ReusedExchange [Reuses operator id: 90] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#215, isEmpty#216, count#217] +(175) ReusedExchange [Reuses operator id: 90] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#218, isEmpty#219, count#220] -(188) HashAggregate [codegen id : 381] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#215, isEmpty#216, count#217] +(176) HashAggregate [codegen id : 375] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#218, isEmpty#219, count#220] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#218, count(1)#219] -Results [7]: [catalog AS channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#218 AS sales#57, count(1)#219 AS number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#218 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#220] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#221, count(1)#222] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#221 AS sales#56, count(1)#222 AS number_sales#57, 
sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#221 AS sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#223] -(189) Filter [codegen id : 381] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#220] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#220) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#220 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) +(177) Filter [codegen id : 375] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#223] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#223) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#223 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) -(190) Project [codegen id : 381] -Output [6]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58] -Input [7]: [channel#56, i_brand_id#6, i_class_id#7, i_category_id#8, sales#57, number_sales#58, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#220] +(178) Project [codegen id : 375] +Output [6]: [catalog AS channel#224, i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#56, number_sales#57, sum(CheckOverflow((promote_precision(cast(cast(cs_quantity#45 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price#46 as decimal(12,2)))), DecimalType(18,2), true))#223] -(191) ReusedExchange [Reuses operator id: 106] -Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#221, isEmpty#222, count#223] +(179) ReusedExchange [Reuses operator id: 106] +Output [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#225, isEmpty#226, count#227] -(192) HashAggregate [codegen id : 407] -Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#221, isEmpty#222, count#223] +(180) HashAggregate [codegen id : 401] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum#225, isEmpty#226, count#227] Keys [3]: [i_brand_id#6, i_class_id#7, i_category_id#8] Functions [2]: 
[sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true)), count(1)] -Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#224, count(1)#225] -Results [7]: [web AS channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#224 AS sales#72, count(1)#225 AS number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#224 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#226] - -(193) Filter [codegen id : 407] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#226] -Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#226) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#226 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#43, [id=#44] as decimal(32,6)))) - -(194) Project [codegen id : 407] -Output [6]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73] -Input [7]: [channel#71, i_brand_id#6, i_class_id#7, i_category_id#8, sales#72, number_sales#73, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#226] - -(195) Union - -(196) HashAggregate [codegen id : 408] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sales#40, number_sales#41] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [partial_sum(sales#40), partial_sum(number_sales#41)] -Aggregate Attributes [3]: [sum#227, isEmpty#228, sum#229] -Results [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#230, isEmpty#231, sum#232] - -(197) Exchange -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#230, isEmpty#231, sum#232] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, 5), true, [id=#233] - -(198) HashAggregate [codegen id : 409] -Input [7]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum#230, isEmpty#231, sum#232] -Keys [4]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8] -Functions [2]: [sum(sales#40), sum(number_sales#41)] -Aggregate Attributes [2]: [sum(sales#40)#234, sum(number_sales#41)#235] -Results [2]: [sum(sales#40)#234 AS sum_sales#84, sum(number_sales#41)#235 AS number_sales#85] - -(199) HashAggregate [codegen id 
: 409] +Aggregate Attributes [2]: [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#228, count(1)#229] +Results [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#228 AS sales#71, count(1)#229 AS number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#228 AS sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#230] + +(181) Filter [codegen id : 401] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#230] +Condition : (isnotnull(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#230) AND (cast(sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#230 as decimal(32,6)) > cast(ReusedSubquery Subquery scalar-subquery#42, [id=#43] as decimal(32,6)))) + +(182) Project [codegen id : 401] +Output [6]: [web AS channel#231, i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72] +Input [6]: [i_brand_id#6, i_class_id#7, i_category_id#8, sales#71, number_sales#72, sum(CheckOverflow((promote_precision(cast(cast(ws_quantity#60 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price#61 as decimal(12,2)))), DecimalType(18,2), true))#230] + +(183) Union + +(184) HashAggregate [codegen id : 402] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sales#39, number_sales#40] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [partial_sum(sales#39), partial_sum(number_sales#40)] +Aggregate Attributes [3]: [sum#232, isEmpty#233, sum#234] +Results [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#235, isEmpty#236, sum#237] + +(185) Exchange +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#235, isEmpty#236, sum#237] +Arguments: hashpartitioning(channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, 5), ENSURE_REQUIREMENTS, [id=#238] + +(186) HashAggregate [codegen id : 403] +Input [7]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum#235, isEmpty#236, sum#237] +Keys [4]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8] +Functions [2]: [sum(sales#39), sum(number_sales#40)] +Aggregate Attributes [2]: [sum(sales#39)#239, sum(number_sales#40)#240] +Results [2]: [sum(sales#39)#239 AS sum_sales#84, sum(number_sales#40)#240 AS number_sales#85] + +(187) HashAggregate [codegen id : 403] Input [2]: [sum_sales#84, number_sales#85] Keys: [] Functions [2]: [partial_sum(sum_sales#84), partial_sum(number_sales#85)] -Aggregate Attributes [3]: [sum#236, isEmpty#237, sum#238] -Results [3]: 
[sum#239, isEmpty#240, sum#241] +Aggregate Attributes [3]: [sum#241, isEmpty#242, sum#243] +Results [3]: [sum#244, isEmpty#245, sum#246] -(200) Exchange -Input [3]: [sum#239, isEmpty#240, sum#241] -Arguments: SinglePartition, true, [id=#242] +(188) Exchange +Input [3]: [sum#244, isEmpty#245, sum#246] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#247] -(201) HashAggregate [codegen id : 410] -Input [3]: [sum#239, isEmpty#240, sum#241] +(189) HashAggregate [codegen id : 404] +Input [3]: [sum#244, isEmpty#245, sum#246] Keys: [] Functions [2]: [sum(sum_sales#84), sum(number_sales#85)] -Aggregate Attributes [2]: [sum(sum_sales#84)#243, sum(number_sales#85)#244] -Results [6]: [null AS channel#245, null AS i_brand_id#246, null AS i_class_id#247, null AS i_category_id#248, sum(sum_sales#84)#243 AS sum(sum_sales)#249, sum(number_sales#85)#244 AS sum(number_sales)#250] +Aggregate Attributes [2]: [sum(sum_sales#84)#248, sum(number_sales#85)#249] +Results [6]: [null AS channel#250, null AS i_brand_id#251, null AS i_class_id#252, null AS i_category_id#253, sum(sum_sales#84)#248 AS sum(sum_sales)#254, sum(number_sales#85)#249 AS sum(number_sales)#255] -(202) Union +(190) Union -(203) HashAggregate [codegen id : 411] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +(191) HashAggregate [codegen id : 405] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Keys [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] Functions: [] Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Results [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -(204) Exchange -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Arguments: hashpartitioning(channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85, 5), true, [id=#251] +(192) Exchange +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Arguments: hashpartitioning(channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85, 5), ENSURE_REQUIREMENTS, [id=#256] -(205) HashAggregate [codegen id : 412] -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Keys [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +(193) HashAggregate [codegen id : 406] +Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Keys [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] Functions: [] Aggregate Attributes: [] -Results [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Results [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -(206) TakeOrderedAndProject -Input [6]: [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] -Arguments: 100, [channel#39 ASC NULLS FIRST, i_brand_id#6 ASC NULLS FIRST, i_class_id#7 ASC NULLS FIRST, i_category_id#8 ASC NULLS FIRST], [channel#39, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +(194) TakeOrderedAndProject 
+Input [6]: [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] +Arguments: 100, [channel#44 ASC NULLS FIRST, i_brand_id#6 ASC NULLS FIRST, i_class_id#7 ASC NULLS FIRST, i_category_id#8 ASC NULLS FIRST], [channel#44, i_brand_id#6, i_class_id#7, i_category_id#8, sum_sales#84, number_sales#85] ===== Subqueries ===== -Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#43, [id=#44] -* HashAggregate (236) -+- Exchange (235) - +- * HashAggregate (234) - +- Union (233) - :- * Project (216) - : +- * BroadcastHashJoin Inner BuildRight (215) - : :- * Filter (209) - : : +- * ColumnarToRow (208) - : : +- Scan parquet default.store_sales (207) - : +- BroadcastExchange (214) - : +- * Project (213) - : +- * Filter (212) - : +- * ColumnarToRow (211) - : +- Scan parquet default.date_dim (210) - :- * Project (226) - : +- * BroadcastHashJoin Inner BuildRight (225) - : :- * Filter (219) - : : +- * ColumnarToRow (218) - : : +- Scan parquet default.catalog_sales (217) - : +- BroadcastExchange (224) - : +- * Project (223) - : +- * Filter (222) - : +- * ColumnarToRow (221) - : +- Scan parquet default.date_dim (220) - +- * Project (232) - +- * BroadcastHashJoin Inner BuildRight (231) - :- * Filter (229) - : +- * ColumnarToRow (228) - : +- Scan parquet default.web_sales (227) - +- ReusedExchange (230) - - -(207) Scan parquet default.store_sales +Subquery:1 Hosting operator id = 76 Hosting Expression = Subquery scalar-subquery#42, [id=#43] +* HashAggregate (224) ++- Exchange (223) + +- * HashAggregate (222) + +- Union (221) + :- * Project (204) + : +- * BroadcastHashJoin Inner BuildRight (203) + : :- * Filter (197) + : : +- * ColumnarToRow (196) + : : +- Scan parquet default.store_sales (195) + : +- BroadcastExchange (202) + : +- * Project (201) + : +- * Filter (200) + : +- * ColumnarToRow (199) + : +- Scan parquet default.date_dim (198) + :- * Project (214) + : +- * BroadcastHashJoin Inner BuildRight (213) + : :- * Filter (207) + : : +- * ColumnarToRow (206) + : : +- Scan parquet default.catalog_sales (205) + : +- BroadcastExchange (212) + : +- * Project (211) + : +- * Filter (210) + : +- * ColumnarToRow (209) + : +- Scan parquet default.date_dim (208) + +- * Project (220) + +- * BroadcastHashJoin Inner BuildRight (219) + :- * Filter (217) + : +- * ColumnarToRow (216) + : +- Scan parquet default.web_sales (215) + +- ReusedExchange (218) + + +(195) Scan parquet default.store_sales Output [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(208) ColumnarToRow [codegen id : 2] +(196) ColumnarToRow [codegen id : 2] Input [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] -(209) Filter [codegen id : 2] +(197) Filter [codegen id : 2] Input [3]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4] Condition : isnotnull(ss_sold_date_sk#1) -(210) Scan parquet default.date_dim +(198) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1999), LessThanOrEqual(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct -(211) ColumnarToRow [codegen id : 1] +(199) ColumnarToRow [codegen id : 1] Input [2]: [d_date_sk#10, d_year#11] -(212) Filter [codegen id : 1] +(200) Filter [codegen id : 1] Input [2]: [d_date_sk#10, d_year#11] Condition : (((isnotnull(d_year#11) 
AND (d_year#11 >= 1999)) AND (d_year#11 <= 2001)) AND isnotnull(d_date_sk#10)) -(213) Project [codegen id : 1] +(201) Project [codegen id : 1] Output [1]: [d_date_sk#10] Input [2]: [d_date_sk#10, d_year#11] -(214) BroadcastExchange +(202) BroadcastExchange Input [1]: [d_date_sk#10] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#252] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#257] -(215) BroadcastHashJoin [codegen id : 2] +(203) BroadcastHashJoin [codegen id : 2] Left keys [1]: [ss_sold_date_sk#1] Right keys [1]: [d_date_sk#10] Join condition: None -(216) Project [codegen id : 2] -Output [2]: [ss_quantity#3 AS quantity#253, ss_list_price#4 AS list_price#254] +(204) Project [codegen id : 2] +Output [2]: [ss_quantity#3 AS quantity#258, ss_list_price#4 AS list_price#259] Input [4]: [ss_sold_date_sk#1, ss_quantity#3, ss_list_price#4, d_date_sk#10] -(217) Scan parquet default.catalog_sales +(205) Scan parquet default.catalog_sales Output [3]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(218) ColumnarToRow [codegen id : 4] +(206) ColumnarToRow [codegen id : 4] Input [3]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46] -(219) Filter [codegen id : 4] +(207) Filter [codegen id : 4] Input [3]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46] Condition : isnotnull(cs_sold_date_sk#16) -(220) Scan parquet default.date_dim +(208) Scan parquet default.date_dim Output [2]: [d_date_sk#10, d_year#11] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), GreaterThanOrEqual(d_year,1998), LessThanOrEqual(d_year,2000), IsNotNull(d_date_sk)] ReadSchema: struct -(221) ColumnarToRow [codegen id : 3] +(209) ColumnarToRow [codegen id : 3] Input [2]: [d_date_sk#10, d_year#11] -(222) Filter [codegen id : 3] +(210) Filter [codegen id : 3] Input [2]: [d_date_sk#10, d_year#11] Condition : (((isnotnull(d_year#11) AND (d_year#11 >= 1998)) AND (d_year#11 <= 2000)) AND isnotnull(d_date_sk#10)) -(223) Project [codegen id : 3] +(211) Project [codegen id : 3] Output [1]: [d_date_sk#10] Input [2]: [d_date_sk#10, d_year#11] -(224) BroadcastExchange +(212) BroadcastExchange Input [1]: [d_date_sk#10] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#255] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#260] -(225) BroadcastHashJoin [codegen id : 4] +(213) BroadcastHashJoin [codegen id : 4] Left keys [1]: [cs_sold_date_sk#16] Right keys [1]: [d_date_sk#10] Join condition: None -(226) Project [codegen id : 4] -Output [2]: [cs_quantity#45 AS quantity#256, cs_list_price#46 AS list_price#257] +(214) Project [codegen id : 4] +Output [2]: [cs_quantity#45 AS quantity#261, cs_list_price#46 AS list_price#262] Input [4]: [cs_sold_date_sk#16, cs_quantity#45, cs_list_price#46, d_date_sk#10] -(227) Scan parquet default.web_sales +(215) Scan parquet default.web_sales Output [3]: [ws_sold_date_sk#20, ws_quantity#60, ws_list_price#61] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(228) ColumnarToRow [codegen id : 6] +(216) ColumnarToRow [codegen id : 6] Input [3]: [ws_sold_date_sk#20, ws_quantity#60, ws_list_price#61] -(229) Filter [codegen 
id : 6] +(217) Filter [codegen id : 6] Input [3]: [ws_sold_date_sk#20, ws_quantity#60, ws_list_price#61] Condition : isnotnull(ws_sold_date_sk#20) -(230) ReusedExchange [Reuses operator id: 224] +(218) ReusedExchange [Reuses operator id: 212] Output [1]: [d_date_sk#10] -(231) BroadcastHashJoin [codegen id : 6] +(219) BroadcastHashJoin [codegen id : 6] Left keys [1]: [ws_sold_date_sk#20] Right keys [1]: [d_date_sk#10] Join condition: None -(232) Project [codegen id : 6] -Output [2]: [ws_quantity#60 AS quantity#258, ws_list_price#61 AS list_price#259] +(220) Project [codegen id : 6] +Output [2]: [ws_quantity#60 AS quantity#263, ws_list_price#61 AS list_price#264] Input [4]: [ws_sold_date_sk#20, ws_quantity#60, ws_list_price#61, d_date_sk#10] -(233) Union +(221) Union -(234) HashAggregate [codegen id : 7] -Input [2]: [quantity#253, list_price#254] +(222) HashAggregate [codegen id : 7] +Input [2]: [quantity#258, list_price#259] Keys: [] -Functions [1]: [partial_avg(CheckOverflow((promote_precision(cast(cast(quantity#253 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#254 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [2]: [sum#260, count#261] -Results [2]: [sum#262, count#263] +Functions [1]: [partial_avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [2]: [sum#265, count#266] +Results [2]: [sum#267, count#268] -(235) Exchange -Input [2]: [sum#262, count#263] -Arguments: SinglePartition, true, [id=#264] +(223) Exchange +Input [2]: [sum#267, count#268] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#269] -(236) HashAggregate [codegen id : 8] -Input [2]: [sum#262, count#263] +(224) HashAggregate [codegen id : 8] +Input [2]: [sum#267, count#268] Keys: [] -Functions [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#253 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#254 as decimal(12,2)))), DecimalType(18,2), true))] -Aggregate Attributes [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#253 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#254 as decimal(12,2)))), DecimalType(18,2), true))#265] -Results [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#253 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#254 as decimal(12,2)))), DecimalType(18,2), true))#265 AS average_sales#266] +Functions [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))] +Aggregate Attributes [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))#270] +Results [1]: [avg(CheckOverflow((promote_precision(cast(cast(quantity#258 as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price#259 as decimal(12,2)))), DecimalType(18,2), true))#270 AS average_sales#271] -Subquery:2 Hosting operator id = 92 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:2 Hosting operator id = 92 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:3 Hosting operator id = 108 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:3 Hosting operator id = 108 Hosting 
Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:4 Hosting operator id = 116 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:4 Hosting operator id = 116 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:5 Hosting operator id = 120 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:5 Hosting operator id = 120 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:6 Hosting operator id = 124 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:6 Hosting operator id = 124 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:7 Hosting operator id = 139 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:7 Hosting operator id = 135 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:8 Hosting operator id = 143 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:8 Hosting operator id = 139 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:9 Hosting operator id = 147 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:9 Hosting operator id = 143 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:10 Hosting operator id = 162 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:10 Hosting operator id = 154 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:11 Hosting operator id = 166 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:11 Hosting operator id = 158 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:12 Hosting operator id = 170 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:12 Hosting operator id = 162 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:13 Hosting operator id = 185 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:13 Hosting operator id = 173 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:14 Hosting operator id = 189 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:14 Hosting operator id = 177 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] -Subquery:15 Hosting operator id = 193 Hosting Expression = ReusedSubquery Subquery scalar-subquery#43, [id=#44] +Subquery:15 Hosting operator id = 181 Hosting Expression = ReusedSubquery Subquery scalar-subquery#42, [id=#43] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/simplified.txt index fc86da1801926..18484308fecaf 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q14a/simplified.txt @@ -1,387 +1,363 @@ TakeOrderedAndProject [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] - WholeStageCodegen (412) + WholeStageCodegen (406) HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] InputAdapter Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #1 - WholeStageCodegen 
(411) + WholeStageCodegen (405) HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] InputAdapter Union - WholeStageCodegen (329) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + WholeStageCodegen (80) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #2 - WholeStageCodegen (328) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + Exchange [channel,i_brand_id,i_class_id,i_category_id] #2 + WholeStageCodegen (79) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] InputAdapter Union - WholeStageCodegen (246) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] - InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #3 - WholeStageCodegen (245) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + WholeStageCodegen (26) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + Subquery #1 + WholeStageCodegen (8) + HashAggregate [sum,count] [avg(CheckOverflow((promote_precision(cast(cast(quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price as decimal(12,2)))), DecimalType(18,2), true)),average_sales,sum,count] InputAdapter - Union - WholeStageCodegen (163) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] + Exchange #14 + WholeStageCodegen (7) + HashAggregate [quantity,list_price] [sum,count,sum,count] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] #4 - WholeStageCodegen (162) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum_sales,number_sales] - InputAdapter - Union - WholeStageCodegen (80) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + Union + WholeStageCodegen (2) + Project [ss_quantity,ss_list_price] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_quantity,ss_list_price] + InputAdapter + BroadcastExchange #15 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (4) + Project [cs_quantity,cs_list_price] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_quantity,cs_list_price] + InputAdapter + BroadcastExchange #16 + WholeStageCodegen (3) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (6) + Project [ws_quantity,ws_list_price] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] + InputAdapter + ReusedExchange [d_date_sk] #16 + 
HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + Exchange [i_brand_id,i_class_id,i_category_id] #3 + WholeStageCodegen (25) + HashAggregate [i_brand_id,i_class_id,i_category_id,ss_quantity,ss_list_price] [sum,isEmpty,count,sum,isEmpty,count] + Project [ss_quantity,ss_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_quantity,ss_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + BroadcastHashJoin [ss_item_sk,ss_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_quantity,ss_list_price] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (11) + Project [i_item_sk] + BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,brand_id,class_id,category_id] + Filter [i_brand_id,i_class_id,i_category_id] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #5 - WholeStageCodegen (79) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (26) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] - Subquery #1 - WholeStageCodegen (8) - HashAggregate [sum,count] [avg(CheckOverflow((promote_precision(cast(cast(quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(list_price as decimal(12,2)))), DecimalType(18,2), true)),average_sales,sum,count] - InputAdapter - Exchange #17 - WholeStageCodegen (7) - HashAggregate [quantity,list_price] [sum,count,sum,count] + BroadcastExchange #5 + WholeStageCodegen (10) + HashAggregate [brand_id,class_id,category_id] + HashAggregate [brand_id,class_id,category_id] + HashAggregate [brand_id,class_id,category_id] + InputAdapter + Exchange [brand_id,class_id,category_id] #6 + WholeStageCodegen (9) + HashAggregate [brand_id,class_id,category_id] + BroadcastHashJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] + Project [i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] InputAdapter - Union - WholeStageCodegen (2) - Project [ss_quantity,ss_list_price] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_quantity,ss_list_price] - InputAdapter - BroadcastExchange #18 - WholeStageCodegen 
(1) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (4) - Project [cs_quantity,cs_list_price] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_quantity,cs_list_price] - InputAdapter - BroadcastExchange #19 - WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (6) - Project [ws_quantity,ws_list_price] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_quantity,ws_list_price] + BroadcastExchange #7 + WholeStageCodegen (1) + Filter [i_item_sk,i_brand_id,i_class_id,i_category_id] + ColumnarToRow InputAdapter - ReusedExchange [d_date_sk] #19 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #6 - WholeStageCodegen (25) - HashAggregate [i_brand_id,i_class_id,i_category_id,ss_quantity,ss_list_price] [sum,isEmpty,count,sum,isEmpty,count] - Project [ss_quantity,ss_list_price,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_quantity,ss_list_price,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - BroadcastHashJoin [ss_item_sk,ss_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] + InputAdapter + BroadcastExchange #8 + WholeStageCodegen (2) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + InputAdapter + BroadcastExchange #9 + WholeStageCodegen (5) + Project [i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] ColumnarToRow InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_quantity,ss_list_price] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk] InputAdapter - BroadcastExchange #7 - WholeStageCodegen (11) - Project [i_item_sk] - BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,brand_id,class_id,category_id] - Filter [i_brand_id,i_class_id,i_category_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - InputAdapter - BroadcastExchange #8 - WholeStageCodegen (10) - HashAggregate [brand_id,class_id,category_id] - HashAggregate [brand_id,class_id,category_id] - HashAggregate [brand_id,class_id,category_id] - InputAdapter - Exchange [brand_id,class_id,category_id] #9 - WholeStageCodegen (9) - HashAggregate [brand_id,class_id,category_id] - BroadcastHashJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] - 
BroadcastHashJoin [brand_id,class_id,category_id,i_brand_id,i_class_id,i_category_id] - Project [i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk] - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (1) - Filter [i_item_sk,i_brand_id,i_class_id,i_category_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - InputAdapter - BroadcastExchange #11 - WholeStageCodegen (2) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - InputAdapter - BroadcastExchange #12 - WholeStageCodegen (5) - Project [i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk] - InputAdapter - BroadcastExchange #13 - WholeStageCodegen (3) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - InputAdapter - ReusedExchange [d_date_sk] #11 - InputAdapter - BroadcastExchange #14 - WholeStageCodegen (8) - Project [i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Project [ws_sold_date_sk,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [ws_item_sk,i_item_sk] - Filter [ws_item_sk,ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #13 - InputAdapter - ReusedExchange [d_date_sk] #11 - InputAdapter - BroadcastExchange #15 - WholeStageCodegen (23) - BroadcastHashJoin [i_item_sk,ss_item_sk] + BroadcastExchange #10 + WholeStageCodegen (3) Filter [i_item_sk] ColumnarToRow InputAdapter Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] - InputAdapter - ReusedExchange [ss_item_sk] #7 - InputAdapter - BroadcastExchange #16 - WholeStageCodegen (24) - Project [d_date_sk] - Filter [d_year,d_moy,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year,d_moy] - WholeStageCodegen (52) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #20 - WholeStageCodegen (51) - HashAggregate [i_brand_id,i_class_id,i_category_id,cs_quantity,cs_list_price] 
[sum,isEmpty,count,sum,isEmpty,count] - Project [cs_quantity,cs_list_price,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_quantity,cs_list_price,i_brand_id,i_class_id,i_category_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - BroadcastHashJoin [cs_item_sk,ss_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_quantity,cs_list_price] - InputAdapter - ReusedExchange [ss_item_sk] #7 - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #15 - InputAdapter - ReusedExchange [d_date_sk] #16 - WholeStageCodegen (78) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id] #21 - WholeStageCodegen (77) - HashAggregate [i_brand_id,i_class_id,i_category_id,ws_quantity,ws_list_price] [sum,isEmpty,count,sum,isEmpty,count] - Project [ws_quantity,ws_list_price,i_brand_id,i_class_id,i_category_id] + InputAdapter + ReusedExchange [d_date_sk] #8 + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (8) + Project [i_brand_id,i_class_id,i_category_id] BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Project [ws_sold_date_sk,ws_quantity,ws_list_price,i_brand_id,i_class_id,i_category_id] + Project [ws_sold_date_sk,i_brand_id,i_class_id,i_category_id] BroadcastHashJoin [ws_item_sk,i_item_sk] - BroadcastHashJoin [ws_item_sk,ss_item_sk] - Filter [ws_item_sk,ws_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_quantity,ws_list_price] - InputAdapter - ReusedExchange [ss_item_sk] #7 + Filter [ws_item_sk,ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #15 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #10 InputAdapter - ReusedExchange [d_date_sk] #16 - WholeStageCodegen (161) - HashAggregate [channel,i_brand_id,i_class_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + ReusedExchange [d_date_sk] #8 + InputAdapter + BroadcastExchange #12 + WholeStageCodegen (23) + BroadcastHashJoin [i_item_sk,ss_item_sk] + Filter [i_item_sk] + ColumnarToRow InputAdapter - Exchange [channel,i_brand_id,i_class_id] #22 - WholeStageCodegen (160) - HashAggregate [channel,i_brand_id,i_class_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] - InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #23 - 
WholeStageCodegen (159) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (106) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (132) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 - WholeStageCodegen (158) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #21 - WholeStageCodegen (244) - HashAggregate [channel,i_brand_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id] + InputAdapter + ReusedExchange [ss_item_sk] #4 InputAdapter - Exchange [channel,i_brand_id] #24 - WholeStageCodegen (243) - HashAggregate [channel,i_brand_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + BroadcastExchange 
#13 + WholeStageCodegen (24) + Project [d_date_sk] + Filter [d_year,d_moy,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year,d_moy] + WholeStageCodegen (52) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + Exchange [i_brand_id,i_class_id,i_category_id] #17 + WholeStageCodegen (51) + HashAggregate [i_brand_id,i_class_id,i_category_id,cs_quantity,cs_list_price] [sum,isEmpty,count,sum,isEmpty,count] + Project [cs_quantity,cs_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_quantity,cs_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + BroadcastHashJoin [cs_item_sk,ss_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #25 - WholeStageCodegen (242) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (189) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (215) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange 
[i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 - WholeStageCodegen (241) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #21 - WholeStageCodegen (327) - HashAggregate [channel,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_brand_id,i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] - InputAdapter - Exchange [channel] #26 - WholeStageCodegen (326) - HashAggregate [channel,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] - InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #27 - WholeStageCodegen (325) - HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] - InputAdapter - Union - WholeStageCodegen (272) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (298) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 - WholeStageCodegen (324) 
- Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] - Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] - ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] - InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #21 - WholeStageCodegen (410) + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_quantity,cs_list_price] + InputAdapter + ReusedExchange [ss_item_sk] #4 + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #12 + InputAdapter + ReusedExchange [d_date_sk] #13 + WholeStageCodegen (78) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + Exchange [i_brand_id,i_class_id,i_category_id] #18 + WholeStageCodegen (77) + HashAggregate [i_brand_id,i_class_id,i_category_id,ws_quantity,ws_list_price] [sum,isEmpty,count,sum,isEmpty,count] + Project [ws_quantity,ws_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Project [ws_sold_date_sk,ws_quantity,ws_list_price,i_brand_id,i_class_id,i_category_id] + BroadcastHashJoin [ws_item_sk,i_item_sk] + BroadcastHashJoin [ws_item_sk,ss_item_sk] + Filter [ws_item_sk,ws_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_quantity,ws_list_price] + InputAdapter + ReusedExchange [ss_item_sk] #4 + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id] #12 + InputAdapter + ReusedExchange [d_date_sk] #13 + WholeStageCodegen (161) + HashAggregate [channel,i_brand_id,i_class_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id] #19 + WholeStageCodegen (160) + HashAggregate [channel,i_brand_id,i_class_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id,i_category_id] #20 + WholeStageCodegen (159) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] 
[sum,isEmpty,sum,sum,isEmpty,sum] + InputAdapter + Union + WholeStageCodegen (106) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (132) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #17 + WholeStageCodegen (158) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #18 + WholeStageCodegen (242) + HashAggregate [channel,i_brand_id,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id] #21 + WholeStageCodegen (241) + HashAggregate [channel,i_brand_id,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id,i_category_id] #22 + WholeStageCodegen (240) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + InputAdapter + Union + WholeStageCodegen (187) + Project 
[i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (213) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #17 + WholeStageCodegen (239) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #18 + WholeStageCodegen (323) + HashAggregate [channel,sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),i_brand_id,i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] + InputAdapter + Exchange [channel] #23 + WholeStageCodegen (322) + HashAggregate [channel,sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] + InputAdapter + Exchange [channel,i_brand_id,i_class_id,i_category_id] #24 + WholeStageCodegen (321) + HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] + InputAdapter + Union + WholeStageCodegen (268) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as 
decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (294) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #17 + WholeStageCodegen (320) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] + Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] + ReusedSubquery [average_sales] #1 + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + InputAdapter + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #18 + WholeStageCodegen (404) HashAggregate [sum,isEmpty,sum] [sum(sum_sales),sum(number_salesL),channel,i_brand_id,i_class_id,i_category_id,sum(sum_sales),sum(number_sales),sum,isEmpty,sum] InputAdapter - Exchange #28 - WholeStageCodegen (409) + Exchange #25 + WholeStageCodegen (403) HashAggregate [sum_sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sum,isEmpty,sum] [sum(sales),sum(number_salesL),sum_sales,number_sales,sum,isEmpty,sum] InputAdapter - Exchange [channel,i_brand_id,i_class_id,i_category_id] #29 - WholeStageCodegen (408) + Exchange [channel,i_brand_id,i_class_id,i_category_id] #26 + WholeStageCodegen (402) HashAggregate [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] [sum,isEmpty,sum,sum,isEmpty,sum] InputAdapter Union - WholeStageCodegen (355) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + WholeStageCodegen (349) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter 
[sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ss_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ss_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #6 - WholeStageCodegen (381) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #3 + WholeStageCodegen (375) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(cs_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(cs_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #20 - WholeStageCodegen (407) - Project [channel,i_brand_id,i_class_id,i_category_id,sales,number_sales] + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #17 + WholeStageCodegen (401) + Project [i_brand_id,i_class_id,i_category_id,sales,number_sales] Filter [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true))] ReusedSubquery [average_sales] #1 - HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as 
decimal(12,2)))), DecimalType(18,2), true)),count(1),channel,sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] + HashAggregate [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] [sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),count(1),sales,number_sales,sum(CheckOverflow((promote_precision(cast(cast(ws_quantity as decimal(10,0)) as decimal(12,2))) * promote_precision(cast(ws_list_price as decimal(12,2)))), DecimalType(18,2), true)),sum,isEmpty,count] InputAdapter - ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #21 + ReusedExchange [i_brand_id,i_class_id,i_category_id,sum,isEmpty,count] #18 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt index 2d76deefcaa36..f6c5258701525 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/explain.txt @@ -34,24 +34,24 @@ TakeOrderedAndProject (160) : +- * Sort (46) : +- Exchange (45) : +- * Project (44) - : +- * SortMergeJoin Inner (43) - : :- * Sort (37) - : : +- Exchange (36) - : : +- * Project (35) - : : +- * BroadcastHashJoin Inner BuildRight (34) - : : :- * Project (29) - : : : +- * Filter (28) - : : : +- * ColumnarToRow (27) - : : : +- Scan parquet default.customer (26) - : : +- BroadcastExchange (33) - : : +- * Filter (32) - : : +- * ColumnarToRow (31) - : : +- Scan parquet default.customer_address (30) - : +- * Sort (42) - : +- Exchange (41) - : +- * Filter (40) - : +- * ColumnarToRow (39) - : +- Scan parquet default.customer_demographics (38) + : +- * BroadcastHashJoin Inner BuildRight (43) + : :- * Project (38) + : : +- * SortMergeJoin Inner (37) + : : :- * Sort (31) + : : : +- Exchange (30) + : : : +- * Project (29) + : : : +- * Filter (28) + : : : +- * ColumnarToRow (27) + : : : +- Scan parquet default.customer (26) + : : +- * Sort (36) + : : +- Exchange (35) + : : +- * Filter (34) + : : +- * ColumnarToRow (33) + : : +- Scan parquet default.customer_demographics (32) + : +- BroadcastExchange (42) + : +- * Filter (41) + : +- * ColumnarToRow (40) + : +- Scan parquet default.customer_address (39) :- * HashAggregate (76) : +- Exchange (75) : +- * HashAggregate (74) @@ -266,7 +266,7 @@ Input [10]: [cs_bill_customer_sk#2, cs_item_sk#4, cs_quantity#5, cs_list_price#6 (24) Exchange Input [8]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19] -Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), true, [id=#21] +Arguments: hashpartitioning(cs_bill_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#21] (25) Sort [codegen id : 5] Input [8]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19] @@ -279,89 +279,89 @@ Location [not included in comparison]/{warehouse_dir}/customer] PushedFilters: [In(c_birth_month, [9,5,12,4,1,10]), IsNotNull(c_customer_sk), IsNotNull(c_current_cdemo_sk), IsNotNull(c_current_addr_sk)] ReadSchema: struct -(27) ColumnarToRow [codegen id : 7] +(27) 
ColumnarToRow [codegen id : 6] Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] -(28) Filter [codegen id : 7] +(28) Filter [codegen id : 6] Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] Condition : (((c_birth_month#25 IN (9,5,12,4,1,10) AND isnotnull(c_customer_sk#22)) AND isnotnull(c_current_cdemo_sk#23)) AND isnotnull(c_current_addr_sk#24)) -(29) Project [codegen id : 7] +(29) Project [codegen id : 6] Output [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26] Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] -(30) Scan parquet default.customer_address -Output [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] -Batched: true -Location [not included in comparison]/{warehouse_dir}/customer_address] -PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] -ReadSchema: struct - -(31) ColumnarToRow [codegen id : 6] -Input [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] - -(32) Filter [codegen id : 6] -Input [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] -Condition : (ca_state#29 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#27)) - -(33) BroadcastExchange -Input [4]: [ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#31] - -(34) BroadcastHashJoin [codegen id : 7] -Left keys [1]: [c_current_addr_sk#24] -Right keys [1]: [ca_address_sk#27] -Join condition: None - -(35) Project [codegen id : 7] -Output [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Input [8]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#27, ca_county#28, ca_state#29, ca_country#30] - -(36) Exchange -Input [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), true, [id=#32] +(30) Exchange +Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26] +Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), ENSURE_REQUIREMENTS, [id=#27] -(37) Sort [codegen id : 8] -Input [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] +(31) Sort [codegen id : 7] +Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26] Arguments: [c_current_cdemo_sk#23 ASC NULLS FIRST], false, 0 -(38) Scan parquet default.customer_demographics -Output [1]: [cd_demo_sk#33] +(32) Scan parquet default.customer_demographics +Output [1]: [cd_demo_sk#28] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_demographics] PushedFilters: [IsNotNull(cd_demo_sk)] ReadSchema: struct -(39) ColumnarToRow [codegen id : 9] -Input [1]: [cd_demo_sk#33] +(33) ColumnarToRow [codegen id : 8] +Input [1]: [cd_demo_sk#28] -(40) Filter [codegen id : 9] -Input [1]: [cd_demo_sk#33] -Condition : isnotnull(cd_demo_sk#33) +(34) Filter [codegen id : 8] +Input [1]: [cd_demo_sk#28] +Condition : isnotnull(cd_demo_sk#28) -(41) Exchange -Input [1]: [cd_demo_sk#33] -Arguments: hashpartitioning(cd_demo_sk#33, 5), true, [id=#34] +(35) Exchange +Input [1]: [cd_demo_sk#28] +Arguments: hashpartitioning(cd_demo_sk#28, 5), ENSURE_REQUIREMENTS, [id=#29] 
-(42) Sort [codegen id : 10] -Input [1]: [cd_demo_sk#33] -Arguments: [cd_demo_sk#33 ASC NULLS FIRST], false, 0 +(36) Sort [codegen id : 9] +Input [1]: [cd_demo_sk#28] +Arguments: [cd_demo_sk#28 ASC NULLS FIRST], false, 0 -(43) SortMergeJoin [codegen id : 11] +(37) SortMergeJoin [codegen id : 11] Left keys [1]: [c_current_cdemo_sk#23] -Right keys [1]: [cd_demo_sk#33] +Right keys [1]: [cd_demo_sk#28] +Join condition: None + +(38) Project [codegen id : 11] +Output [3]: [c_customer_sk#22, c_current_addr_sk#24, c_birth_year#26] +Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, cd_demo_sk#28] + +(39) Scan parquet default.customer_address +Output [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] +Batched: true +Location [not included in comparison]/{warehouse_dir}/customer_address] +PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] +ReadSchema: struct + +(40) ColumnarToRow [codegen id : 10] +Input [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] + +(41) Filter [codegen id : 10] +Input [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] +Condition : (ca_state#32 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#30)) + +(42) BroadcastExchange +Input [4]: [ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#34] + +(43) BroadcastHashJoin [codegen id : 11] +Left keys [1]: [c_current_addr_sk#24] +Right keys [1]: [ca_address_sk#30] Join condition: None (44) Project [codegen id : 11] -Output [5]: [c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Input [7]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30, cd_demo_sk#33] +Output [5]: [c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] +Input [7]: [c_customer_sk#22, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#30, ca_county#31, ca_state#32, ca_country#33] (45) Exchange -Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] -Arguments: hashpartitioning(c_customer_sk#22, 5), true, [id=#35] +Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] +Arguments: hashpartitioning(c_customer_sk#22, 5), ENSURE_REQUIREMENTS, [id=#35] (46) Sort [codegen id : 12] -Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] +Input [5]: [c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] Arguments: [c_customer_sk#22 ASC NULLS FIRST], false, 0 (47) SortMergeJoin [codegen id : 13] @@ -370,26 +370,26 @@ Right keys [1]: [c_customer_sk#22] Join condition: None (48) Project [codegen id : 13] -Output [11]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, cast(cs_quantity#5 as decimal(12,2)) AS agg1#36, cast(cs_list_price#6 as decimal(12,2)) AS agg2#37, cast(cs_coupon_amt#8 as decimal(12,2)) AS agg3#38, cast(cs_sales_price#7 as decimal(12,2)) AS agg4#39, cast(cs_net_profit#9 as decimal(12,2)) AS agg5#40, cast(c_birth_year#26 as decimal(12,2)) AS agg6#41, cast(cd_dep_count#13 as decimal(12,2)) AS agg7#42] -Input [13]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_county#28, ca_state#29, ca_country#30] +Output [11]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, 
cast(cs_quantity#5 as decimal(12,2)) AS agg1#36, cast(cs_list_price#6 as decimal(12,2)) AS agg2#37, cast(cs_coupon_amt#8 as decimal(12,2)) AS agg3#38, cast(cs_sales_price#7 as decimal(12,2)) AS agg4#39, cast(cs_net_profit#9 as decimal(12,2)) AS agg5#40, cast(c_birth_year#26 as decimal(12,2)) AS agg6#41, cast(cd_dep_count#13 as decimal(12,2)) AS agg7#42] +Input [13]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_county#31, ca_state#32, ca_country#33] (49) HashAggregate [codegen id : 13] -Input [11]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, agg1#36, agg2#37, agg3#38, agg4#39, agg5#40, agg6#41, agg7#42] -Keys [4]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28] +Input [11]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, agg1#36, agg2#37, agg3#38, agg4#39, agg5#40, agg6#41, agg7#42] +Keys [4]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31] Functions [7]: [partial_avg(agg1#36), partial_avg(agg2#37), partial_avg(agg3#38), partial_avg(agg4#39), partial_avg(agg5#40), partial_avg(agg6#41), partial_avg(agg7#42)] Aggregate Attributes [14]: [sum#43, count#44, sum#45, count#46, sum#47, count#48, sum#49, count#50, sum#51, count#52, sum#53, count#54, sum#55, count#56] -Results [18]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68, sum#69, count#70] +Results [18]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68, sum#69, count#70] (50) Exchange -Input [18]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68, sum#69, count#70] -Arguments: hashpartitioning(i_item_id#19, ca_country#30, ca_state#29, ca_county#28, 5), true, [id=#71] +Input [18]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68, sum#69, count#70] +Arguments: hashpartitioning(i_item_id#19, ca_country#33, ca_state#32, ca_county#31, 5), ENSURE_REQUIREMENTS, [id=#71] (51) HashAggregate [codegen id : 14] -Input [18]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68, sum#69, count#70] -Keys [4]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28] +Input [18]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, sum#57, count#58, sum#59, count#60, sum#61, count#62, sum#63, count#64, sum#65, count#66, sum#67, count#68, sum#69, count#70] +Keys [4]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31] Functions [7]: [avg(agg1#36), avg(agg2#37), avg(agg3#38), avg(agg4#39), avg(agg5#40), avg(agg6#41), avg(agg7#42)] Aggregate Attributes [7]: [avg(agg1#36)#72, avg(agg2#37)#73, avg(agg3#38)#74, avg(agg4#39)#75, avg(agg5#40)#76, avg(agg6#41)#77, avg(agg7#42)#78] -Results [11]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, avg(agg1#36)#72 AS agg1#79, avg(agg2#37)#73 AS agg2#80, avg(agg3#38)#74 AS agg3#81, avg(agg4#39)#75 AS agg4#82, avg(agg5#40)#76 AS agg5#83, avg(agg6#41)#77 AS agg6#84, avg(agg7#42)#78 AS agg7#85] +Results [11]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, avg(agg1#36)#72 
AS agg1#79, avg(agg2#37)#73 AS agg2#80, avg(agg3#38)#74 AS agg3#81, avg(agg4#39)#75 AS agg4#82, avg(agg5#40)#76 AS agg5#83, avg(agg6#41)#77 AS agg6#84, avg(agg7#42)#78 AS agg7#85] (52) ReusedExchange [Reuses operator id: 24] Output [8]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19] @@ -417,41 +417,41 @@ Output [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_bi Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] (58) Scan parquet default.customer_address -Output [3]: [ca_address_sk#27, ca_state#29, ca_country#30] +Output [3]: [ca_address_sk#30, ca_state#32, ca_country#33] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct (59) ColumnarToRow [codegen id : 20] -Input [3]: [ca_address_sk#27, ca_state#29, ca_country#30] +Input [3]: [ca_address_sk#30, ca_state#32, ca_country#33] (60) Filter [codegen id : 20] -Input [3]: [ca_address_sk#27, ca_state#29, ca_country#30] -Condition : (ca_state#29 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#27)) +Input [3]: [ca_address_sk#30, ca_state#32, ca_country#33] +Condition : (ca_state#32 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#30)) (61) BroadcastExchange -Input [3]: [ca_address_sk#27, ca_state#29, ca_country#30] +Input [3]: [ca_address_sk#30, ca_state#32, ca_country#33] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#86] (62) BroadcastHashJoin [codegen id : 21] Left keys [1]: [c_current_addr_sk#24] -Right keys [1]: [ca_address_sk#27] +Right keys [1]: [ca_address_sk#30] Join condition: None (63) Project [codegen id : 21] -Output [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_state#29, ca_country#30] -Input [7]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#27, ca_state#29, ca_country#30] +Output [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_state#32, ca_country#33] +Input [7]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#30, ca_state#32, ca_country#33] (64) Exchange -Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_state#29, ca_country#30] -Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), true, [id=#87] +Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_state#32, ca_country#33] +Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), ENSURE_REQUIREMENTS, [id=#87] (65) Sort [codegen id : 22] -Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_state#29, ca_country#30] +Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_state#32, ca_country#33] Arguments: [c_current_cdemo_sk#23 ASC NULLS FIRST], false, 0 -(66) ReusedExchange [Reuses operator id: 41] +(66) ReusedExchange [Reuses operator id: 35] Output [1]: [cd_demo_sk#88] (67) Sort [codegen id : 24] @@ -464,15 +464,15 @@ Right keys [1]: [cd_demo_sk#88] Join condition: None (69) Project [codegen id : 25] -Output [4]: [c_customer_sk#22, c_birth_year#26, ca_state#29, ca_country#30] -Input [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_state#29, ca_country#30, cd_demo_sk#88] +Output [4]: [c_customer_sk#22, c_birth_year#26, ca_state#32, ca_country#33] +Input [6]: [c_customer_sk#22, 
c_current_cdemo_sk#23, c_birth_year#26, ca_state#32, ca_country#33, cd_demo_sk#88] (70) Exchange -Input [4]: [c_customer_sk#22, c_birth_year#26, ca_state#29, ca_country#30] -Arguments: hashpartitioning(c_customer_sk#22, 5), true, [id=#89] +Input [4]: [c_customer_sk#22, c_birth_year#26, ca_state#32, ca_country#33] +Arguments: hashpartitioning(c_customer_sk#22, 5), ENSURE_REQUIREMENTS, [id=#89] (71) Sort [codegen id : 26] -Input [4]: [c_customer_sk#22, c_birth_year#26, ca_state#29, ca_country#30] +Input [4]: [c_customer_sk#22, c_birth_year#26, ca_state#32, ca_country#33] Arguments: [c_customer_sk#22 ASC NULLS FIRST], false, 0 (72) SortMergeJoin [codegen id : 27] @@ -481,26 +481,26 @@ Right keys [1]: [c_customer_sk#22] Join condition: None (73) Project [codegen id : 27] -Output [10]: [i_item_id#19, ca_country#30, ca_state#29, cast(cs_quantity#5 as decimal(12,2)) AS agg1#36, cast(cs_list_price#6 as decimal(12,2)) AS agg2#37, cast(cs_coupon_amt#8 as decimal(12,2)) AS agg3#38, cast(cs_sales_price#7 as decimal(12,2)) AS agg4#39, cast(cs_net_profit#9 as decimal(12,2)) AS agg5#40, cast(c_birth_year#26 as decimal(12,2)) AS agg6#41, cast(cd_dep_count#13 as decimal(12,2)) AS agg7#42] -Input [12]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_state#29, ca_country#30] +Output [10]: [i_item_id#19, ca_country#33, ca_state#32, cast(cs_quantity#5 as decimal(12,2)) AS agg1#36, cast(cs_list_price#6 as decimal(12,2)) AS agg2#37, cast(cs_coupon_amt#8 as decimal(12,2)) AS agg3#38, cast(cs_sales_price#7 as decimal(12,2)) AS agg4#39, cast(cs_net_profit#9 as decimal(12,2)) AS agg5#40, cast(c_birth_year#26 as decimal(12,2)) AS agg6#41, cast(cd_dep_count#13 as decimal(12,2)) AS agg7#42] +Input [12]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_state#32, ca_country#33] (74) HashAggregate [codegen id : 27] -Input [10]: [i_item_id#19, ca_country#30, ca_state#29, agg1#36, agg2#37, agg3#38, agg4#39, agg5#40, agg6#41, agg7#42] -Keys [3]: [i_item_id#19, ca_country#30, ca_state#29] +Input [10]: [i_item_id#19, ca_country#33, ca_state#32, agg1#36, agg2#37, agg3#38, agg4#39, agg5#40, agg6#41, agg7#42] +Keys [3]: [i_item_id#19, ca_country#33, ca_state#32] Functions [7]: [partial_avg(agg1#36), partial_avg(agg2#37), partial_avg(agg3#38), partial_avg(agg4#39), partial_avg(agg5#40), partial_avg(agg6#41), partial_avg(agg7#42)] Aggregate Attributes [14]: [sum#90, count#91, sum#92, count#93, sum#94, count#95, sum#96, count#97, sum#98, count#99, sum#100, count#101, sum#102, count#103] -Results [17]: [i_item_id#19, ca_country#30, ca_state#29, sum#104, count#105, sum#106, count#107, sum#108, count#109, sum#110, count#111, sum#112, count#113, sum#114, count#115, sum#116, count#117] +Results [17]: [i_item_id#19, ca_country#33, ca_state#32, sum#104, count#105, sum#106, count#107, sum#108, count#109, sum#110, count#111, sum#112, count#113, sum#114, count#115, sum#116, count#117] (75) Exchange -Input [17]: [i_item_id#19, ca_country#30, ca_state#29, sum#104, count#105, sum#106, count#107, sum#108, count#109, sum#110, count#111, sum#112, count#113, sum#114, count#115, sum#116, count#117] -Arguments: hashpartitioning(i_item_id#19, ca_country#30, ca_state#29, 5), true, [id=#118] +Input [17]: [i_item_id#19, ca_country#33, ca_state#32, sum#104, count#105, sum#106, count#107, sum#108, 
count#109, sum#110, count#111, sum#112, count#113, sum#114, count#115, sum#116, count#117] +Arguments: hashpartitioning(i_item_id#19, ca_country#33, ca_state#32, 5), ENSURE_REQUIREMENTS, [id=#118] (76) HashAggregate [codegen id : 28] -Input [17]: [i_item_id#19, ca_country#30, ca_state#29, sum#104, count#105, sum#106, count#107, sum#108, count#109, sum#110, count#111, sum#112, count#113, sum#114, count#115, sum#116, count#117] -Keys [3]: [i_item_id#19, ca_country#30, ca_state#29] +Input [17]: [i_item_id#19, ca_country#33, ca_state#32, sum#104, count#105, sum#106, count#107, sum#108, count#109, sum#110, count#111, sum#112, count#113, sum#114, count#115, sum#116, count#117] +Keys [3]: [i_item_id#19, ca_country#33, ca_state#32] Functions [7]: [avg(agg1#36), avg(agg2#37), avg(agg3#38), avg(agg4#39), avg(agg5#40), avg(agg6#41), avg(agg7#42)] Aggregate Attributes [7]: [avg(agg1#36)#119, avg(agg2#37)#120, avg(agg3#38)#121, avg(agg4#39)#122, avg(agg5#40)#123, avg(agg6#41)#124, avg(agg7#42)#125] -Results [11]: [i_item_id#19, ca_country#30, ca_state#29, null AS county#126, avg(agg1#36)#119 AS agg1#127, avg(agg2#37)#120 AS agg2#128, avg(agg3#38)#121 AS agg3#129, avg(agg4#39)#122 AS agg4#130, avg(agg5#40)#123 AS agg5#131, avg(agg6#41)#124 AS agg6#132, avg(agg7#42)#125 AS agg7#133] +Results [11]: [i_item_id#19, ca_country#33, ca_state#32, null AS county#126, avg(agg1#36)#119 AS agg1#127, avg(agg2#37)#120 AS agg2#128, avg(agg3#38)#121 AS agg3#129, avg(agg4#39)#122 AS agg4#130, avg(agg5#40)#123 AS agg5#131, avg(agg6#41)#124 AS agg6#132, avg(agg7#42)#125 AS agg7#133] (77) ReusedExchange [Reuses operator id: 24] Output [8]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19] @@ -528,45 +528,45 @@ Output [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_bi Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] (83) Scan parquet default.customer_address -Output [3]: [ca_address_sk#27, ca_state#29, ca_country#30] +Output [3]: [ca_address_sk#30, ca_state#32, ca_country#33] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct (84) ColumnarToRow [codegen id : 34] -Input [3]: [ca_address_sk#27, ca_state#29, ca_country#30] +Input [3]: [ca_address_sk#30, ca_state#32, ca_country#33] (85) Filter [codegen id : 34] -Input [3]: [ca_address_sk#27, ca_state#29, ca_country#30] -Condition : (ca_state#29 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#27)) +Input [3]: [ca_address_sk#30, ca_state#32, ca_country#33] +Condition : (ca_state#32 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#30)) (86) Project [codegen id : 34] -Output [2]: [ca_address_sk#27, ca_country#30] -Input [3]: [ca_address_sk#27, ca_state#29, ca_country#30] +Output [2]: [ca_address_sk#30, ca_country#33] +Input [3]: [ca_address_sk#30, ca_state#32, ca_country#33] (87) BroadcastExchange -Input [2]: [ca_address_sk#27, ca_country#30] +Input [2]: [ca_address_sk#30, ca_country#33] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#134] (88) BroadcastHashJoin [codegen id : 35] Left keys [1]: [c_current_addr_sk#24] -Right keys [1]: [ca_address_sk#27] +Right keys [1]: [ca_address_sk#30] Join condition: None (89) Project [codegen id : 35] -Output [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#30] 
-Input [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#27, ca_country#30] +Output [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#33] +Input [6]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#30, ca_country#33] (90) Exchange -Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#30] -Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), true, [id=#135] +Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#33] +Arguments: hashpartitioning(c_current_cdemo_sk#23, 5), ENSURE_REQUIREMENTS, [id=#135] (91) Sort [codegen id : 36] -Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#30] +Input [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#33] Arguments: [c_current_cdemo_sk#23 ASC NULLS FIRST], false, 0 -(92) ReusedExchange [Reuses operator id: 41] +(92) ReusedExchange [Reuses operator id: 35] Output [1]: [cd_demo_sk#136] (93) Sort [codegen id : 38] @@ -579,15 +579,15 @@ Right keys [1]: [cd_demo_sk#136] Join condition: None (95) Project [codegen id : 39] -Output [3]: [c_customer_sk#22, c_birth_year#26, ca_country#30] -Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#30, cd_demo_sk#136] +Output [3]: [c_customer_sk#22, c_birth_year#26, ca_country#33] +Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26, ca_country#33, cd_demo_sk#136] (96) Exchange -Input [3]: [c_customer_sk#22, c_birth_year#26, ca_country#30] -Arguments: hashpartitioning(c_customer_sk#22, 5), true, [id=#137] +Input [3]: [c_customer_sk#22, c_birth_year#26, ca_country#33] +Arguments: hashpartitioning(c_customer_sk#22, 5), ENSURE_REQUIREMENTS, [id=#137] (97) Sort [codegen id : 40] -Input [3]: [c_customer_sk#22, c_birth_year#26, ca_country#30] +Input [3]: [c_customer_sk#22, c_birth_year#26, ca_country#33] Arguments: [c_customer_sk#22 ASC NULLS FIRST], false, 0 (98) SortMergeJoin [codegen id : 41] @@ -596,26 +596,26 @@ Right keys [1]: [c_customer_sk#22] Join condition: None (99) Project [codegen id : 41] -Output [9]: [i_item_id#19, ca_country#30, cast(cs_quantity#5 as decimal(12,2)) AS agg1#36, cast(cs_list_price#6 as decimal(12,2)) AS agg2#37, cast(cs_coupon_amt#8 as decimal(12,2)) AS agg3#38, cast(cs_sales_price#7 as decimal(12,2)) AS agg4#39, cast(cs_net_profit#9 as decimal(12,2)) AS agg5#40, cast(c_birth_year#26 as decimal(12,2)) AS agg6#41, cast(cd_dep_count#13 as decimal(12,2)) AS agg7#42] -Input [11]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_country#30] +Output [9]: [i_item_id#19, ca_country#33, cast(cs_quantity#5 as decimal(12,2)) AS agg1#36, cast(cs_list_price#6 as decimal(12,2)) AS agg2#37, cast(cs_coupon_amt#8 as decimal(12,2)) AS agg3#38, cast(cs_sales_price#7 as decimal(12,2)) AS agg4#39, cast(cs_net_profit#9 as decimal(12,2)) AS agg5#40, cast(c_birth_year#26 as decimal(12,2)) AS agg6#41, cast(cd_dep_count#13 as decimal(12,2)) AS agg7#42] +Input [11]: [cs_bill_customer_sk#2, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9, cd_dep_count#13, i_item_id#19, c_customer_sk#22, c_birth_year#26, ca_country#33] (100) HashAggregate [codegen id : 41] -Input [9]: [i_item_id#19, ca_country#30, agg1#36, agg2#37, agg3#38, agg4#39, agg5#40, agg6#41, agg7#42] -Keys [2]: [i_item_id#19, 
ca_country#30] +Input [9]: [i_item_id#19, ca_country#33, agg1#36, agg2#37, agg3#38, agg4#39, agg5#40, agg6#41, agg7#42] +Keys [2]: [i_item_id#19, ca_country#33] Functions [7]: [partial_avg(agg1#36), partial_avg(agg2#37), partial_avg(agg3#38), partial_avg(agg4#39), partial_avg(agg5#40), partial_avg(agg6#41), partial_avg(agg7#42)] Aggregate Attributes [14]: [sum#138, count#139, sum#140, count#141, sum#142, count#143, sum#144, count#145, sum#146, count#147, sum#148, count#149, sum#150, count#151] -Results [16]: [i_item_id#19, ca_country#30, sum#152, count#153, sum#154, count#155, sum#156, count#157, sum#158, count#159, sum#160, count#161, sum#162, count#163, sum#164, count#165] +Results [16]: [i_item_id#19, ca_country#33, sum#152, count#153, sum#154, count#155, sum#156, count#157, sum#158, count#159, sum#160, count#161, sum#162, count#163, sum#164, count#165] (101) Exchange -Input [16]: [i_item_id#19, ca_country#30, sum#152, count#153, sum#154, count#155, sum#156, count#157, sum#158, count#159, sum#160, count#161, sum#162, count#163, sum#164, count#165] -Arguments: hashpartitioning(i_item_id#19, ca_country#30, 5), true, [id=#166] +Input [16]: [i_item_id#19, ca_country#33, sum#152, count#153, sum#154, count#155, sum#156, count#157, sum#158, count#159, sum#160, count#161, sum#162, count#163, sum#164, count#165] +Arguments: hashpartitioning(i_item_id#19, ca_country#33, 5), ENSURE_REQUIREMENTS, [id=#166] (102) HashAggregate [codegen id : 42] -Input [16]: [i_item_id#19, ca_country#30, sum#152, count#153, sum#154, count#155, sum#156, count#157, sum#158, count#159, sum#160, count#161, sum#162, count#163, sum#164, count#165] -Keys [2]: [i_item_id#19, ca_country#30] +Input [16]: [i_item_id#19, ca_country#33, sum#152, count#153, sum#154, count#155, sum#156, count#157, sum#158, count#159, sum#160, count#161, sum#162, count#163, sum#164, count#165] +Keys [2]: [i_item_id#19, ca_country#33] Functions [7]: [avg(agg1#36), avg(agg2#37), avg(agg3#38), avg(agg4#39), avg(agg5#40), avg(agg6#41), avg(agg7#42)] Aggregate Attributes [7]: [avg(agg1#36)#167, avg(agg2#37)#168, avg(agg3#38)#169, avg(agg4#39)#170, avg(agg5#40)#171, avg(agg6#41)#172, avg(agg7#42)#173] -Results [11]: [i_item_id#19, ca_country#30, null AS ca_state#174, null AS county#175, avg(agg1#36)#167 AS agg1#176, avg(agg2#37)#168 AS agg2#177, avg(agg3#38)#169 AS agg3#178, avg(agg4#39)#170 AS agg4#179, avg(agg5#40)#171 AS agg5#180, avg(agg6#41)#172 AS agg6#181, avg(agg7#42)#173 AS agg7#182] +Results [11]: [i_item_id#19, ca_country#33, null AS ca_state#174, null AS county#175, avg(agg1#36)#167 AS agg1#176, avg(agg2#37)#168 AS agg2#177, avg(agg3#38)#169 AS agg3#178, avg(agg4#39)#170 AS agg4#179, avg(agg5#40)#171 AS agg5#180, avg(agg6#41)#172 AS agg6#181, avg(agg7#42)#173 AS agg7#182] (103) Scan parquet default.catalog_sales Output [9]: [cs_sold_date_sk#1, cs_bill_customer_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#4, cs_quantity#5, cs_list_price#6, cs_sales_price#7, cs_coupon_amt#8, cs_net_profit#9] @@ -674,35 +674,35 @@ Output [4]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_bi Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_month#25, c_birth_year#26] (116) Scan parquet default.customer_address -Output [2]: [ca_address_sk#27, ca_state#29] +Output [2]: [ca_address_sk#30, ca_state#32] Batched: true Location [not included in comparison]/{warehouse_dir}/customer_address] PushedFilters: [In(ca_state, [ND,WI,AL,NC,OK,MS,TN]), IsNotNull(ca_address_sk)] ReadSchema: struct (117) ColumnarToRow [codegen id : 45] 
-Input [2]: [ca_address_sk#27, ca_state#29] +Input [2]: [ca_address_sk#30, ca_state#32] (118) Filter [codegen id : 45] -Input [2]: [ca_address_sk#27, ca_state#29] -Condition : (ca_state#29 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#27)) +Input [2]: [ca_address_sk#30, ca_state#32] +Condition : (ca_state#32 IN (ND,WI,AL,NC,OK,MS,TN) AND isnotnull(ca_address_sk#30)) (119) Project [codegen id : 45] -Output [1]: [ca_address_sk#27] -Input [2]: [ca_address_sk#27, ca_state#29] +Output [1]: [ca_address_sk#30] +Input [2]: [ca_address_sk#30, ca_state#32] (120) BroadcastExchange -Input [1]: [ca_address_sk#27] +Input [1]: [ca_address_sk#30] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#183] (121) BroadcastHashJoin [codegen id : 46] Left keys [1]: [c_current_addr_sk#24] -Right keys [1]: [ca_address_sk#27] +Right keys [1]: [ca_address_sk#30] Join condition: None (122) Project [codegen id : 46] Output [3]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26] -Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#27] +Input [5]: [c_customer_sk#22, c_current_cdemo_sk#23, c_current_addr_sk#24, c_birth_year#26, ca_address_sk#30] (123) BroadcastExchange Input [3]: [c_customer_sk#22, c_current_cdemo_sk#23, c_birth_year#26] @@ -765,7 +765,7 @@ Results [15]: [i_item_id#19, sum#201, count#202, sum#203, count#204, sum#205, co (136) Exchange Input [15]: [i_item_id#19, sum#201, count#202, sum#203, count#204, sum#205, count#206, sum#207, count#208, sum#209, count#210, sum#211, count#212, sum#213, count#214] -Arguments: hashpartitioning(i_item_id#19, 5), true, [id=#215] +Arguments: hashpartitioning(i_item_id#19, 5), ENSURE_REQUIREMENTS, [id=#215] (137) HashAggregate [codegen id : 50] Input [15]: [i_item_id#19, sum#201, count#202, sum#203, count#204, sum#205, count#206, sum#207, count#208, sum#209, count#210, sum#211, count#212, sum#213, count#214] @@ -860,7 +860,7 @@ Results [14]: [sum#248, count#249, sum#250, count#251, sum#252, count#253, sum#2 (157) Exchange Input [14]: [sum#248, count#249, sum#250, count#251, sum#252, count#253, sum#254, count#255, sum#256, count#257, sum#258, count#259, sum#260, count#261] -Arguments: SinglePartition, true, [id=#262] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#262] (158) HashAggregate [codegen id : 58] Input [14]: [sum#248, count#249, sum#250, count#251, sum#252, count#253, sum#254, count#255, sum#256, count#257, sum#258, count#259, sum#260, count#261] @@ -872,6 +872,6 @@ Results [11]: [null AS i_item_id#270, null AS ca_country#271, null AS ca_state#2 (159) Union (160) TakeOrderedAndProject -Input [11]: [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, agg1#79, agg2#80, agg3#81, agg4#82, agg5#83, agg6#84, agg7#85] -Arguments: 100, [ca_country#30 ASC NULLS FIRST, ca_state#29 ASC NULLS FIRST, ca_county#28 ASC NULLS FIRST, i_item_id#19 ASC NULLS FIRST], [i_item_id#19, ca_country#30, ca_state#29, ca_county#28, agg1#79, agg2#80, agg3#81, agg4#82, agg5#83, agg6#84, agg7#85] +Input [11]: [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, agg1#79, agg2#80, agg3#81, agg4#82, agg5#83, agg6#84, agg7#85] +Arguments: 100, [ca_country#33 ASC NULLS FIRST, ca_state#32 ASC NULLS FIRST, ca_county#31 ASC NULLS FIRST, i_item_id#19 ASC NULLS FIRST], [i_item_id#19, ca_country#33, ca_state#32, ca_county#31, agg1#79, agg2#80, agg3#81, agg4#82, agg5#83, agg6#84, agg7#85] diff --git 
a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/simplified.txt index 5514e335f1b51..4566929712713 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q18a.sf100/simplified.txt @@ -54,37 +54,37 @@ TakeOrderedAndProject [ca_country,ca_state,ca_county,i_item_id,agg1,agg2,agg3,ag Exchange [c_customer_sk] #6 WholeStageCodegen (11) Project [c_customer_sk,c_birth_year,ca_county,ca_state,ca_country] - SortMergeJoin [c_current_cdemo_sk,cd_demo_sk] - InputAdapter - WholeStageCodegen (8) - Sort [c_current_cdemo_sk] - InputAdapter - Exchange [c_current_cdemo_sk] #7 - WholeStageCodegen (7) - Project [c_customer_sk,c_current_cdemo_sk,c_birth_year,ca_county,ca_state,ca_country] - BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + BroadcastHashJoin [c_current_addr_sk,ca_address_sk] + Project [c_customer_sk,c_current_addr_sk,c_birth_year] + SortMergeJoin [c_current_cdemo_sk,cd_demo_sk] + InputAdapter + WholeStageCodegen (7) + Sort [c_current_cdemo_sk] + InputAdapter + Exchange [c_current_cdemo_sk] #7 + WholeStageCodegen (6) Project [c_customer_sk,c_current_cdemo_sk,c_current_addr_sk,c_birth_year] Filter [c_birth_month,c_customer_sk,c_current_cdemo_sk,c_current_addr_sk] ColumnarToRow InputAdapter Scan parquet default.customer [c_customer_sk,c_current_cdemo_sk,c_current_addr_sk,c_birth_month,c_birth_year] - InputAdapter - BroadcastExchange #8 - WholeStageCodegen (6) - Filter [ca_state,ca_address_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer_address [ca_address_sk,ca_county,ca_state,ca_country] + InputAdapter + WholeStageCodegen (9) + Sort [cd_demo_sk] + InputAdapter + Exchange [cd_demo_sk] #8 + WholeStageCodegen (8) + Filter [cd_demo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_demographics [cd_demo_sk] InputAdapter - WholeStageCodegen (10) - Sort [cd_demo_sk] - InputAdapter - Exchange [cd_demo_sk] #9 - WholeStageCodegen (9) - Filter [cd_demo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.customer_demographics [cd_demo_sk] + BroadcastExchange #9 + WholeStageCodegen (10) + Filter [ca_state,ca_address_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_address [ca_address_sk,ca_county,ca_state,ca_country] WholeStageCodegen (28) HashAggregate [i_item_id,ca_country,ca_state,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count] [avg(agg1),avg(agg2),avg(agg3),avg(agg4),avg(agg5),avg(agg6),avg(agg7),county,agg1,agg2,agg3,agg4,agg5,agg6,agg7,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count] InputAdapter @@ -130,7 +130,7 @@ TakeOrderedAndProject [ca_country,ca_state,ca_county,i_item_id,agg1,agg2,agg3,ag WholeStageCodegen (24) Sort [cd_demo_sk] InputAdapter - ReusedExchange [cd_demo_sk] #9 + ReusedExchange [cd_demo_sk] #8 WholeStageCodegen (42) HashAggregate [i_item_id,ca_country,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count] [avg(agg1),avg(agg2),avg(agg3),avg(agg4),avg(agg5),avg(agg6),avg(agg7),ca_state,county,agg1,agg2,agg3,agg4,agg5,agg6,agg7,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count] InputAdapter @@ -177,7 +177,7 @@ TakeOrderedAndProject [ca_country,ca_state,ca_county,i_item_id,agg1,agg2,agg3,ag WholeStageCodegen (38) Sort [cd_demo_sk] InputAdapter - ReusedExchange [cd_demo_sk] #9 + ReusedExchange 
[cd_demo_sk] #8 WholeStageCodegen (50) HashAggregate [i_item_id,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count] [avg(agg1),avg(agg2),avg(agg3),avg(agg4),avg(agg5),avg(agg6),avg(agg7),ca_country,ca_state,county,agg1,agg2,agg3,agg4,agg5,agg6,agg7,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count,sum,count] InputAdapter diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt index c7b8685b64bea..5d8f0d04161bf 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/explain.txt @@ -120,7 +120,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -128,7 +128,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.2)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.2)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -156,7 +156,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 5] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -171,7 +171,7 @@ Condition : ((cnt#22 >= 15) AND (cnt#22 <= 20)) (29) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] -Arguments: hashpartitioning(ss_customer_sk#2, 5), true, [id=#23] +Arguments: hashpartitioning(ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#23] (30) Sort [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22] @@ -193,7 +193,7 @@ Condition : isnotnull(c_customer_sk#24) (34) Exchange Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] -Arguments: hashpartitioning(c_customer_sk#24, 5), true, [id=#29] +Arguments: hashpartitioning(c_customer_sk#24, 5), ENSURE_REQUIREMENTS, 
[id=#29] (35) Sort [codegen id : 8] Input [5]: [c_customer_sk#24, c_salutation#25, c_first_name#26, c_last_name#27, c_preferred_cust_flag#28] @@ -210,7 +210,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#24, c_sa (38) Exchange Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(c_last_name#27 ASC NULLS FIRST, c_first_name#26 ASC NULLS FIRST, c_salutation#25 ASC NULLS FIRST, c_preferred_cust_flag#28 DESC NULLS LAST, ss_ticket_number#5 ASC NULLS FIRST, 5), true, [id=#30] +Arguments: rangepartitioning(c_last_name#27 ASC NULLS FIRST, c_first_name#26 ASC NULLS FIRST, c_salutation#25 ASC NULLS FIRST, c_preferred_cust_flag#28 DESC NULLS LAST, ss_ticket_number#5 ASC NULLS FIRST, 5), ENSURE_REQUIREMENTS, [id=#30] (39) Sort [codegen id : 10] Input [6]: [c_last_name#27, c_first_name#26, c_salutation#25, c_preferred_cust_flag#28, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/simplified.txt index 451659e2c617c..244478fd68825 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34.sf100/simplified.txt @@ -47,7 +47,7 @@ WholeStageCodegen (10) BroadcastExchange #6 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt index 01b5f46bd5dd4..e588993073a91 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/explain.txt @@ -117,7 +117,7 @@ Input [5]: [ss_customer_sk#2, ss_hdemo_sk#3, ss_store_sk#4, ss_ticket_number#5, Output [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] Batched: true Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_vehicle_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] +PushedFilters: [IsNotNull(hd_vehicle_count), IsNotNull(hd_dep_count), Or(EqualTo(hd_buy_potential,>10000),EqualTo(hd_buy_potential,unknown)), GreaterThan(hd_vehicle_count,0), GreaterThan(hd_vehicle_count,0), IsNotNull(hd_demo_sk)] ReadSchema: struct (19) ColumnarToRow [codegen id : 3] @@ -125,7 +125,7 @@ Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_coun (20) Filter [codegen id : 3] Input [4]: [hd_demo_sk#13, hd_buy_potential#14, hd_dep_count#15, hd_vehicle_count#16] -Condition : ((((isnotnull(hd_vehicle_count#16) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND (CASE WHEN (hd_vehicle_count#16 > 0) THEN (cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) ELSE null END > 1.2)) AND isnotnull(hd_demo_sk#13)) +Condition : (((((isnotnull(hd_vehicle_count#16) AND 
isnotnull(hd_dep_count#15)) AND ((hd_buy_potential#14 = >10000) OR (hd_buy_potential#14 = unknown))) AND (hd_vehicle_count#16 > 0)) AND ((cast(hd_dep_count#15 as double) / cast(hd_vehicle_count#16 as double)) > 1.2)) AND isnotnull(hd_demo_sk#13)) (21) Project [codegen id : 3] Output [1]: [hd_demo_sk#13] @@ -153,7 +153,7 @@ Results [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] (26) Exchange Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] -Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), true, [id=#20] +Arguments: hashpartitioning(ss_ticket_number#5, ss_customer_sk#2, 5), ENSURE_REQUIREMENTS, [id=#20] (27) HashAggregate [codegen id : 6] Input [3]: [ss_ticket_number#5, ss_customer_sk#2, count#19] @@ -195,7 +195,7 @@ Input [8]: [ss_ticket_number#5, ss_customer_sk#2, cnt#22, c_customer_sk#23, c_sa (35) Exchange Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] -Arguments: rangepartitioning(c_last_name#26 ASC NULLS FIRST, c_first_name#25 ASC NULLS FIRST, c_salutation#24 ASC NULLS FIRST, c_preferred_cust_flag#27 DESC NULLS LAST, ss_ticket_number#5 ASC NULLS FIRST, 5), true, [id=#29] +Arguments: rangepartitioning(c_last_name#26 ASC NULLS FIRST, c_first_name#25 ASC NULLS FIRST, c_salutation#24 ASC NULLS FIRST, c_preferred_cust_flag#27 DESC NULLS LAST, ss_ticket_number#5 ASC NULLS FIRST, 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 7] Input [6]: [c_last_name#26, c_first_name#25, c_salutation#24, c_preferred_cust_flag#27, ss_ticket_number#5, cnt#22] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/simplified.txt index 8aa32fed5a176..22cab3a42862f 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q34/simplified.txt @@ -41,7 +41,7 @@ WholeStageCodegen (7) BroadcastExchange #5 WholeStageCodegen (3) Project [hd_demo_sk] - Filter [hd_vehicle_count,hd_buy_potential,hd_dep_count,hd_demo_sk] + Filter [hd_vehicle_count,hd_dep_count,hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential,hd_dep_count,hd_vehicle_count] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt index 107343f091fb2..20ea78c9140e6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/explain.txt @@ -1,53 +1,49 @@ == Physical Plan == -TakeOrderedAndProject (49) -+- * Project (48) - +- Window (47) - +- * Sort (46) - +- Exchange (45) - +- * HashAggregate (44) - +- Exchange (43) - +- * HashAggregate (42) - +- Union (41) - :- * HashAggregate (35) - : +- Exchange (34) - : +- * HashAggregate (33) - : +- Union (32) - : :- * HashAggregate (26) - : : +- Exchange (25) - : : +- * HashAggregate (24) - : : +- * Project (23) - : : +- * BroadcastHashJoin Inner BuildRight (22) - : : :- * Project (17) - : : : +- * BroadcastHashJoin Inner BuildRight (16) - : : : :- * Project (10) - : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : :- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.store_sales 
(1) - : : : : +- BroadcastExchange (8) - : : : : +- * Project (7) - : : : : +- * Filter (6) - : : : : +- * ColumnarToRow (5) - : : : : +- Scan parquet default.date_dim (4) - : : : +- BroadcastExchange (15) - : : : +- * Project (14) - : : : +- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.store (11) - : : +- BroadcastExchange (21) - : : +- * Filter (20) - : : +- * ColumnarToRow (19) - : : +- Scan parquet default.item (18) - : +- * HashAggregate (31) - : +- Exchange (30) - : +- * HashAggregate (29) - : +- * HashAggregate (28) - : +- ReusedExchange (27) - +- * HashAggregate (40) - +- Exchange (39) - +- * HashAggregate (38) - +- * HashAggregate (37) - +- ReusedExchange (36) +TakeOrderedAndProject (45) ++- * Project (44) + +- Window (43) + +- * Sort (42) + +- Exchange (41) + +- * HashAggregate (40) + +- Exchange (39) + +- * HashAggregate (38) + +- Union (37) + :- * HashAggregate (26) + : +- Exchange (25) + : +- * HashAggregate (24) + : +- * Project (23) + : +- * BroadcastHashJoin Inner BuildRight (22) + : :- * Project (17) + : : +- * BroadcastHashJoin Inner BuildRight (16) + : : :- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.store_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) + : : +- BroadcastExchange (15) + : : +- * Project (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.store (11) + : +- BroadcastExchange (21) + : +- * Filter (20) + : +- * ColumnarToRow (19) + : +- Scan parquet default.item (18) + :- * HashAggregate (31) + : +- Exchange (30) + : +- * HashAggregate (29) + : +- * HashAggregate (28) + : +- ReusedExchange (27) + +- * HashAggregate (36) + +- Exchange (35) + +- * HashAggregate (34) + +- * HashAggregate (33) + +- ReusedExchange (32) (1) Scan parquet default.store_sales @@ -162,7 +158,7 @@ Results [4]: [i_category#14, i_class#13, sum#18, sum#19] (25) Exchange Input [4]: [i_category#14, i_class#13, sum#18, sum#19] -Arguments: hashpartitioning(i_category#14, i_class#13, 5), true, [id=#20] +Arguments: hashpartitioning(i_category#14, i_class#13, 5), ENSURE_REQUIREMENTS, [id=#20] (26) HashAggregate [codegen id : 5] Input [4]: [i_category#14, i_class#13, sum#18, sum#19] @@ -190,7 +186,7 @@ Results [5]: [i_category#14, sum#37, isEmpty#38, sum#39, isEmpty#40] (30) Exchange Input [5]: [i_category#14, sum#37, isEmpty#38, sum#39, isEmpty#40] -Arguments: hashpartitioning(i_category#14, 5), true, [id=#41] +Arguments: hashpartitioning(i_category#14, 5), ENSURE_REQUIREMENTS, [id=#41] (31) HashAggregate [codegen id : 11] Input [5]: [i_category#14, sum#37, isEmpty#38, sum#39, isEmpty#40] @@ -199,91 +195,71 @@ Functions [2]: [sum(ss_net_profit#31), sum(ss_ext_sales_price#32)] Aggregate Attributes [2]: [sum(ss_net_profit#31)#42, sum(ss_ext_sales_price#32)#43] Results [6]: [cast(CheckOverflow((promote_precision(sum(ss_net_profit#31)#42) / promote_precision(sum(ss_ext_sales_price#32)#43)), DecimalType(38,11), true) as decimal(38,20)) AS gross_margin#44, i_category#14, null AS i_class#45, 0 AS t_category#46, 1 AS t_class#47, 1 AS lochierarchy#48] -(32) Union +(32) ReusedExchange [Reuses operator id: 25] +Output [4]: [i_category#14, i_class#13, sum#49, sum#50] -(33) HashAggregate [codegen id : 12] -Input [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] -Keys 
[6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] -Functions: [] -Aggregate Attributes: [] -Results [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] - -(34) Exchange -Input [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] -Arguments: hashpartitioning(gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26, 5), true, [id=#49] - -(35) HashAggregate [codegen id : 13] -Input [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] -Keys [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] -Functions: [] -Aggregate Attributes: [] -Results [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] - -(36) ReusedExchange [Reuses operator id: 25] -Output [4]: [i_category#14, i_class#13, sum#50, sum#51] - -(37) HashAggregate [codegen id : 18] -Input [4]: [i_category#14, i_class#13, sum#50, sum#51] +(33) HashAggregate [codegen id : 16] +Input [4]: [i_category#14, i_class#13, sum#49, sum#50] Keys [2]: [i_category#14, i_class#13] Functions [2]: [sum(UnscaledValue(ss_net_profit#5)), sum(UnscaledValue(ss_ext_sales_price#4))] -Aggregate Attributes [2]: [sum(UnscaledValue(ss_net_profit#5))#52, sum(UnscaledValue(ss_ext_sales_price#4))#53] -Results [2]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#5))#52,17,2) AS ss_net_profit#31, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))#53,17,2) AS ss_ext_sales_price#32] +Aggregate Attributes [2]: [sum(UnscaledValue(ss_net_profit#5))#51, sum(UnscaledValue(ss_ext_sales_price#4))#52] +Results [2]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#5))#51,17,2) AS ss_net_profit#31, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))#52,17,2) AS ss_ext_sales_price#32] -(38) HashAggregate [codegen id : 18] +(34) HashAggregate [codegen id : 16] Input [2]: [ss_net_profit#31, ss_ext_sales_price#32] Keys: [] Functions [2]: [partial_sum(ss_net_profit#31), partial_sum(ss_ext_sales_price#32)] -Aggregate Attributes [4]: [sum#54, isEmpty#55, sum#56, isEmpty#57] -Results [4]: [sum#58, isEmpty#59, sum#60, isEmpty#61] +Aggregate Attributes [4]: [sum#53, isEmpty#54, sum#55, isEmpty#56] +Results [4]: [sum#57, isEmpty#58, sum#59, isEmpty#60] -(39) Exchange -Input [4]: [sum#58, isEmpty#59, sum#60, isEmpty#61] -Arguments: SinglePartition, true, [id=#62] +(35) Exchange +Input [4]: [sum#57, isEmpty#58, sum#59, isEmpty#60] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#61] -(40) HashAggregate [codegen id : 19] -Input [4]: [sum#58, isEmpty#59, sum#60, isEmpty#61] +(36) HashAggregate [codegen id : 17] +Input [4]: [sum#57, isEmpty#58, sum#59, isEmpty#60] Keys: [] Functions [2]: [sum(ss_net_profit#31), sum(ss_ext_sales_price#32)] -Aggregate Attributes [2]: [sum(ss_net_profit#31)#63, sum(ss_ext_sales_price#32)#64] -Results [6]: [cast(CheckOverflow((promote_precision(sum(ss_net_profit#31)#63) / promote_precision(sum(ss_ext_sales_price#32)#64)), DecimalType(38,11), true) as decimal(38,20)) AS gross_margin#65, null AS i_category#66, null AS i_class#67, 1 AS t_category#68, 1 AS t_class#69, 2 AS lochierarchy#70] +Aggregate Attributes [2]: [sum(ss_net_profit#31)#62, sum(ss_ext_sales_price#32)#63] +Results [6]: [cast(CheckOverflow((promote_precision(sum(ss_net_profit#31)#62) / promote_precision(sum(ss_ext_sales_price#32)#63)), DecimalType(38,11), true) as decimal(38,20)) AS gross_margin#64, null AS i_category#65, null AS 
i_class#66, 1 AS t_category#67, 1 AS t_class#68, 2 AS lochierarchy#69] -(41) Union +(37) Union -(42) HashAggregate [codegen id : 20] +(38) HashAggregate [codegen id : 18] Input [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] Keys [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] Functions: [] Aggregate Attributes: [] Results [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] -(43) Exchange +(39) Exchange Input [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] -Arguments: hashpartitioning(gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26, 5), true, [id=#71] +Arguments: hashpartitioning(gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26, 5), ENSURE_REQUIREMENTS, [id=#70] -(44) HashAggregate [codegen id : 21] +(40) HashAggregate [codegen id : 19] Input [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] Keys [6]: [gross_margin#23, i_category#14, i_class#13, t_category#24, t_class#25, lochierarchy#26] Functions: [] Aggregate Attributes: [] -Results [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, CASE WHEN (t_class#25 = 0) THEN i_category#14 END AS _w0#72] +Results [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, CASE WHEN (t_class#25 = 0) THEN i_category#14 END AS _w0#71] -(45) Exchange -Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#72] -Arguments: hashpartitioning(lochierarchy#26, _w0#72, 5), true, [id=#73] +(41) Exchange +Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#71] +Arguments: hashpartitioning(lochierarchy#26, _w0#71, 5), ENSURE_REQUIREMENTS, [id=#72] -(46) Sort [codegen id : 22] -Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#72] -Arguments: [lochierarchy#26 ASC NULLS FIRST, _w0#72 ASC NULLS FIRST, gross_margin#23 ASC NULLS FIRST], false, 0 +(42) Sort [codegen id : 20] +Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#71] +Arguments: [lochierarchy#26 ASC NULLS FIRST, _w0#71 ASC NULLS FIRST, gross_margin#23 ASC NULLS FIRST], false, 0 -(47) Window -Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#72] -Arguments: [rank(gross_margin#23) windowspecdefinition(lochierarchy#26, _w0#72, gross_margin#23 ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#74], [lochierarchy#26, _w0#72], [gross_margin#23 ASC NULLS FIRST] +(43) Window +Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#71] +Arguments: [rank(gross_margin#23) windowspecdefinition(lochierarchy#26, _w0#71, gross_margin#23 ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#73], [lochierarchy#26, _w0#71], [gross_margin#23 ASC NULLS FIRST] -(48) Project [codegen id : 23] -Output [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, rank_within_parent#74] -Input [6]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#72, rank_within_parent#74] +(44) Project [codegen id : 21] +Output [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, rank_within_parent#73] +Input [6]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, _w0#71, rank_within_parent#73] -(49) TakeOrderedAndProject 
-Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, rank_within_parent#74] -Arguments: 100, [lochierarchy#26 DESC NULLS LAST, CASE WHEN (lochierarchy#26 = 0) THEN i_category#14 END ASC NULLS FIRST, rank_within_parent#74 ASC NULLS FIRST], [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, rank_within_parent#74] +(45) TakeOrderedAndProject +Input [5]: [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, rank_within_parent#73] +Arguments: 100, [lochierarchy#26 DESC NULLS LAST, CASE WHEN (lochierarchy#26 = 0) THEN i_category#14 END ASC NULLS FIRST, rank_within_parent#73 ASC NULLS FIRST], [gross_margin#23, i_category#14, i_class#13, lochierarchy#26, rank_within_parent#73] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/simplified.txt index aa85d4870683d..f1cf7e8587cc1 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a.sf100/simplified.txt @@ -1,82 +1,74 @@ TakeOrderedAndProject [lochierarchy,i_category,rank_within_parent,gross_margin,i_class] - WholeStageCodegen (23) + WholeStageCodegen (21) Project [gross_margin,i_category,i_class,lochierarchy,rank_within_parent] InputAdapter Window [gross_margin,lochierarchy,_w0] - WholeStageCodegen (22) + WholeStageCodegen (20) Sort [lochierarchy,_w0,gross_margin] InputAdapter Exchange [lochierarchy,_w0] #1 - WholeStageCodegen (21) + WholeStageCodegen (19) HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] [_w0] InputAdapter Exchange [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] #2 - WholeStageCodegen (20) + WholeStageCodegen (18) HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] InputAdapter Union - WholeStageCodegen (13) - HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] + WholeStageCodegen (5) + HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),gross_margin,t_category,t_class,lochierarchy,sum,sum] InputAdapter - Exchange [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] #3 - WholeStageCodegen (12) - HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] - InputAdapter - Union - WholeStageCodegen (5) - HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),gross_margin,t_category,t_class,lochierarchy,sum,sum] - InputAdapter - Exchange [i_category,i_class] #4 - WholeStageCodegen (4) - HashAggregate [i_category,i_class,ss_net_profit,ss_ext_sales_price] [sum,sum,sum,sum] - Project [ss_ext_sales_price,ss_net_profit,i_class,i_category] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_ext_sales_price,ss_net_profit] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_item_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] + 
Exchange [i_category,i_class] #3 + WholeStageCodegen (4) + HashAggregate [i_category,i_class,ss_net_profit,ss_ext_sales_price] [sum,sum,sum,sum] + Project [ss_ext_sales_price,ss_net_profit,i_class,i_category] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_ext_sales_price,ss_net_profit] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_item_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Project [s_store_sk] - Filter [s_state,s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_state] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (3) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_class,i_category] - WholeStageCodegen (11) - HashAggregate [i_category,sum,isEmpty,sum,isEmpty] [sum(ss_net_profit),sum(ss_ext_sales_price),gross_margin,i_class,t_category,t_class,lochierarchy,sum,isEmpty,sum,isEmpty] + Scan parquet default.date_dim [d_date_sk,d_year] InputAdapter - Exchange [i_category] #8 - WholeStageCodegen (10) - HashAggregate [i_category,ss_net_profit,ss_ext_sales_price] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),ss_net_profit,ss_ext_sales_price,sum,sum] - InputAdapter - ReusedExchange [i_category,i_class,sum,sum] #4 - WholeStageCodegen (19) + BroadcastExchange #5 + WholeStageCodegen (2) + Project [s_store_sk] + Filter [s_state,s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_state] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (3) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_class,i_category] + WholeStageCodegen (11) + HashAggregate [i_category,sum,isEmpty,sum,isEmpty] [sum(ss_net_profit),sum(ss_ext_sales_price),gross_margin,i_class,t_category,t_class,lochierarchy,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [i_category] #7 + WholeStageCodegen (10) + HashAggregate [i_category,ss_net_profit,ss_ext_sales_price] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),ss_net_profit,ss_ext_sales_price,sum,sum] + InputAdapter + ReusedExchange [i_category,i_class,sum,sum] #3 + WholeStageCodegen (17) HashAggregate [sum,isEmpty,sum,isEmpty] [sum(ss_net_profit),sum(ss_ext_sales_price),gross_margin,i_category,i_class,t_category,t_class,lochierarchy,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #9 - WholeStageCodegen (18) + Exchange #8 + WholeStageCodegen (16) HashAggregate [ss_net_profit,ss_ext_sales_price] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),ss_net_profit,ss_ext_sales_price,sum,sum] InputAdapter - ReusedExchange [i_category,i_class,sum,sum] #4 + ReusedExchange [i_category,i_class,sum,sum] #3 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt index 0d6dfa6f90a86..40b823563a890 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/explain.txt @@ -1,53 +1,49 @@ == Physical Plan == -TakeOrderedAndProject (49) -+- * Project (48) - +- Window (47) - +- * Sort (46) - +- Exchange (45) - +- * HashAggregate (44) - +- Exchange (43) - +- * HashAggregate (42) - +- Union (41) - :- * HashAggregate (35) - : +- Exchange (34) - : +- * HashAggregate (33) - : +- Union (32) - : :- * HashAggregate (26) - : : +- Exchange (25) - : : +- * HashAggregate (24) - : : +- * Project (23) - : : +- * BroadcastHashJoin Inner BuildRight (22) - : : :- * Project (16) - : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : :- * Project (10) - : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : :- * Filter (3) - : : : : : +- * ColumnarToRow (2) - : : : : : +- Scan parquet default.store_sales (1) - : : : : +- BroadcastExchange (8) - : : : : +- * Project (7) - : : : : +- * Filter (6) - : : : : +- * ColumnarToRow (5) - : : : : +- Scan parquet default.date_dim (4) - : : : +- BroadcastExchange (14) - : : : +- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.item (11) - : : +- BroadcastExchange (21) - : : +- * Project (20) - : : +- * Filter (19) - : : +- * ColumnarToRow (18) - : : +- Scan parquet default.store (17) - : +- * HashAggregate (31) - : +- Exchange (30) - : +- * HashAggregate (29) - : +- * HashAggregate (28) - : +- ReusedExchange (27) - +- * HashAggregate (40) - +- Exchange (39) - +- * HashAggregate (38) - +- * HashAggregate (37) - +- ReusedExchange (36) +TakeOrderedAndProject (45) ++- * Project (44) + +- Window (43) + +- * Sort (42) + +- Exchange (41) + +- * HashAggregate (40) + +- Exchange (39) + +- * HashAggregate (38) + +- Union (37) + :- * HashAggregate (26) + : +- Exchange (25) + : +- * HashAggregate (24) + : +- * Project (23) + : +- * BroadcastHashJoin Inner BuildRight (22) + : :- * Project (16) + : : +- * BroadcastHashJoin Inner BuildRight (15) + : : :- * Project (10) + : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : :- * Filter (3) + : : : : +- * ColumnarToRow (2) + : : : : +- Scan parquet default.store_sales (1) + : : : +- BroadcastExchange (8) + : : : +- * Project (7) + : : : +- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.date_dim (4) + : : +- BroadcastExchange (14) + : : +- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.item (11) + : +- BroadcastExchange (21) + : +- * Project (20) + : +- * Filter (19) + : +- * ColumnarToRow (18) + : +- Scan parquet default.store (17) + :- * HashAggregate (31) + : +- Exchange (30) + : +- * HashAggregate (29) + : +- * HashAggregate (28) + : +- ReusedExchange (27) + +- * HashAggregate (36) + +- Exchange (35) + +- * HashAggregate (34) + +- * HashAggregate (33) + +- ReusedExchange (32) (1) Scan parquet default.store_sales @@ -162,7 +158,7 @@ Results [4]: [i_category#11, i_class#10, sum#18, sum#19] (25) Exchange Input [4]: [i_category#11, i_class#10, sum#18, sum#19] -Arguments: hashpartitioning(i_category#11, i_class#10, 5), true, [id=#20] +Arguments: hashpartitioning(i_category#11, i_class#10, 5), ENSURE_REQUIREMENTS, [id=#20] (26) HashAggregate [codegen id : 5] Input [4]: [i_category#11, i_class#10, sum#18, sum#19] @@ -190,7 +186,7 @@ Results [5]: [i_category#11, sum#37, isEmpty#38, sum#39, isEmpty#40] 
(30) Exchange Input [5]: [i_category#11, sum#37, isEmpty#38, sum#39, isEmpty#40] -Arguments: hashpartitioning(i_category#11, 5), true, [id=#41] +Arguments: hashpartitioning(i_category#11, 5), ENSURE_REQUIREMENTS, [id=#41] (31) HashAggregate [codegen id : 11] Input [5]: [i_category#11, sum#37, isEmpty#38, sum#39, isEmpty#40] @@ -199,91 +195,71 @@ Functions [2]: [sum(ss_net_profit#31), sum(ss_ext_sales_price#32)] Aggregate Attributes [2]: [sum(ss_net_profit#31)#42, sum(ss_ext_sales_price#32)#43] Results [6]: [cast(CheckOverflow((promote_precision(sum(ss_net_profit#31)#42) / promote_precision(sum(ss_ext_sales_price#32)#43)), DecimalType(38,11), true) as decimal(38,20)) AS gross_margin#44, i_category#11, null AS i_class#45, 0 AS t_category#46, 1 AS t_class#47, 1 AS lochierarchy#48] -(32) Union +(32) ReusedExchange [Reuses operator id: 25] +Output [4]: [i_category#11, i_class#10, sum#49, sum#50] -(33) HashAggregate [codegen id : 12] -Input [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] -Keys [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] -Functions: [] -Aggregate Attributes: [] -Results [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] - -(34) Exchange -Input [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] -Arguments: hashpartitioning(gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26, 5), true, [id=#49] - -(35) HashAggregate [codegen id : 13] -Input [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] -Keys [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] -Functions: [] -Aggregate Attributes: [] -Results [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] - -(36) ReusedExchange [Reuses operator id: 25] -Output [4]: [i_category#11, i_class#10, sum#50, sum#51] - -(37) HashAggregate [codegen id : 18] -Input [4]: [i_category#11, i_class#10, sum#50, sum#51] +(33) HashAggregate [codegen id : 16] +Input [4]: [i_category#11, i_class#10, sum#49, sum#50] Keys [2]: [i_category#11, i_class#10] Functions [2]: [sum(UnscaledValue(ss_net_profit#5)), sum(UnscaledValue(ss_ext_sales_price#4))] -Aggregate Attributes [2]: [sum(UnscaledValue(ss_net_profit#5))#52, sum(UnscaledValue(ss_ext_sales_price#4))#53] -Results [2]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#5))#52,17,2) AS ss_net_profit#31, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))#53,17,2) AS ss_ext_sales_price#32] +Aggregate Attributes [2]: [sum(UnscaledValue(ss_net_profit#5))#51, sum(UnscaledValue(ss_ext_sales_price#4))#52] +Results [2]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#5))#51,17,2) AS ss_net_profit#31, MakeDecimal(sum(UnscaledValue(ss_ext_sales_price#4))#52,17,2) AS ss_ext_sales_price#32] -(38) HashAggregate [codegen id : 18] +(34) HashAggregate [codegen id : 16] Input [2]: [ss_net_profit#31, ss_ext_sales_price#32] Keys: [] Functions [2]: [partial_sum(ss_net_profit#31), partial_sum(ss_ext_sales_price#32)] -Aggregate Attributes [4]: [sum#54, isEmpty#55, sum#56, isEmpty#57] -Results [4]: [sum#58, isEmpty#59, sum#60, isEmpty#61] +Aggregate Attributes [4]: [sum#53, isEmpty#54, sum#55, isEmpty#56] +Results [4]: [sum#57, isEmpty#58, sum#59, isEmpty#60] -(39) Exchange -Input [4]: [sum#58, isEmpty#59, sum#60, isEmpty#61] -Arguments: SinglePartition, true, [id=#62] +(35) Exchange +Input [4]: 
[sum#57, isEmpty#58, sum#59, isEmpty#60] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#61] -(40) HashAggregate [codegen id : 19] -Input [4]: [sum#58, isEmpty#59, sum#60, isEmpty#61] +(36) HashAggregate [codegen id : 17] +Input [4]: [sum#57, isEmpty#58, sum#59, isEmpty#60] Keys: [] Functions [2]: [sum(ss_net_profit#31), sum(ss_ext_sales_price#32)] -Aggregate Attributes [2]: [sum(ss_net_profit#31)#63, sum(ss_ext_sales_price#32)#64] -Results [6]: [cast(CheckOverflow((promote_precision(sum(ss_net_profit#31)#63) / promote_precision(sum(ss_ext_sales_price#32)#64)), DecimalType(38,11), true) as decimal(38,20)) AS gross_margin#65, null AS i_category#66, null AS i_class#67, 1 AS t_category#68, 1 AS t_class#69, 2 AS lochierarchy#70] +Aggregate Attributes [2]: [sum(ss_net_profit#31)#62, sum(ss_ext_sales_price#32)#63] +Results [6]: [cast(CheckOverflow((promote_precision(sum(ss_net_profit#31)#62) / promote_precision(sum(ss_ext_sales_price#32)#63)), DecimalType(38,11), true) as decimal(38,20)) AS gross_margin#64, null AS i_category#65, null AS i_class#66, 1 AS t_category#67, 1 AS t_class#68, 2 AS lochierarchy#69] -(41) Union +(37) Union -(42) HashAggregate [codegen id : 20] +(38) HashAggregate [codegen id : 18] Input [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] Keys [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] Functions: [] Aggregate Attributes: [] Results [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] -(43) Exchange +(39) Exchange Input [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] -Arguments: hashpartitioning(gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26, 5), true, [id=#71] +Arguments: hashpartitioning(gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26, 5), ENSURE_REQUIREMENTS, [id=#70] -(44) HashAggregate [codegen id : 21] +(40) HashAggregate [codegen id : 19] Input [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] Keys [6]: [gross_margin#23, i_category#11, i_class#10, t_category#24, t_class#25, lochierarchy#26] Functions: [] Aggregate Attributes: [] -Results [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, CASE WHEN (t_class#25 = 0) THEN i_category#11 END AS _w0#72] +Results [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, CASE WHEN (t_class#25 = 0) THEN i_category#11 END AS _w0#71] -(45) Exchange -Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#72] -Arguments: hashpartitioning(lochierarchy#26, _w0#72, 5), true, [id=#73] +(41) Exchange +Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#71] +Arguments: hashpartitioning(lochierarchy#26, _w0#71, 5), ENSURE_REQUIREMENTS, [id=#72] -(46) Sort [codegen id : 22] -Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#72] -Arguments: [lochierarchy#26 ASC NULLS FIRST, _w0#72 ASC NULLS FIRST, gross_margin#23 ASC NULLS FIRST], false, 0 +(42) Sort [codegen id : 20] +Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#71] +Arguments: [lochierarchy#26 ASC NULLS FIRST, _w0#71 ASC NULLS FIRST, gross_margin#23 ASC NULLS FIRST], false, 0 -(47) Window -Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#72] -Arguments: [rank(gross_margin#23) windowspecdefinition(lochierarchy#26, _w0#72, 
gross_margin#23 ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#74], [lochierarchy#26, _w0#72], [gross_margin#23 ASC NULLS FIRST] +(43) Window +Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#71] +Arguments: [rank(gross_margin#23) windowspecdefinition(lochierarchy#26, _w0#71, gross_margin#23 ASC NULLS FIRST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#73], [lochierarchy#26, _w0#71], [gross_margin#23 ASC NULLS FIRST] -(48) Project [codegen id : 23] -Output [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, rank_within_parent#74] -Input [6]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#72, rank_within_parent#74] +(44) Project [codegen id : 21] +Output [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, rank_within_parent#73] +Input [6]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, _w0#71, rank_within_parent#73] -(49) TakeOrderedAndProject -Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, rank_within_parent#74] -Arguments: 100, [lochierarchy#26 DESC NULLS LAST, CASE WHEN (lochierarchy#26 = 0) THEN i_category#11 END ASC NULLS FIRST, rank_within_parent#74 ASC NULLS FIRST], [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, rank_within_parent#74] +(45) TakeOrderedAndProject +Input [5]: [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, rank_within_parent#73] +Arguments: 100, [lochierarchy#26 DESC NULLS LAST, CASE WHEN (lochierarchy#26 = 0) THEN i_category#11 END ASC NULLS FIRST, rank_within_parent#73 ASC NULLS FIRST], [gross_margin#23, i_category#11, i_class#10, lochierarchy#26, rank_within_parent#73] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/simplified.txt index a72781e1da0ed..297c414a18cb0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q36a/simplified.txt @@ -1,82 +1,74 @@ TakeOrderedAndProject [lochierarchy,i_category,rank_within_parent,gross_margin,i_class] - WholeStageCodegen (23) + WholeStageCodegen (21) Project [gross_margin,i_category,i_class,lochierarchy,rank_within_parent] InputAdapter Window [gross_margin,lochierarchy,_w0] - WholeStageCodegen (22) + WholeStageCodegen (20) Sort [lochierarchy,_w0,gross_margin] InputAdapter Exchange [lochierarchy,_w0] #1 - WholeStageCodegen (21) + WholeStageCodegen (19) HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] [_w0] InputAdapter Exchange [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] #2 - WholeStageCodegen (20) + WholeStageCodegen (18) HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] InputAdapter Union - WholeStageCodegen (13) - HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] + WholeStageCodegen (5) + HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),gross_margin,t_category,t_class,lochierarchy,sum,sum] InputAdapter - Exchange [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] #3 - WholeStageCodegen (12) - HashAggregate [gross_margin,i_category,i_class,t_category,t_class,lochierarchy] - InputAdapter - Union - WholeStageCodegen (5) - HashAggregate 
[i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),gross_margin,t_category,t_class,lochierarchy,sum,sum] - InputAdapter - Exchange [i_category,i_class] #4 - WholeStageCodegen (4) - HashAggregate [i_category,i_class,ss_net_profit,ss_ext_sales_price] [sum,sum,sum,sum] - Project [ss_ext_sales_price,ss_net_profit,i_class,i_category] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_store_sk,ss_ext_sales_price,ss_net_profit,i_class,i_category] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_item_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] + Exchange [i_category,i_class] #3 + WholeStageCodegen (4) + HashAggregate [i_category,i_class,ss_net_profit,ss_ext_sales_price] [sum,sum,sum,sum] + Project [ss_ext_sales_price,ss_net_profit,i_class,i_category] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_ext_sales_price,ss_net_profit,i_class,i_category] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_item_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_year,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_class,i_category] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (3) - Project [s_store_sk] - Filter [s_state,s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_state] - WholeStageCodegen (11) - HashAggregate [i_category,sum,isEmpty,sum,isEmpty] [sum(ss_net_profit),sum(ss_ext_sales_price),gross_margin,i_class,t_category,t_class,lochierarchy,sum,isEmpty,sum,isEmpty] + Scan parquet default.date_dim [d_date_sk,d_year] InputAdapter - Exchange [i_category] #8 - WholeStageCodegen (10) - HashAggregate [i_category,ss_net_profit,ss_ext_sales_price] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),ss_net_profit,ss_ext_sales_price,sum,sum] + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [i_item_sk] + ColumnarToRow InputAdapter - ReusedExchange [i_category,i_class,sum,sum] #4 - WholeStageCodegen (19) + Scan parquet default.item [i_item_sk,i_class,i_category] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (3) + Project [s_store_sk] + Filter [s_state,s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_state] + WholeStageCodegen (11) + HashAggregate [i_category,sum,isEmpty,sum,isEmpty] [sum(ss_net_profit),sum(ss_ext_sales_price),gross_margin,i_class,t_category,t_class,lochierarchy,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [i_category] #7 + WholeStageCodegen (10) + HashAggregate 
[i_category,ss_net_profit,ss_ext_sales_price] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),ss_net_profit,ss_ext_sales_price,sum,sum] + InputAdapter + ReusedExchange [i_category,i_class,sum,sum] #3 + WholeStageCodegen (17) HashAggregate [sum,isEmpty,sum,isEmpty] [sum(ss_net_profit),sum(ss_ext_sales_price),gross_margin,i_category,i_class,t_category,t_class,lochierarchy,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #9 - WholeStageCodegen (18) + Exchange #8 + WholeStageCodegen (16) HashAggregate [ss_net_profit,ss_ext_sales_price] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [i_category,i_class,sum,sum] [sum(UnscaledValue(ss_net_profit)),sum(UnscaledValue(ss_ext_sales_price)),ss_net_profit,ss_ext_sales_price,sum,sum] InputAdapter - ReusedExchange [i_category,i_class,sum,sum] #4 + ReusedExchange [i_category,i_class,sum,sum] #3 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt index 471d38c89e601..411cbf4809cd1 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/explain.txt @@ -1,102 +1,98 @@ == Physical Plan == -TakeOrderedAndProject (98) -+- * HashAggregate (97) - +- Exchange (96) - +- * HashAggregate (95) - +- Union (94) - :- * HashAggregate (88) - : +- Exchange (87) - : +- * HashAggregate (86) - : +- Union (85) - : :- * HashAggregate (79) - : : +- Exchange (78) - : : +- * HashAggregate (77) - : : +- Union (76) - : : :- * HashAggregate (25) - : : : +- Exchange (24) - : : : +- * HashAggregate (23) - : : : +- * Project (22) - : : : +- * BroadcastHashJoin Inner BuildRight (21) - : : : :- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- Union (9) - : : : : : :- * Project (4) - : : : : : : +- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.store_sales (1) - : : : : : +- * Project (8) - : : : : : +- * Filter (7) - : : : : : +- * ColumnarToRow (6) - : : : : : +- Scan parquet default.store_returns (5) - : : : : +- BroadcastExchange (14) - : : : : +- * Project (13) - : : : : +- * Filter (12) - : : : : +- * ColumnarToRow (11) - : : : : +- Scan parquet default.date_dim (10) - : : : +- BroadcastExchange (20) - : : : +- * Filter (19) - : : : +- * ColumnarToRow (18) - : : : +- Scan parquet default.store (17) - : : :- * HashAggregate (46) - : : : +- Exchange (45) - : : : +- * HashAggregate (44) - : : : +- * Project (43) - : : : +- * BroadcastHashJoin Inner BuildRight (42) - : : : :- * Project (37) - : : : : +- * BroadcastHashJoin Inner BuildRight (36) - : : : : :- Union (34) - : : : : : :- * Project (29) - : : : : : : +- * Filter (28) - : : : : : : +- * ColumnarToRow (27) - : : : : : : +- Scan parquet default.catalog_sales (26) - : : : : : +- * Project (33) - : : : : : +- * Filter (32) - : : : : : +- * ColumnarToRow (31) - : : : : : +- Scan parquet default.catalog_returns (30) - : : : : +- ReusedExchange (35) - : : : +- BroadcastExchange (41) - : : : +- * Filter (40) - : : : +- * ColumnarToRow (39) - : : : +- Scan parquet default.catalog_page (38) - : : +- * HashAggregate (75) - : : +- Exchange (74) - : : +- * HashAggregate (73) - : : +- * Project (72) - : : +- * BroadcastHashJoin Inner BuildRight (71) - : : :- * 
Project (66) - : : : +- * BroadcastHashJoin Inner BuildRight (65) - : : : :- Union (63) - : : : : :- * Project (50) - : : : : : +- * Filter (49) - : : : : : +- * ColumnarToRow (48) - : : : : : +- Scan parquet default.web_sales (47) - : : : : +- * Project (62) - : : : : +- * SortMergeJoin Inner (61) - : : : : :- * Sort (55) - : : : : : +- Exchange (54) - : : : : : +- * Filter (53) - : : : : : +- * ColumnarToRow (52) - : : : : : +- Scan parquet default.web_returns (51) - : : : : +- * Sort (60) - : : : : +- Exchange (59) - : : : : +- * Filter (58) - : : : : +- * ColumnarToRow (57) - : : : : +- Scan parquet default.web_sales (56) - : : : +- ReusedExchange (64) - : : +- BroadcastExchange (70) - : : +- * Filter (69) - : : +- * ColumnarToRow (68) - : : +- Scan parquet default.web_site (67) - : +- * HashAggregate (84) - : +- Exchange (83) - : +- * HashAggregate (82) - : +- * HashAggregate (81) - : +- ReusedExchange (80) - +- * HashAggregate (93) - +- Exchange (92) - +- * HashAggregate (91) - +- * HashAggregate (90) - +- ReusedExchange (89) +TakeOrderedAndProject (94) ++- * HashAggregate (93) + +- Exchange (92) + +- * HashAggregate (91) + +- Union (90) + :- * HashAggregate (79) + : +- Exchange (78) + : +- * HashAggregate (77) + : +- Union (76) + : :- * HashAggregate (25) + : : +- Exchange (24) + : : +- * HashAggregate (23) + : : +- * Project (22) + : : +- * BroadcastHashJoin Inner BuildRight (21) + : : :- * Project (15) + : : : +- * BroadcastHashJoin Inner BuildRight (14) + : : : :- Union (9) + : : : : :- * Project (4) + : : : : : +- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- * Project (8) + : : : : +- * Filter (7) + : : : : +- * ColumnarToRow (6) + : : : : +- Scan parquet default.store_returns (5) + : : : +- BroadcastExchange (13) + : : : +- * Filter (12) + : : : +- * ColumnarToRow (11) + : : : +- Scan parquet default.store (10) + : : +- BroadcastExchange (20) + : : +- * Project (19) + : : +- * Filter (18) + : : +- * ColumnarToRow (17) + : : +- Scan parquet default.date_dim (16) + : :- * HashAggregate (46) + : : +- Exchange (45) + : : +- * HashAggregate (44) + : : +- * Project (43) + : : +- * BroadcastHashJoin Inner BuildRight (42) + : : :- * Project (40) + : : : +- * BroadcastHashJoin Inner BuildRight (39) + : : : :- Union (34) + : : : : :- * Project (29) + : : : : : +- * Filter (28) + : : : : : +- * ColumnarToRow (27) + : : : : : +- Scan parquet default.catalog_sales (26) + : : : : +- * Project (33) + : : : : +- * Filter (32) + : : : : +- * ColumnarToRow (31) + : : : : +- Scan parquet default.catalog_returns (30) + : : : +- BroadcastExchange (38) + : : : +- * Filter (37) + : : : +- * ColumnarToRow (36) + : : : +- Scan parquet default.catalog_page (35) + : : +- ReusedExchange (41) + : +- * HashAggregate (75) + : +- Exchange (74) + : +- * HashAggregate (73) + : +- * Project (72) + : +- * BroadcastHashJoin Inner BuildRight (71) + : :- * Project (69) + : : +- * BroadcastHashJoin Inner BuildRight (68) + : : :- Union (63) + : : : :- * Project (50) + : : : : +- * Filter (49) + : : : : +- * ColumnarToRow (48) + : : : : +- Scan parquet default.web_sales (47) + : : : +- * Project (62) + : : : +- * SortMergeJoin Inner (61) + : : : :- * Sort (55) + : : : : +- Exchange (54) + : : : : +- * Filter (53) + : : : : +- * ColumnarToRow (52) + : : : : +- Scan parquet default.web_returns (51) + : : : +- * Sort (60) + : : : +- Exchange (59) + : : : +- * Filter (58) + : : : +- * ColumnarToRow (57) + : : : +- Scan parquet default.web_sales (56) + : : 
+- BroadcastExchange (67) + : : +- * Filter (66) + : : +- * ColumnarToRow (65) + : : +- Scan parquet default.web_site (64) + : +- ReusedExchange (70) + :- * HashAggregate (84) + : +- Exchange (83) + : +- * HashAggregate (82) + : +- * HashAggregate (81) + : +- ReusedExchange (80) + +- * HashAggregate (89) + +- Exchange (88) + +- * HashAggregate (87) + +- * HashAggregate (86) + +- ReusedExchange (85) (1) Scan parquet default.store_sales @@ -136,81 +132,81 @@ Input [4]: [sr_returned_date_sk#11, sr_store_sk#12, sr_return_amt#13, sr_net_los (9) Union -(10) Scan parquet default.date_dim -Output [2]: [d_date_sk#21, d_date#22] +(10) Scan parquet default.store +Output [2]: [s_store_sk#21, s_store_id#22] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-08-18), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/store] +PushedFilters: [IsNotNull(s_store_sk)] +ReadSchema: struct (11) ColumnarToRow [codegen id : 3] -Input [2]: [d_date_sk#21, d_date#22] +Input [2]: [s_store_sk#21, s_store_id#22] (12) Filter [codegen id : 3] -Input [2]: [d_date_sk#21, d_date#22] -Condition : (((isnotnull(d_date#22) AND (d_date#22 >= 10442)) AND (d_date#22 <= 10456)) AND isnotnull(d_date_sk#21)) +Input [2]: [s_store_sk#21, s_store_id#22] +Condition : isnotnull(s_store_sk#21) -(13) Project [codegen id : 3] -Output [1]: [d_date_sk#21] -Input [2]: [d_date_sk#21, d_date#22] +(13) BroadcastExchange +Input [2]: [s_store_sk#21, s_store_id#22] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#23] -(14) BroadcastExchange -Input [1]: [d_date_sk#21] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#23] - -(15) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [date_sk#6] -Right keys [1]: [cast(d_date_sk#21 as bigint)] +(14) BroadcastHashJoin [codegen id : 5] +Left keys [1]: [store_sk#5] +Right keys [1]: [cast(s_store_sk#21 as bigint)] Join condition: None -(16) Project [codegen id : 5] -Output [5]: [store_sk#5, sales_price#7, profit#8, return_amt#9, net_loss#10] -Input [7]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, d_date_sk#21] +(15) Project [codegen id : 5] +Output [6]: [date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#22] +Input [8]: [store_sk#5, date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_sk#21, s_store_id#22] -(17) Scan parquet default.store -Output [2]: [s_store_sk#24, s_store_id#25] +(16) Scan parquet default.date_dim +Output [2]: [d_date_sk#24, d_date#25] Batched: true -Location [not included in comparison]/{warehouse_dir}/store] -PushedFilters: [IsNotNull(s_store_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-08-18), IsNotNull(d_date_sk)] +ReadSchema: struct + +(17) ColumnarToRow [codegen id : 4] +Input [2]: [d_date_sk#24, d_date#25] -(18) ColumnarToRow [codegen id : 4] -Input [2]: [s_store_sk#24, s_store_id#25] +(18) Filter [codegen id : 4] +Input [2]: [d_date_sk#24, d_date#25] +Condition : (((isnotnull(d_date#25) AND (d_date#25 >= 10442)) AND (d_date#25 <= 10456)) AND isnotnull(d_date_sk#24)) -(19) Filter [codegen id : 4] -Input [2]: [s_store_sk#24, s_store_id#25] -Condition : isnotnull(s_store_sk#24) +(19) Project [codegen id : 
4] +Output [1]: [d_date_sk#24] +Input [2]: [d_date_sk#24, d_date#25] (20) BroadcastExchange -Input [2]: [s_store_sk#24, s_store_id#25] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#26] +Input [1]: [d_date_sk#24] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#26] (21) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [store_sk#5] -Right keys [1]: [cast(s_store_sk#24 as bigint)] +Left keys [1]: [date_sk#6] +Right keys [1]: [cast(d_date_sk#24 as bigint)] Join condition: None (22) Project [codegen id : 5] -Output [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#25] -Input [7]: [store_sk#5, sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_sk#24, s_store_id#25] +Output [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#22] +Input [7]: [date_sk#6, sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#22, d_date_sk#24] (23) HashAggregate [codegen id : 5] -Input [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#25] -Keys [1]: [s_store_id#25] +Input [5]: [sales_price#7, profit#8, return_amt#9, net_loss#10, s_store_id#22] +Keys [1]: [s_store_id#22] Functions [4]: [partial_sum(UnscaledValue(sales_price#7)), partial_sum(UnscaledValue(return_amt#9)), partial_sum(UnscaledValue(profit#8)), partial_sum(UnscaledValue(net_loss#10))] Aggregate Attributes [4]: [sum#27, sum#28, sum#29, sum#30] -Results [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] +Results [5]: [s_store_id#22, sum#31, sum#32, sum#33, sum#34] (24) Exchange -Input [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] -Arguments: hashpartitioning(s_store_id#25, 5), true, [id=#35] +Input [5]: [s_store_id#22, sum#31, sum#32, sum#33, sum#34] +Arguments: hashpartitioning(s_store_id#22, 5), ENSURE_REQUIREMENTS, [id=#35] (25) HashAggregate [codegen id : 6] -Input [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] -Keys [1]: [s_store_id#25] +Input [5]: [s_store_id#22, sum#31, sum#32, sum#33, sum#34] +Keys [1]: [s_store_id#22] Functions [4]: [sum(UnscaledValue(sales_price#7)), sum(UnscaledValue(return_amt#9)), sum(UnscaledValue(profit#8)), sum(UnscaledValue(net_loss#10))] Aggregate Attributes [4]: [sum(UnscaledValue(sales_price#7))#36, sum(UnscaledValue(return_amt#9))#37, sum(UnscaledValue(profit#8))#38, sum(UnscaledValue(net_loss#10))#39] -Results [5]: [store channel AS channel#40, concat(store, s_store_id#25) AS id#41, MakeDecimal(sum(UnscaledValue(sales_price#7))#36,17,2) AS sales#42, MakeDecimal(sum(UnscaledValue(return_amt#9))#37,17,2) AS returns#43, CheckOverflow((promote_precision(cast(MakeDecimal(sum(UnscaledValue(profit#8))#38,17,2) as decimal(18,2))) - promote_precision(cast(MakeDecimal(sum(UnscaledValue(net_loss#10))#39,17,2) as decimal(18,2)))), DecimalType(18,2), true) AS profit#44] +Results [5]: [store channel AS channel#40, concat(store, s_store_id#22) AS id#41, MakeDecimal(sum(UnscaledValue(sales_price#7))#36,17,2) AS sales#42, MakeDecimal(sum(UnscaledValue(return_amt#9))#37,17,2) AS returns#43, CheckOverflow((promote_precision(cast(MakeDecimal(sum(UnscaledValue(profit#8))#38,17,2) as decimal(18,2))) - promote_precision(cast(MakeDecimal(sum(UnscaledValue(net_loss#10))#39,17,2) as decimal(18,2)))), DecimalType(18,2), true) AS profit#44] (26) Scan parquet default.catalog_sales Output [4]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#47, cs_net_profit#48] @@ -250,44 +246,44 @@ Input [4]: [cr_returned_date_sk#55, cr_catalog_page_sk#56, 
cr_return_amount#57, (34) Union -(35) ReusedExchange [Reuses operator id: 14] -Output [1]: [d_date_sk#21] - -(36) BroadcastHashJoin [codegen id : 11] -Left keys [1]: [date_sk#50] -Right keys [1]: [d_date_sk#21] -Join condition: None - -(37) Project [codegen id : 11] -Output [5]: [page_sk#49, sales_price#51, profit#52, return_amt#53, net_loss#54] -Input [7]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, d_date_sk#21] - -(38) Scan parquet default.catalog_page +(35) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_page] PushedFilters: [IsNotNull(cp_catalog_page_sk)] ReadSchema: struct -(39) ColumnarToRow [codegen id : 10] +(36) ColumnarToRow [codegen id : 9] Input [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] -(40) Filter [codegen id : 10] +(37) Filter [codegen id : 9] Input [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Condition : isnotnull(cp_catalog_page_sk#65) -(41) BroadcastExchange +(38) BroadcastExchange Input [2]: [cp_catalog_page_sk#65, cp_catalog_page_id#66] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#67] -(42) BroadcastHashJoin [codegen id : 11] +(39) BroadcastHashJoin [codegen id : 11] Left keys [1]: [page_sk#49] Right keys [1]: [cp_catalog_page_sk#65] Join condition: None +(40) Project [codegen id : 11] +Output [6]: [date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66] +Input [8]: [page_sk#49, date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_sk#65, cp_catalog_page_id#66] + +(41) ReusedExchange [Reuses operator id: 20] +Output [1]: [d_date_sk#24] + +(42) BroadcastHashJoin [codegen id : 11] +Left keys [1]: [date_sk#50] +Right keys [1]: [d_date_sk#24] +Join condition: None + (43) Project [codegen id : 11] Output [5]: [sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66] -Input [7]: [page_sk#49, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_sk#65, cp_catalog_page_id#66] +Input [7]: [date_sk#50, sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66, d_date_sk#24] (44) HashAggregate [codegen id : 11] Input [5]: [sales_price#51, profit#52, return_amt#53, net_loss#54, cp_catalog_page_id#66] @@ -298,7 +294,7 @@ Results [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] (45) Exchange Input [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] -Arguments: hashpartitioning(cp_catalog_page_id#66, 5), true, [id=#76] +Arguments: hashpartitioning(cp_catalog_page_id#66, 5), ENSURE_REQUIREMENTS, [id=#76] (46) HashAggregate [codegen id : 12] Input [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] @@ -341,7 +337,7 @@ Condition : isnotnull(wr_returned_date_sk#96) (54) Exchange Input [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] -Arguments: hashpartitioning(wr_item_sk#97, wr_order_number#98, 5), true, [id=#101] +Arguments: hashpartitioning(wr_item_sk#97, wr_order_number#98, 5), ENSURE_REQUIREMENTS, [id=#101] (55) Sort [codegen id : 15] Input [5]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return_amt#99, wr_net_loss#100] @@ -363,7 +359,7 @@ Condition : ((isnotnull(ws_item_sk#102) AND isnotnull(ws_order_number#103)) AND (59) Exchange Input [3]: [ws_item_sk#102, ws_web_site_sk#87, ws_order_number#103] -Arguments: hashpartitioning(cast(ws_item_sk#102 
as bigint), cast(ws_order_number#103 as bigint), 5), true, [id=#104] +Arguments: hashpartitioning(cast(ws_item_sk#102 as bigint), cast(ws_order_number#103 as bigint), 5), ENSURE_REQUIREMENTS, [id=#104] (60) Sort [codegen id : 17] Input [3]: [ws_item_sk#102, ws_web_site_sk#87, ws_order_number#103] @@ -380,44 +376,44 @@ Input [8]: [wr_returned_date_sk#96, wr_item_sk#97, wr_order_number#98, wr_return (63) Union -(64) ReusedExchange [Reuses operator id: 14] -Output [1]: [d_date_sk#21] - -(65) BroadcastHashJoin [codegen id : 21] -Left keys [1]: [date_sk#91] -Right keys [1]: [cast(d_date_sk#21 as bigint)] -Join condition: None - -(66) Project [codegen id : 21] -Output [5]: [wsr_web_site_sk#90, sales_price#92, profit#93, return_amt#94, net_loss#95] -Input [7]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, d_date_sk#21] - -(67) Scan parquet default.web_site +(64) Scan parquet default.web_site Output [2]: [web_site_sk#111, web_site_id#112] Batched: true Location [not included in comparison]/{warehouse_dir}/web_site] PushedFilters: [IsNotNull(web_site_sk)] ReadSchema: struct -(68) ColumnarToRow [codegen id : 20] +(65) ColumnarToRow [codegen id : 19] Input [2]: [web_site_sk#111, web_site_id#112] -(69) Filter [codegen id : 20] +(66) Filter [codegen id : 19] Input [2]: [web_site_sk#111, web_site_id#112] Condition : isnotnull(web_site_sk#111) -(70) BroadcastExchange +(67) BroadcastExchange Input [2]: [web_site_sk#111, web_site_id#112] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#113] -(71) BroadcastHashJoin [codegen id : 21] +(68) BroadcastHashJoin [codegen id : 21] Left keys [1]: [wsr_web_site_sk#90] Right keys [1]: [web_site_sk#111] Join condition: None +(69) Project [codegen id : 21] +Output [6]: [date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112] +Input [8]: [wsr_web_site_sk#90, date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_sk#111, web_site_id#112] + +(70) ReusedExchange [Reuses operator id: 20] +Output [1]: [d_date_sk#24] + +(71) BroadcastHashJoin [codegen id : 21] +Left keys [1]: [date_sk#91] +Right keys [1]: [cast(d_date_sk#24 as bigint)] +Join condition: None + (72) Project [codegen id : 21] Output [5]: [sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112] -Input [7]: [wsr_web_site_sk#90, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_sk#111, web_site_id#112] +Input [7]: [date_sk#91, sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112, d_date_sk#24] (73) HashAggregate [codegen id : 21] Input [5]: [sales_price#92, profit#93, return_amt#94, net_loss#95, web_site_id#112] @@ -428,7 +424,7 @@ Results [5]: [web_site_id#112, sum#118, sum#119, sum#120, sum#121] (74) Exchange Input [5]: [web_site_id#112, sum#118, sum#119, sum#120, sum#121] -Arguments: hashpartitioning(web_site_id#112, 5), true, [id=#122] +Arguments: hashpartitioning(web_site_id#112, 5), ENSURE_REQUIREMENTS, [id=#122] (75) HashAggregate [codegen id : 22] Input [5]: [web_site_id#112, sum#118, sum#119, sum#120, sum#121] @@ -448,7 +444,7 @@ Results [8]: [channel#40, id#41, sum#138, isEmpty#139, sum#140, isEmpty#141, sum (78) Exchange Input [8]: [channel#40, id#41, sum#138, isEmpty#139, sum#140, isEmpty#141, sum#142, isEmpty#143] -Arguments: hashpartitioning(channel#40, id#41, 5), true, [id=#144] +Arguments: hashpartitioning(channel#40, id#41, 5), ENSURE_REQUIREMENTS, [id=#144] (79) HashAggregate [codegen id : 24] Input [8]: 
[channel#40, id#41, sum#138, isEmpty#139, sum#140, isEmpty#141, sum#142, isEmpty#143] @@ -476,7 +472,7 @@ Results [7]: [channel#40, sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, i (83) Exchange Input [7]: [channel#40, sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] -Arguments: hashpartitioning(channel#40, 5), true, [id=#176] +Arguments: hashpartitioning(channel#40, 5), ENSURE_REQUIREMENTS, [id=#176] (84) HashAggregate [codegen id : 49] Input [7]: [channel#40, sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] @@ -485,75 +481,55 @@ Functions [3]: [sum(sales#161), sum(returns#162), sum(profit#163)] Aggregate Attributes [3]: [sum(sales#161)#177, sum(returns#162)#178, sum(profit#163)#179] Results [5]: [channel#40, null AS id#180, sum(sales#161)#177 AS sum(sales)#181, sum(returns#162)#178 AS sum(returns)#182, sum(profit#163)#179 AS sum(profit)#183] -(85) Union - -(86) HashAggregate [codegen id : 50] -Input [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -Keys [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#40, id#41, sales#148, returns#149, profit#150] +(85) ReusedExchange [Reuses operator id: 78] +Output [8]: [channel#40, id#41, sum#184, isEmpty#185, sum#186, isEmpty#187, sum#188, isEmpty#189] -(87) Exchange -Input [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -Arguments: hashpartitioning(channel#40, id#41, sales#148, returns#149, profit#150, 5), true, [id=#184] - -(88) HashAggregate [codegen id : 51] -Input [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -Keys [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#40, id#41, sales#148, returns#149, profit#150] - -(89) ReusedExchange [Reuses operator id: 78] -Output [8]: [channel#40, id#41, sum#185, isEmpty#186, sum#187, isEmpty#188, sum#189, isEmpty#190] - -(90) HashAggregate [codegen id : 75] -Input [8]: [channel#40, id#41, sum#185, isEmpty#186, sum#187, isEmpty#188, sum#189, isEmpty#190] +(86) HashAggregate [codegen id : 73] +Input [8]: [channel#40, id#41, sum#184, isEmpty#185, sum#186, isEmpty#187, sum#188, isEmpty#189] Keys [2]: [channel#40, id#41] -Functions [3]: [sum(sales#42), sum(returns#43), sum(profit#191)] -Aggregate Attributes [3]: [sum(sales#42)#192, sum(returns#43)#193, sum(profit#191)#194] -Results [3]: [sum(sales#42)#192 AS sales#161, sum(returns#43)#193 AS returns#162, sum(profit#191)#194 AS profit#163] +Functions [3]: [sum(sales#42), sum(returns#43), sum(profit#190)] +Aggregate Attributes [3]: [sum(sales#42)#191, sum(returns#43)#192, sum(profit#190)#193] +Results [3]: [sum(sales#42)#191 AS sales#161, sum(returns#43)#192 AS returns#162, sum(profit#190)#193 AS profit#163] -(91) HashAggregate [codegen id : 75] +(87) HashAggregate [codegen id : 73] Input [3]: [sales#161, returns#162, profit#163] Keys: [] Functions [3]: [partial_sum(sales#161), partial_sum(returns#162), partial_sum(profit#163)] -Aggregate Attributes [6]: [sum#195, isEmpty#196, sum#197, isEmpty#198, sum#199, isEmpty#200] -Results [6]: [sum#201, isEmpty#202, sum#203, isEmpty#204, sum#205, isEmpty#206] +Aggregate Attributes [6]: [sum#194, isEmpty#195, sum#196, isEmpty#197, sum#198, isEmpty#199] +Results [6]: [sum#200, isEmpty#201, sum#202, isEmpty#203, sum#204, isEmpty#205] -(92) Exchange -Input [6]: [sum#201, isEmpty#202, sum#203, isEmpty#204, sum#205, isEmpty#206] -Arguments: SinglePartition, true, [id=#207] +(88) Exchange +Input [6]: 
[sum#200, isEmpty#201, sum#202, isEmpty#203, sum#204, isEmpty#205] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#206] -(93) HashAggregate [codegen id : 76] -Input [6]: [sum#201, isEmpty#202, sum#203, isEmpty#204, sum#205, isEmpty#206] +(89) HashAggregate [codegen id : 74] +Input [6]: [sum#200, isEmpty#201, sum#202, isEmpty#203, sum#204, isEmpty#205] Keys: [] Functions [3]: [sum(sales#161), sum(returns#162), sum(profit#163)] -Aggregate Attributes [3]: [sum(sales#161)#208, sum(returns#162)#209, sum(profit#163)#210] -Results [5]: [null AS channel#211, null AS id#212, sum(sales#161)#208 AS sum(sales)#213, sum(returns#162)#209 AS sum(returns)#214, sum(profit#163)#210 AS sum(profit)#215] +Aggregate Attributes [3]: [sum(sales#161)#207, sum(returns#162)#208, sum(profit#163)#209] +Results [5]: [null AS channel#210, null AS id#211, sum(sales#161)#207 AS sum(sales)#212, sum(returns#162)#208 AS sum(returns)#213, sum(profit#163)#209 AS sum(profit)#214] -(94) Union +(90) Union -(95) HashAggregate [codegen id : 77] +(91) HashAggregate [codegen id : 75] Input [5]: [channel#40, id#41, sales#148, returns#149, profit#150] Keys [5]: [channel#40, id#41, sales#148, returns#149, profit#150] Functions: [] Aggregate Attributes: [] Results [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -(96) Exchange +(92) Exchange Input [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -Arguments: hashpartitioning(channel#40, id#41, sales#148, returns#149, profit#150, 5), true, [id=#216] +Arguments: hashpartitioning(channel#40, id#41, sales#148, returns#149, profit#150, 5), ENSURE_REQUIREMENTS, [id=#215] -(97) HashAggregate [codegen id : 78] +(93) HashAggregate [codegen id : 76] Input [5]: [channel#40, id#41, sales#148, returns#149, profit#150] Keys [5]: [channel#40, id#41, sales#148, returns#149, profit#150] Functions: [] Aggregate Attributes: [] Results [5]: [channel#40, id#41, sales#148, returns#149, profit#150] -(98) TakeOrderedAndProject +(94) TakeOrderedAndProject Input [5]: [channel#40, id#41, sales#148, returns#149, profit#150] Arguments: 100, [channel#40 ASC NULLS FIRST, id#41 ASC NULLS FIRST], [channel#40, id#41, sales#148, returns#149, profit#150] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/simplified.txt index 81b4178b7a9ca..8d1794b903178 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a.sf100/simplified.txt @@ -1,165 +1,157 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] - WholeStageCodegen (78) + WholeStageCodegen (76) HashAggregate [channel,id,sales,returns,profit] InputAdapter Exchange [channel,id,sales,returns,profit] #1 - WholeStageCodegen (77) + WholeStageCodegen (75) HashAggregate [channel,id,sales,returns,profit] InputAdapter Union - WholeStageCodegen (51) - HashAggregate [channel,id,sales,returns,profit] + WholeStageCodegen (24) + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel,id,sales,returns,profit] #2 - WholeStageCodegen (50) - HashAggregate [channel,id,sales,returns,profit] + Exchange [channel,id] #2 + WholeStageCodegen (23) + HashAggregate [channel,id,sales,returns,profit] 
[sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter Union - WholeStageCodegen (24) - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + WholeStageCodegen (6) + HashAggregate [s_store_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] InputAdapter - Exchange [channel,id] #3 - WholeStageCodegen (23) - HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (6) - HashAggregate [s_store_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + Exchange [s_store_id] #3 + WholeStageCodegen (5) + HashAggregate [s_store_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] + Project [sales_price,profit,return_amt,net_loss,s_store_id] + BroadcastHashJoin [date_sk,d_date_sk] + Project [date_sk,sales_price,profit,return_amt,net_loss,s_store_id] + BroadcastHashJoin [store_sk,s_store_sk] InputAdapter - Exchange [s_store_id] #4 - WholeStageCodegen (5) - HashAggregate [s_store_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] - Project [sales_price,profit,return_amt,net_loss,s_store_id] - BroadcastHashJoin [store_sk,s_store_sk] - Project [store_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] - InputAdapter - Union - WholeStageCodegen (1) - Project [ss_store_sk,ss_sold_date_sk,ss_ext_sales_price,ss_net_profit] - Filter [ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - WholeStageCodegen (2) - Project [sr_store_sk,sr_returned_date_sk,sr_return_amt,sr_net_loss] - Filter [sr_returned_date_sk,sr_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] + Union + WholeStageCodegen (1) + Project [ss_store_sk,ss_sold_date_sk,ss_ext_sales_price,ss_net_profit] + Filter [ss_sold_date_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] + WholeStageCodegen (2) + Project [sr_store_sk,sr_returned_date_sk,sr_return_amt,sr_net_loss] + Filter [sr_returned_date_sk,sr_store_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (4) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_store_id] - WholeStageCodegen (12) - HashAggregate [cp_catalog_page_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] InputAdapter - Exchange [cp_catalog_page_id] #7 - WholeStageCodegen (11) - HashAggregate [cp_catalog_page_id,sales_price,return_amt,profit,net_loss] 
[sum,sum,sum,sum,sum,sum,sum,sum] - Project [sales_price,profit,return_amt,net_loss,cp_catalog_page_id] - BroadcastHashJoin [page_sk,cp_catalog_page_sk] - Project [page_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] - InputAdapter - Union - WholeStageCodegen (7) - Project [cs_catalog_page_sk,cs_sold_date_sk,cs_ext_sales_price,cs_net_profit] - Filter [cs_sold_date_sk,cs_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit] - WholeStageCodegen (8) - Project [cr_catalog_page_sk,cr_returned_date_sk,cr_return_amount,cr_net_loss] - Filter [cr_returned_date_sk,cr_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_returned_date_sk,cr_catalog_page_sk,cr_return_amount,cr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #5 + BroadcastExchange #4 + WholeStageCodegen (3) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_store_id] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (4) + Project [d_date_sk] + Filter [d_date,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date] + WholeStageCodegen (12) + HashAggregate [cp_catalog_page_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + InputAdapter + Exchange [cp_catalog_page_id] #6 + WholeStageCodegen (11) + HashAggregate [cp_catalog_page_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] + Project [sales_price,profit,return_amt,net_loss,cp_catalog_page_id] + BroadcastHashJoin [date_sk,d_date_sk] + Project [date_sk,sales_price,profit,return_amt,net_loss,cp_catalog_page_id] + BroadcastHashJoin [page_sk,cp_catalog_page_sk] + InputAdapter + Union + WholeStageCodegen (7) + Project [cs_catalog_page_sk,cs_sold_date_sk,cs_ext_sales_price,cs_net_profit] + Filter [cs_sold_date_sk,cs_catalog_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit] + WholeStageCodegen (8) + Project [cr_catalog_page_sk,cr_returned_date_sk,cr_return_amount,cr_net_loss] + Filter [cr_returned_date_sk,cr_catalog_page_sk] + ColumnarToRow InputAdapter - BroadcastExchange #8 - WholeStageCodegen (10) - Filter [cp_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] - WholeStageCodegen (22) - HashAggregate [web_site_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + Scan parquet default.catalog_returns [cr_returned_date_sk,cr_catalog_page_sk,cr_return_amount,cr_net_loss] + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (9) + Filter [cp_catalog_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] + InputAdapter + ReusedExchange [d_date_sk] #5 + WholeStageCodegen (22) + HashAggregate [web_site_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + InputAdapter + Exchange [web_site_id] #8 + WholeStageCodegen (21) + HashAggregate 
[web_site_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] + Project [sales_price,profit,return_amt,net_loss,web_site_id] + BroadcastHashJoin [date_sk,d_date_sk] + Project [date_sk,sales_price,profit,return_amt,net_loss,web_site_id] + BroadcastHashJoin [wsr_web_site_sk,web_site_sk] InputAdapter - Exchange [web_site_id] #9 - WholeStageCodegen (21) - HashAggregate [web_site_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] - Project [sales_price,profit,return_amt,net_loss,web_site_id] - BroadcastHashJoin [wsr_web_site_sk,web_site_sk] - Project [wsr_web_site_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] + Union + WholeStageCodegen (13) + Project [ws_web_site_sk,ws_sold_date_sk,ws_ext_sales_price,ws_net_profit] + Filter [ws_sold_date_sk,ws_web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_web_site_sk,ws_ext_sales_price,ws_net_profit] + WholeStageCodegen (18) + Project [ws_web_site_sk,wr_returned_date_sk,wr_return_amt,wr_net_loss] + SortMergeJoin [wr_item_sk,wr_order_number,ws_item_sk,ws_order_number] + InputAdapter + WholeStageCodegen (15) + Sort [wr_item_sk,wr_order_number] InputAdapter - Union - WholeStageCodegen (13) - Project [ws_web_site_sk,ws_sold_date_sk,ws_ext_sales_price,ws_net_profit] - Filter [ws_sold_date_sk,ws_web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_web_site_sk,ws_ext_sales_price,ws_net_profit] - WholeStageCodegen (18) - Project [ws_web_site_sk,wr_returned_date_sk,wr_return_amt,wr_net_loss] - SortMergeJoin [wr_item_sk,wr_order_number,ws_item_sk,ws_order_number] - InputAdapter - WholeStageCodegen (15) - Sort [wr_item_sk,wr_order_number] - InputAdapter - Exchange [wr_item_sk,wr_order_number] #10 - WholeStageCodegen (14) - Filter [wr_returned_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_returns [wr_returned_date_sk,wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] + Exchange [wr_item_sk,wr_order_number] #9 + WholeStageCodegen (14) + Filter [wr_returned_date_sk] + ColumnarToRow InputAdapter - WholeStageCodegen (17) - Sort [ws_item_sk,ws_order_number] - InputAdapter - Exchange [ws_item_sk,ws_order_number] #11 - WholeStageCodegen (16) - Filter [ws_item_sk,ws_order_number,ws_web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_item_sk,ws_web_site_sk,ws_order_number] + Scan parquet default.web_returns [wr_returned_date_sk,wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] + InputAdapter + WholeStageCodegen (17) + Sort [ws_item_sk,ws_order_number] InputAdapter - ReusedExchange [d_date_sk] #5 - InputAdapter - BroadcastExchange #12 - WholeStageCodegen (20) - Filter [web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_site [web_site_sk,web_site_id] - WholeStageCodegen (49) - HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sum(sales),sum(returns),sum(profit),sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [channel] #13 - WholeStageCodegen (48) - HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 - WholeStageCodegen (76) + Exchange 
[ws_item_sk,ws_order_number] #10 + WholeStageCodegen (16) + Filter [ws_item_sk,ws_order_number,ws_web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_item_sk,ws_web_site_sk,ws_order_number] + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (19) + Filter [web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_site [web_site_sk,web_site_id] + InputAdapter + ReusedExchange [d_date_sk] #5 + WholeStageCodegen (49) + HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sum(sales),sum(returns),sum(profit),sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [channel] #12 + WholeStageCodegen (48) + HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 + WholeStageCodegen (74) HashAggregate [sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),channel,id,sum(sales),sum(returns),sum(profit),sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #14 - WholeStageCodegen (75) + Exchange #13 + WholeStageCodegen (73) HashAggregate [sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt index fa2435de73e02..d4c1b5f93a0d2 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/explain.txt @@ -1,99 +1,95 @@ == Physical Plan == -TakeOrderedAndProject (95) -+- * HashAggregate (94) - +- Exchange (93) - +- * HashAggregate (92) - +- Union (91) - :- * HashAggregate (85) - : +- Exchange (84) - : +- * HashAggregate (83) - : +- Union (82) - : :- * HashAggregate (76) - : : +- Exchange (75) - : : +- * HashAggregate (74) - : : +- Union (73) - : : :- * HashAggregate (25) - : : : +- Exchange (24) - : : : +- * HashAggregate (23) - : : : +- * Project (22) - : : : +- * BroadcastHashJoin Inner BuildRight (21) - : : : :- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- Union (9) - : : : : : :- * Project (4) - : : : : : : +- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.store_sales (1) - : : : : : +- * Project (8) - : : : : : +- * Filter (7) - : : : : : +- * ColumnarToRow (6) - : : : : : +- Scan parquet default.store_returns (5) - : : : : +- BroadcastExchange (14) - : : : : +- * Project (13) - : : : : +- * Filter (12) - : : : : +- * ColumnarToRow (11) - : : : : +- Scan parquet default.date_dim (10) - : : : +- BroadcastExchange (20) - : : : +- * Filter (19) - : : : +- * ColumnarToRow (18) - : : : +- Scan parquet default.store (17) - : : :- * HashAggregate (46) - : : : +- Exchange (45) - : : : +- * HashAggregate (44) - : : : +- * Project (43) - : : : +- * BroadcastHashJoin 
Inner BuildRight (42) - : : : :- * Project (37) - : : : : +- * BroadcastHashJoin Inner BuildRight (36) - : : : : :- Union (34) - : : : : : :- * Project (29) - : : : : : : +- * Filter (28) - : : : : : : +- * ColumnarToRow (27) - : : : : : : +- Scan parquet default.catalog_sales (26) - : : : : : +- * Project (33) - : : : : : +- * Filter (32) - : : : : : +- * ColumnarToRow (31) - : : : : : +- Scan parquet default.catalog_returns (30) - : : : : +- ReusedExchange (35) - : : : +- BroadcastExchange (41) - : : : +- * Filter (40) - : : : +- * ColumnarToRow (39) - : : : +- Scan parquet default.catalog_page (38) - : : +- * HashAggregate (72) - : : +- Exchange (71) - : : +- * HashAggregate (70) - : : +- * Project (69) - : : +- * BroadcastHashJoin Inner BuildRight (68) - : : :- * Project (63) - : : : +- * BroadcastHashJoin Inner BuildRight (62) - : : : :- Union (60) - : : : : :- * Project (50) - : : : : : +- * Filter (49) - : : : : : +- * ColumnarToRow (48) - : : : : : +- Scan parquet default.web_sales (47) - : : : : +- * Project (59) - : : : : +- * BroadcastHashJoin Inner BuildRight (58) - : : : : :- * Filter (53) - : : : : : +- * ColumnarToRow (52) - : : : : : +- Scan parquet default.web_returns (51) - : : : : +- BroadcastExchange (57) - : : : : +- * Filter (56) - : : : : +- * ColumnarToRow (55) - : : : : +- Scan parquet default.web_sales (54) - : : : +- ReusedExchange (61) - : : +- BroadcastExchange (67) - : : +- * Filter (66) - : : +- * ColumnarToRow (65) - : : +- Scan parquet default.web_site (64) - : +- * HashAggregate (81) - : +- Exchange (80) - : +- * HashAggregate (79) - : +- * HashAggregate (78) - : +- ReusedExchange (77) - +- * HashAggregate (90) - +- Exchange (89) - +- * HashAggregate (88) - +- * HashAggregate (87) - +- ReusedExchange (86) +TakeOrderedAndProject (91) ++- * HashAggregate (90) + +- Exchange (89) + +- * HashAggregate (88) + +- Union (87) + :- * HashAggregate (76) + : +- Exchange (75) + : +- * HashAggregate (74) + : +- Union (73) + : :- * HashAggregate (25) + : : +- Exchange (24) + : : +- * HashAggregate (23) + : : +- * Project (22) + : : +- * BroadcastHashJoin Inner BuildRight (21) + : : :- * Project (16) + : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : :- Union (9) + : : : : :- * Project (4) + : : : : : +- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- * Project (8) + : : : : +- * Filter (7) + : : : : +- * ColumnarToRow (6) + : : : : +- Scan parquet default.store_returns (5) + : : : +- BroadcastExchange (14) + : : : +- * Project (13) + : : : +- * Filter (12) + : : : +- * ColumnarToRow (11) + : : : +- Scan parquet default.date_dim (10) + : : +- BroadcastExchange (20) + : : +- * Filter (19) + : : +- * ColumnarToRow (18) + : : +- Scan parquet default.store (17) + : :- * HashAggregate (46) + : : +- Exchange (45) + : : +- * HashAggregate (44) + : : +- * Project (43) + : : +- * BroadcastHashJoin Inner BuildRight (42) + : : :- * Project (37) + : : : +- * BroadcastHashJoin Inner BuildRight (36) + : : : :- Union (34) + : : : : :- * Project (29) + : : : : : +- * Filter (28) + : : : : : +- * ColumnarToRow (27) + : : : : : +- Scan parquet default.catalog_sales (26) + : : : : +- * Project (33) + : : : : +- * Filter (32) + : : : : +- * ColumnarToRow (31) + : : : : +- Scan parquet default.catalog_returns (30) + : : : +- ReusedExchange (35) + : : +- BroadcastExchange (41) + : : +- * Filter (40) + : : +- * ColumnarToRow (39) + : : +- Scan parquet default.catalog_page (38) + : +- * HashAggregate (72) + : +- Exchange 
(71) + : +- * HashAggregate (70) + : +- * Project (69) + : +- * BroadcastHashJoin Inner BuildRight (68) + : :- * Project (63) + : : +- * BroadcastHashJoin Inner BuildRight (62) + : : :- Union (60) + : : : :- * Project (50) + : : : : +- * Filter (49) + : : : : +- * ColumnarToRow (48) + : : : : +- Scan parquet default.web_sales (47) + : : : +- * Project (59) + : : : +- * BroadcastHashJoin Inner BuildRight (58) + : : : :- * Filter (53) + : : : : +- * ColumnarToRow (52) + : : : : +- Scan parquet default.web_returns (51) + : : : +- BroadcastExchange (57) + : : : +- * Filter (56) + : : : +- * ColumnarToRow (55) + : : : +- Scan parquet default.web_sales (54) + : : +- ReusedExchange (61) + : +- BroadcastExchange (67) + : +- * Filter (66) + : +- * ColumnarToRow (65) + : +- Scan parquet default.web_site (64) + :- * HashAggregate (81) + : +- Exchange (80) + : +- * HashAggregate (79) + : +- * HashAggregate (78) + : +- ReusedExchange (77) + +- * HashAggregate (86) + +- Exchange (85) + +- * HashAggregate (84) + +- * HashAggregate (83) + +- ReusedExchange (82) (1) Scan parquet default.store_sales @@ -200,7 +196,7 @@ Results [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] (24) Exchange Input [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] -Arguments: hashpartitioning(s_store_id#25, 5), true, [id=#35] +Arguments: hashpartitioning(s_store_id#25, 5), ENSURE_REQUIREMENTS, [id=#35] (25) HashAggregate [codegen id : 6] Input [5]: [s_store_id#25, sum#31, sum#32, sum#33, sum#34] @@ -295,7 +291,7 @@ Results [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] (45) Exchange Input [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] -Arguments: hashpartitioning(cp_catalog_page_id#66, 5), true, [id=#76] +Arguments: hashpartitioning(cp_catalog_page_id#66, 5), ENSURE_REQUIREMENTS, [id=#76] (46) HashAggregate [codegen id : 12] Input [5]: [cp_catalog_page_id#66, sum#72, sum#73, sum#74, sum#75] @@ -413,7 +409,7 @@ Results [5]: [web_site_id#111, sum#117, sum#118, sum#119, sum#120] (71) Exchange Input [5]: [web_site_id#111, sum#117, sum#118, sum#119, sum#120] -Arguments: hashpartitioning(web_site_id#111, 5), true, [id=#121] +Arguments: hashpartitioning(web_site_id#111, 5), ENSURE_REQUIREMENTS, [id=#121] (72) HashAggregate [codegen id : 19] Input [5]: [web_site_id#111, sum#117, sum#118, sum#119, sum#120] @@ -433,7 +429,7 @@ Results [8]: [channel#40, id#41, sum#137, isEmpty#138, sum#139, isEmpty#140, sum (75) Exchange Input [8]: [channel#40, id#41, sum#137, isEmpty#138, sum#139, isEmpty#140, sum#141, isEmpty#142] -Arguments: hashpartitioning(channel#40, id#41, 5), true, [id=#143] +Arguments: hashpartitioning(channel#40, id#41, 5), ENSURE_REQUIREMENTS, [id=#143] (76) HashAggregate [codegen id : 21] Input [8]: [channel#40, id#41, sum#137, isEmpty#138, sum#139, isEmpty#140, sum#141, isEmpty#142] @@ -461,7 +457,7 @@ Results [7]: [channel#40, sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, i (80) Exchange Input [7]: [channel#40, sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, isEmpty#174] -Arguments: hashpartitioning(channel#40, 5), true, [id=#175] +Arguments: hashpartitioning(channel#40, 5), ENSURE_REQUIREMENTS, [id=#175] (81) HashAggregate [codegen id : 43] Input [7]: [channel#40, sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, isEmpty#174] @@ -470,75 +466,55 @@ Functions [3]: [sum(sales#160), sum(returns#161), sum(profit#162)] Aggregate Attributes [3]: [sum(sales#160)#176, sum(returns#161)#177, sum(profit#162)#178] Results [5]: [channel#40, null AS id#179, sum(sales#160)#176 AS 
sum(sales)#180, sum(returns#161)#177 AS sum(returns)#181, sum(profit#162)#178 AS sum(profit)#182] -(82) Union +(82) ReusedExchange [Reuses operator id: 75] +Output [8]: [channel#40, id#41, sum#183, isEmpty#184, sum#185, isEmpty#186, sum#187, isEmpty#188] -(83) HashAggregate [codegen id : 44] -Input [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -Keys [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#40, id#41, sales#147, returns#148, profit#149] - -(84) Exchange -Input [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -Arguments: hashpartitioning(channel#40, id#41, sales#147, returns#148, profit#149, 5), true, [id=#183] - -(85) HashAggregate [codegen id : 45] -Input [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -Keys [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#40, id#41, sales#147, returns#148, profit#149] - -(86) ReusedExchange [Reuses operator id: 75] -Output [8]: [channel#40, id#41, sum#184, isEmpty#185, sum#186, isEmpty#187, sum#188, isEmpty#189] - -(87) HashAggregate [codegen id : 66] -Input [8]: [channel#40, id#41, sum#184, isEmpty#185, sum#186, isEmpty#187, sum#188, isEmpty#189] +(83) HashAggregate [codegen id : 64] +Input [8]: [channel#40, id#41, sum#183, isEmpty#184, sum#185, isEmpty#186, sum#187, isEmpty#188] Keys [2]: [channel#40, id#41] -Functions [3]: [sum(sales#42), sum(returns#43), sum(profit#190)] -Aggregate Attributes [3]: [sum(sales#42)#191, sum(returns#43)#192, sum(profit#190)#193] -Results [3]: [sum(sales#42)#191 AS sales#160, sum(returns#43)#192 AS returns#161, sum(profit#190)#193 AS profit#162] +Functions [3]: [sum(sales#42), sum(returns#43), sum(profit#189)] +Aggregate Attributes [3]: [sum(sales#42)#190, sum(returns#43)#191, sum(profit#189)#192] +Results [3]: [sum(sales#42)#190 AS sales#160, sum(returns#43)#191 AS returns#161, sum(profit#189)#192 AS profit#162] -(88) HashAggregate [codegen id : 66] +(84) HashAggregate [codegen id : 64] Input [3]: [sales#160, returns#161, profit#162] Keys: [] Functions [3]: [partial_sum(sales#160), partial_sum(returns#161), partial_sum(profit#162)] -Aggregate Attributes [6]: [sum#194, isEmpty#195, sum#196, isEmpty#197, sum#198, isEmpty#199] -Results [6]: [sum#200, isEmpty#201, sum#202, isEmpty#203, sum#204, isEmpty#205] +Aggregate Attributes [6]: [sum#193, isEmpty#194, sum#195, isEmpty#196, sum#197, isEmpty#198] +Results [6]: [sum#199, isEmpty#200, sum#201, isEmpty#202, sum#203, isEmpty#204] -(89) Exchange -Input [6]: [sum#200, isEmpty#201, sum#202, isEmpty#203, sum#204, isEmpty#205] -Arguments: SinglePartition, true, [id=#206] +(85) Exchange +Input [6]: [sum#199, isEmpty#200, sum#201, isEmpty#202, sum#203, isEmpty#204] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#205] -(90) HashAggregate [codegen id : 67] -Input [6]: [sum#200, isEmpty#201, sum#202, isEmpty#203, sum#204, isEmpty#205] +(86) HashAggregate [codegen id : 65] +Input [6]: [sum#199, isEmpty#200, sum#201, isEmpty#202, sum#203, isEmpty#204] Keys: [] Functions [3]: [sum(sales#160), sum(returns#161), sum(profit#162)] -Aggregate Attributes [3]: [sum(sales#160)#207, sum(returns#161)#208, sum(profit#162)#209] -Results [5]: [null AS channel#210, null AS id#211, sum(sales#160)#207 AS sum(sales)#212, sum(returns#161)#208 AS sum(returns)#213, sum(profit#162)#209 AS sum(profit)#214] +Aggregate Attributes [3]: [sum(sales#160)#206, sum(returns#161)#207, sum(profit#162)#208] +Results 
[5]: [null AS channel#209, null AS id#210, sum(sales#160)#206 AS sum(sales)#211, sum(returns#161)#207 AS sum(returns)#212, sum(profit#162)#208 AS sum(profit)#213] -(91) Union +(87) Union -(92) HashAggregate [codegen id : 68] +(88) HashAggregate [codegen id : 66] Input [5]: [channel#40, id#41, sales#147, returns#148, profit#149] Keys [5]: [channel#40, id#41, sales#147, returns#148, profit#149] Functions: [] Aggregate Attributes: [] Results [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -(93) Exchange +(89) Exchange Input [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -Arguments: hashpartitioning(channel#40, id#41, sales#147, returns#148, profit#149, 5), true, [id=#215] +Arguments: hashpartitioning(channel#40, id#41, sales#147, returns#148, profit#149, 5), ENSURE_REQUIREMENTS, [id=#214] -(94) HashAggregate [codegen id : 69] +(90) HashAggregate [codegen id : 67] Input [5]: [channel#40, id#41, sales#147, returns#148, profit#149] Keys [5]: [channel#40, id#41, sales#147, returns#148, profit#149] Functions: [] Aggregate Attributes: [] Results [5]: [channel#40, id#41, sales#147, returns#148, profit#149] -(95) TakeOrderedAndProject +(91) TakeOrderedAndProject Input [5]: [channel#40, id#41, sales#147, returns#148, profit#149] Arguments: 100, [channel#40 ASC NULLS FIRST, id#41 ASC NULLS FIRST], [channel#40, id#41, sales#147, returns#148, profit#149] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/simplified.txt index 6bb223e2f4488..f5a22c77a8e30 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q5a/simplified.txt @@ -1,156 +1,148 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] - WholeStageCodegen (69) + WholeStageCodegen (67) HashAggregate [channel,id,sales,returns,profit] InputAdapter Exchange [channel,id,sales,returns,profit] #1 - WholeStageCodegen (68) + WholeStageCodegen (66) HashAggregate [channel,id,sales,returns,profit] InputAdapter Union - WholeStageCodegen (45) - HashAggregate [channel,id,sales,returns,profit] + WholeStageCodegen (21) + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel,id,sales,returns,profit] #2 - WholeStageCodegen (44) - HashAggregate [channel,id,sales,returns,profit] + Exchange [channel,id] #2 + WholeStageCodegen (20) + HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter Union - WholeStageCodegen (21) - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + WholeStageCodegen (6) + HashAggregate [s_store_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] InputAdapter - Exchange [channel,id] #3 - WholeStageCodegen (20) - HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (6) - HashAggregate [s_store_id,sum,sum,sum,sum] 
[sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + Exchange [s_store_id] #3 + WholeStageCodegen (5) + HashAggregate [s_store_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] + Project [sales_price,profit,return_amt,net_loss,s_store_id] + BroadcastHashJoin [store_sk,s_store_sk] + Project [store_sk,sales_price,profit,return_amt,net_loss] + BroadcastHashJoin [date_sk,d_date_sk] InputAdapter - Exchange [s_store_id] #4 - WholeStageCodegen (5) - HashAggregate [s_store_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] - Project [sales_price,profit,return_amt,net_loss,s_store_id] - BroadcastHashJoin [store_sk,s_store_sk] - Project [store_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] - InputAdapter - Union - WholeStageCodegen (1) - Project [ss_store_sk,ss_sold_date_sk,ss_ext_sales_price,ss_net_profit] - Filter [ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - WholeStageCodegen (2) - Project [sr_store_sk,sr_returned_date_sk,sr_return_amt,sr_net_loss] - Filter [sr_returned_date_sk,sr_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (3) - Project [d_date_sk] - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] + Union + WholeStageCodegen (1) + Project [ss_store_sk,ss_sold_date_sk,ss_ext_sales_price,ss_net_profit] + Filter [ss_sold_date_sk,ss_store_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (4) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_store_id] - WholeStageCodegen (12) - HashAggregate [cp_catalog_page_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] + WholeStageCodegen (2) + Project [sr_store_sk,sr_returned_date_sk,sr_return_amt,sr_net_loss] + Filter [sr_returned_date_sk,sr_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] InputAdapter - Exchange [cp_catalog_page_id] #7 - WholeStageCodegen (11) - HashAggregate [cp_catalog_page_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] - Project [sales_price,profit,return_amt,net_loss,cp_catalog_page_id] - BroadcastHashJoin [page_sk,cp_catalog_page_sk] - Project [page_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] - InputAdapter - Union - WholeStageCodegen (7) - Project [cs_catalog_page_sk,cs_sold_date_sk,cs_ext_sales_price,cs_net_profit] - Filter [cs_sold_date_sk,cs_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit] - WholeStageCodegen (8) - Project [cr_catalog_page_sk,cr_returned_date_sk,cr_return_amount,cr_net_loss] - Filter [cr_returned_date_sk,cr_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns 
[cr_returned_date_sk,cr_catalog_page_sk,cr_return_amount,cr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #5 + BroadcastExchange #4 + WholeStageCodegen (3) + Project [d_date_sk] + Filter [d_date,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #8 - WholeStageCodegen (10) - Filter [cp_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] - WholeStageCodegen (19) - HashAggregate [web_site_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + Scan parquet default.date_dim [d_date_sk,d_date] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (4) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_store_id] + WholeStageCodegen (12) + HashAggregate [cp_catalog_page_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] + InputAdapter + Exchange [cp_catalog_page_id] #6 + WholeStageCodegen (11) + HashAggregate [cp_catalog_page_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] + Project [sales_price,profit,return_amt,net_loss,cp_catalog_page_id] + BroadcastHashJoin [page_sk,cp_catalog_page_sk] + Project [page_sk,sales_price,profit,return_amt,net_loss] + BroadcastHashJoin [date_sk,d_date_sk] InputAdapter - Exchange [web_site_id] #9 - WholeStageCodegen (18) - HashAggregate [web_site_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] - Project [sales_price,profit,return_amt,net_loss,web_site_id] - BroadcastHashJoin [wsr_web_site_sk,web_site_sk] - Project [wsr_web_site_sk,sales_price,profit,return_amt,net_loss] - BroadcastHashJoin [date_sk,d_date_sk] - InputAdapter - Union - WholeStageCodegen (13) - Project [ws_web_site_sk,ws_sold_date_sk,ws_ext_sales_price,ws_net_profit] - Filter [ws_sold_date_sk,ws_web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_web_site_sk,ws_ext_sales_price,ws_net_profit] - WholeStageCodegen (15) - Project [ws_web_site_sk,wr_returned_date_sk,wr_return_amt,wr_net_loss] - BroadcastHashJoin [wr_item_sk,wr_order_number,ws_item_sk,ws_order_number] - Filter [wr_returned_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_returns [wr_returned_date_sk,wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (14) - Filter [ws_item_sk,ws_order_number,ws_web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_item_sk,ws_web_site_sk,ws_order_number] - InputAdapter - ReusedExchange [d_date_sk] #5 + Union + WholeStageCodegen (7) + Project [cs_catalog_page_sk,cs_sold_date_sk,cs_ext_sales_price,cs_net_profit] + Filter [cs_sold_date_sk,cs_catalog_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit] + WholeStageCodegen (8) + Project [cr_catalog_page_sk,cr_returned_date_sk,cr_return_amount,cr_net_loss] + Filter [cr_returned_date_sk,cr_catalog_page_sk] + ColumnarToRow InputAdapter - BroadcastExchange #11 - WholeStageCodegen (17) - Filter [web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_site [web_site_sk,web_site_id] - WholeStageCodegen (43) - HashAggregate 
[channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sum(sales),sum(returns),sum(profit),sum,isEmpty,sum,isEmpty,sum,isEmpty] + Scan parquet default.catalog_returns [cr_returned_date_sk,cr_catalog_page_sk,cr_return_amount,cr_net_loss] + InputAdapter + ReusedExchange [d_date_sk] #4 + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (10) + Filter [cp_catalog_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] + WholeStageCodegen (19) + HashAggregate [web_site_id,sum,sum,sum,sum] [sum(UnscaledValue(sales_price)),sum(UnscaledValue(return_amt)),sum(UnscaledValue(profit)),sum(UnscaledValue(net_loss)),channel,id,sales,returns,profit,sum,sum,sum,sum] InputAdapter - Exchange [channel] #12 - WholeStageCodegen (42) - HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 - WholeStageCodegen (67) + Exchange [web_site_id] #8 + WholeStageCodegen (18) + HashAggregate [web_site_id,sales_price,return_amt,profit,net_loss] [sum,sum,sum,sum,sum,sum,sum,sum] + Project [sales_price,profit,return_amt,net_loss,web_site_id] + BroadcastHashJoin [wsr_web_site_sk,web_site_sk] + Project [wsr_web_site_sk,sales_price,profit,return_amt,net_loss] + BroadcastHashJoin [date_sk,d_date_sk] + InputAdapter + Union + WholeStageCodegen (13) + Project [ws_web_site_sk,ws_sold_date_sk,ws_ext_sales_price,ws_net_profit] + Filter [ws_sold_date_sk,ws_web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_web_site_sk,ws_ext_sales_price,ws_net_profit] + WholeStageCodegen (15) + Project [ws_web_site_sk,wr_returned_date_sk,wr_return_amt,wr_net_loss] + BroadcastHashJoin [wr_item_sk,wr_order_number,ws_item_sk,ws_order_number] + Filter [wr_returned_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_returns [wr_returned_date_sk,wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] + InputAdapter + BroadcastExchange #9 + WholeStageCodegen (14) + Filter [ws_item_sk,ws_order_number,ws_web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_item_sk,ws_web_site_sk,ws_order_number] + InputAdapter + ReusedExchange [d_date_sk] #4 + InputAdapter + BroadcastExchange #10 + WholeStageCodegen (17) + Filter [web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_site [web_site_sk,web_site_id] + WholeStageCodegen (43) + HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sum(sales),sum(returns),sum(profit),sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [channel] #11 + WholeStageCodegen (42) + HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 + WholeStageCodegen (65) HashAggregate [sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),channel,id,sum(sales),sum(returns),sum(profit),sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #13 - WholeStageCodegen (66) 
+ Exchange #12 + WholeStageCodegen (64) HashAggregate [sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt index ab246a3449557..1b9e8f37e9418 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/explain.txt @@ -11,30 +11,30 @@ TakeOrderedAndProject (50) : +- Exchange (27) : +- * Project (26) : +- * BroadcastHashJoin Inner BuildRight (25) - : :- * Project (10) - : : +- * BroadcastHashJoin Inner BuildRight (9) + : :- * Project (19) + : : +- * BroadcastHashJoin Inner BuildRight (18) : : :- * Filter (3) : : : +- * ColumnarToRow (2) : : : +- Scan parquet default.store_sales (1) - : : +- BroadcastExchange (8) - : : +- * Project (7) - : : +- * Filter (6) - : : +- * ColumnarToRow (5) - : : +- Scan parquet default.date_dim (4) + : : +- BroadcastExchange (17) + : : +- * Project (16) + : : +- * Filter (15) + : : +- * BroadcastHashJoin LeftOuter BuildRight (14) + : : :- * Filter (6) + : : : +- * ColumnarToRow (5) + : : : +- Scan parquet default.item (4) + : : +- BroadcastExchange (13) + : : +- * HashAggregate (12) + : : +- Exchange (11) + : : +- * HashAggregate (10) + : : +- * Filter (9) + : : +- * ColumnarToRow (8) + : : +- Scan parquet default.item (7) : +- BroadcastExchange (24) : +- * Project (23) : +- * Filter (22) - : +- * BroadcastHashJoin LeftOuter BuildRight (21) - : :- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.item (11) - : +- BroadcastExchange (20) - : +- * HashAggregate (19) - : +- Exchange (18) - : +- * HashAggregate (17) - : +- * Filter (16) - : +- * ColumnarToRow (15) - : +- Scan parquet default.item (14) + : +- * ColumnarToRow (21) + : +- Scan parquet default.date_dim (20) +- * Sort (42) +- Exchange (41) +- * Project (40) @@ -65,112 +65,112 @@ Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3] Input [3]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3] Condition : ((isnotnull(ss_customer_sk#3) AND isnotnull(ss_sold_date_sk#1)) AND isnotnull(ss_item_sk#2)) -(4) Scan parquet default.date_dim -Output [2]: [d_date_sk#4, d_month_seq#5] -Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] -ReadSchema: struct - -(5) ColumnarToRow [codegen id : 1] -Input [2]: [d_date_sk#4, d_month_seq#5] - -(6) Filter [codegen id : 1] -Input [2]: [d_date_sk#4, d_month_seq#5] -Condition : ((isnotnull(d_month_seq#5) AND (d_month_seq#5 = Subquery scalar-subquery#6, [id=#7])) AND isnotnull(d_date_sk#4)) - -(7) Project [codegen id : 1] -Output [1]: [d_date_sk#4] -Input [2]: [d_date_sk#4, d_month_seq#5] - -(8) BroadcastExchange -Input [1]: [d_date_sk#4] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#8] - -(9) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_sold_date_sk#1] -Right keys [1]: [d_date_sk#4] -Join condition: None - -(10) Project [codegen id : 5] 
-Output [2]: [ss_item_sk#2, ss_customer_sk#3] -Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, d_date_sk#4] - -(11) Scan parquet default.item -Output [3]: [i_item_sk#9, i_current_price#10, i_category#11] +(4) Scan parquet default.item +Output [3]: [i_item_sk#4, i_current_price#5, i_category#6] Batched: true Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_current_price), IsNotNull(i_item_sk)] ReadSchema: struct -(12) ColumnarToRow [codegen id : 4] -Input [3]: [i_item_sk#9, i_current_price#10, i_category#11] +(5) ColumnarToRow [codegen id : 3] +Input [3]: [i_item_sk#4, i_current_price#5, i_category#6] -(13) Filter [codegen id : 4] -Input [3]: [i_item_sk#9, i_current_price#10, i_category#11] -Condition : (isnotnull(i_current_price#10) AND isnotnull(i_item_sk#9)) +(6) Filter [codegen id : 3] +Input [3]: [i_item_sk#4, i_current_price#5, i_category#6] +Condition : (isnotnull(i_current_price#5) AND isnotnull(i_item_sk#4)) -(14) Scan parquet default.item -Output [2]: [i_current_price#10, i_category#11] +(7) Scan parquet default.item +Output [2]: [i_current_price#5, i_category#6] Batched: true Location [not included in comparison]/{warehouse_dir}/item] PushedFilters: [IsNotNull(i_category)] ReadSchema: struct -(15) ColumnarToRow [codegen id : 2] -Input [2]: [i_current_price#10, i_category#11] - -(16) Filter [codegen id : 2] -Input [2]: [i_current_price#10, i_category#11] -Condition : isnotnull(i_category#11) - -(17) HashAggregate [codegen id : 2] -Input [2]: [i_current_price#10, i_category#11] -Keys [1]: [i_category#11] -Functions [1]: [partial_avg(UnscaledValue(i_current_price#10))] -Aggregate Attributes [2]: [sum#12, count#13] -Results [3]: [i_category#11, sum#14, count#15] - -(18) Exchange -Input [3]: [i_category#11, sum#14, count#15] -Arguments: hashpartitioning(i_category#11, 5), true, [id=#16] - -(19) HashAggregate [codegen id : 3] -Input [3]: [i_category#11, sum#14, count#15] -Keys [1]: [i_category#11] -Functions [1]: [avg(UnscaledValue(i_current_price#10))] -Aggregate Attributes [1]: [avg(UnscaledValue(i_current_price#10))#17] -Results [2]: [cast((avg(UnscaledValue(i_current_price#10))#17 / 100.0) as decimal(11,6)) AS avg(i_current_price)#18, i_category#11 AS i_category#11#19] - -(20) BroadcastExchange -Input [2]: [avg(i_current_price)#18, i_category#11#19] -Arguments: HashedRelationBroadcastMode(List(input[1, string, true]),false), [id=#20] - -(21) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [i_category#11] -Right keys [1]: [i_category#11#19] +(8) ColumnarToRow [codegen id : 1] +Input [2]: [i_current_price#5, i_category#6] + +(9) Filter [codegen id : 1] +Input [2]: [i_current_price#5, i_category#6] +Condition : isnotnull(i_category#6) + +(10) HashAggregate [codegen id : 1] +Input [2]: [i_current_price#5, i_category#6] +Keys [1]: [i_category#6] +Functions [1]: [partial_avg(UnscaledValue(i_current_price#5))] +Aggregate Attributes [2]: [sum#7, count#8] +Results [3]: [i_category#6, sum#9, count#10] + +(11) Exchange +Input [3]: [i_category#6, sum#9, count#10] +Arguments: hashpartitioning(i_category#6, 5), true, [id=#11] + +(12) HashAggregate [codegen id : 2] +Input [3]: [i_category#6, sum#9, count#10] +Keys [1]: [i_category#6] +Functions [1]: [avg(UnscaledValue(i_current_price#5))] +Aggregate Attributes [1]: [avg(UnscaledValue(i_current_price#5))#12] +Results [2]: [cast((avg(UnscaledValue(i_current_price#5))#12 / 100.0) as decimal(11,6)) AS avg(i_current_price)#13, i_category#6 AS i_category#6#14] + +(13) BroadcastExchange +Input 
[2]: [avg(i_current_price)#13, i_category#6#14] +Arguments: HashedRelationBroadcastMode(List(input[1, string, true]),false), [id=#15] + +(14) BroadcastHashJoin [codegen id : 3] +Left keys [1]: [i_category#6] +Right keys [1]: [i_category#6#14] +Join condition: None + +(15) Filter [codegen id : 3] +Input [5]: [i_item_sk#4, i_current_price#5, i_category#6, avg(i_current_price)#13, i_category#6#14] +Condition : (cast(i_current_price#5 as decimal(14,7)) > CheckOverflow((1.200000 * promote_precision(avg(i_current_price)#13)), DecimalType(14,7), true)) + +(16) Project [codegen id : 3] +Output [1]: [i_item_sk#4] +Input [5]: [i_item_sk#4, i_current_price#5, i_category#6, avg(i_current_price)#13, i_category#6#14] + +(17) BroadcastExchange +Input [1]: [i_item_sk#4] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#16] + +(18) BroadcastHashJoin [codegen id : 5] +Left keys [1]: [ss_item_sk#2] +Right keys [1]: [i_item_sk#4] Join condition: None +(19) Project [codegen id : 5] +Output [2]: [ss_sold_date_sk#1, ss_customer_sk#3] +Input [4]: [ss_sold_date_sk#1, ss_item_sk#2, ss_customer_sk#3, i_item_sk#4] + +(20) Scan parquet default.date_dim +Output [2]: [d_date_sk#17, d_month_seq#18] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_month_seq), IsNotNull(d_date_sk)] +ReadSchema: struct + +(21) ColumnarToRow [codegen id : 4] +Input [2]: [d_date_sk#17, d_month_seq#18] + (22) Filter [codegen id : 4] -Input [5]: [i_item_sk#9, i_current_price#10, i_category#11, avg(i_current_price)#18, i_category#11#19] -Condition : (cast(i_current_price#10 as decimal(14,7)) > CheckOverflow((1.200000 * promote_precision(avg(i_current_price)#18)), DecimalType(14,7), true)) +Input [2]: [d_date_sk#17, d_month_seq#18] +Condition : ((isnotnull(d_month_seq#18) AND (d_month_seq#18 = Subquery scalar-subquery#19, [id=#20])) AND isnotnull(d_date_sk#17)) (23) Project [codegen id : 4] -Output [1]: [i_item_sk#9] -Input [5]: [i_item_sk#9, i_current_price#10, i_category#11, avg(i_current_price)#18, i_category#11#19] +Output [1]: [d_date_sk#17] +Input [2]: [d_date_sk#17, d_month_seq#18] (24) BroadcastExchange -Input [1]: [i_item_sk#9] +Input [1]: [d_date_sk#17] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#21] (25) BroadcastHashJoin [codegen id : 5] -Left keys [1]: [ss_item_sk#2] -Right keys [1]: [i_item_sk#9] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#17] Join condition: None (26) Project [codegen id : 5] Output [1]: [ss_customer_sk#3] -Input [3]: [ss_item_sk#2, ss_customer_sk#3, i_item_sk#9] +Input [3]: [ss_sold_date_sk#1, ss_customer_sk#3, d_date_sk#17] (27) Exchange Input [1]: [ss_customer_sk#3] @@ -282,7 +282,7 @@ Arguments: 100, [cnt#35 ASC NULLS FIRST, ca_state#24 ASC NULLS FIRST], [state#34 ===== Subqueries ===== -Subquery:1 Hosting operator id = 6 Hosting Expression = Subquery scalar-subquery#6, [id=#7] +Subquery:1 Hosting operator id = 22 Hosting Expression = Subquery scalar-subquery#19, [id=#20] * HashAggregate (57) +- Exchange (56) +- * HashAggregate (55) @@ -293,39 +293,39 @@ Subquery:1 Hosting operator id = 6 Hosting Expression = Subquery scalar-subquery (51) Scan parquet default.date_dim -Output [3]: [d_month_seq#5, d_year#37, d_moy#38] +Output [3]: [d_month_seq#18, d_year#37, d_moy#38] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), IsNotNull(d_moy), EqualTo(d_year,2000), EqualTo(d_moy,1)] 
ReadSchema: struct (52) ColumnarToRow [codegen id : 1] -Input [3]: [d_month_seq#5, d_year#37, d_moy#38] +Input [3]: [d_month_seq#18, d_year#37, d_moy#38] (53) Filter [codegen id : 1] -Input [3]: [d_month_seq#5, d_year#37, d_moy#38] +Input [3]: [d_month_seq#18, d_year#37, d_moy#38] Condition : (((isnotnull(d_year#37) AND isnotnull(d_moy#38)) AND (d_year#37 = 2000)) AND (d_moy#38 = 1)) (54) Project [codegen id : 1] -Output [1]: [d_month_seq#5] -Input [3]: [d_month_seq#5, d_year#37, d_moy#38] +Output [1]: [d_month_seq#18] +Input [3]: [d_month_seq#18, d_year#37, d_moy#38] (55) HashAggregate [codegen id : 1] -Input [1]: [d_month_seq#5] -Keys [1]: [d_month_seq#5] +Input [1]: [d_month_seq#18] +Keys [1]: [d_month_seq#18] Functions: [] Aggregate Attributes: [] -Results [1]: [d_month_seq#5] +Results [1]: [d_month_seq#18] (56) Exchange -Input [1]: [d_month_seq#5] -Arguments: hashpartitioning(d_month_seq#5, 5), true, [id=#39] +Input [1]: [d_month_seq#18] +Arguments: hashpartitioning(d_month_seq#18, 5), true, [id=#39] (57) HashAggregate [codegen id : 2] -Input [1]: [d_month_seq#5] -Keys [1]: [d_month_seq#5] +Input [1]: [d_month_seq#18] +Keys [1]: [d_month_seq#18] Functions: [] Aggregate Attributes: [] -Results [1]: [d_month_seq#5] +Results [1]: [d_month_seq#18] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/simplified.txt index 2700741b82c04..3cbd44fc5a7d9 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q6.sf100/simplified.txt @@ -16,55 +16,55 @@ TakeOrderedAndProject [cnt,ca_state,state] Exchange [ss_customer_sk] #2 WholeStageCodegen (5) Project [ss_customer_sk] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_customer_sk] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_customer_sk] + BroadcastHashJoin [ss_item_sk,i_item_sk] Filter [ss_customer_sk,ss_sold_date_sk,ss_item_sk] ColumnarToRow InputAdapter Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_customer_sk] InputAdapter BroadcastExchange #3 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - Subquery #1 - WholeStageCodegen (2) - HashAggregate [d_month_seq] + WholeStageCodegen (3) + Project [i_item_sk] + Filter [i_current_price,avg(i_current_price)] + BroadcastHashJoin [i_category,i_category] + Filter [i_current_price,i_item_sk] + ColumnarToRow InputAdapter - Exchange [d_month_seq] #4 - WholeStageCodegen (1) - HashAggregate [d_month_seq] - Project [d_month_seq] - Filter [d_year,d_moy] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_month_seq,d_year,d_moy] - ColumnarToRow + Scan parquet default.item [i_item_sk,i_current_price,i_category] InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] + BroadcastExchange #4 + WholeStageCodegen (2) + HashAggregate [i_category,sum,count] [avg(UnscaledValue(i_current_price)),avg(i_current_price),i_category,sum,count] + InputAdapter + Exchange [i_category] #5 + WholeStageCodegen (1) + HashAggregate [i_category,i_current_price] [sum,count,sum,count] + Filter [i_category] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_current_price,i_category] InputAdapter - BroadcastExchange #5 + BroadcastExchange #6 WholeStageCodegen (4) - Project [i_item_sk] - Filter 
[i_current_price,avg(i_current_price)] - BroadcastHashJoin [i_category,i_category] - Filter [i_current_price,i_item_sk] - ColumnarToRow + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + Subquery #1 + WholeStageCodegen (2) + HashAggregate [d_month_seq] InputAdapter - Scan parquet default.item [i_item_sk,i_current_price,i_category] + Exchange [d_month_seq] #7 + WholeStageCodegen (1) + HashAggregate [d_month_seq] + Project [d_month_seq] + Filter [d_year,d_moy] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_month_seq,d_year,d_moy] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (3) - HashAggregate [i_category,sum,count] [avg(UnscaledValue(i_current_price)),avg(i_current_price),i_category,sum,count] - InputAdapter - Exchange [i_category] #7 - WholeStageCodegen (2) - HashAggregate [i_category,i_current_price] [sum,count,sum,count] - Filter [i_category] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_current_price,i_category] + Scan parquet default.date_dim [d_date_sk,d_month_seq] InputAdapter WholeStageCodegen (12) Sort [c_customer_sk] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt index 628ca0ad4711c..214e5eadd0eac 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/explain.txt @@ -1,68 +1,64 @@ == Physical Plan == -TakeOrderedAndProject (64) -+- * Project (63) - +- Window (62) - +- * Sort (61) - +- Exchange (60) - +- * HashAggregate (59) - +- Exchange (58) - +- * HashAggregate (57) - +- Union (56) - :- * HashAggregate (50) - : +- Exchange (49) - : +- * HashAggregate (48) - : +- Union (47) - : :- * HashAggregate (41) - : : +- Exchange (40) - : : +- * HashAggregate (39) - : : +- * Project (38) - : : +- * BroadcastHashJoin Inner BuildRight (37) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.store_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.date_dim (4) - : : +- BroadcastExchange (36) - : : +- * BroadcastHashJoin LeftSemi BuildRight (35) - : : :- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.store (11) - : : +- BroadcastExchange (34) - : : +- * Project (33) - : : +- * Filter (32) - : : +- Window (31) - : : +- * Sort (30) - : : +- Exchange (29) - : : +- * HashAggregate (28) - : : +- Exchange (27) - : : +- * HashAggregate (26) - : : +- * Project (25) - : : +- * BroadcastHashJoin Inner BuildRight (24) - : : :- * Project (19) - : : : +- * BroadcastHashJoin Inner BuildRight (18) - : : : :- * Filter (16) - : : : : +- * ColumnarToRow (15) - : : : : +- Scan parquet default.store_sales (14) - : : : +- ReusedExchange (17) - : : +- BroadcastExchange (23) - : : +- * Filter (22) - : : +- * ColumnarToRow (21) - : : +- Scan parquet default.store (20) - : +- * HashAggregate (46) - : +- Exchange (45) - : +- * HashAggregate (44) - : +- * HashAggregate (43) - : +- ReusedExchange (42) - +- * HashAggregate (55) - +- Exchange (54) - +- * HashAggregate (53) - +- * HashAggregate (52) - +- ReusedExchange (51) +TakeOrderedAndProject (60) ++- * Project (59) + +- Window (58) + +- * Sort (57) + +- Exchange (56) + +- * 
HashAggregate (55) + +- Exchange (54) + +- * HashAggregate (53) + +- Union (52) + :- * HashAggregate (41) + : +- Exchange (40) + : +- * HashAggregate (39) + : +- * Project (38) + : +- * BroadcastHashJoin Inner BuildRight (37) + : :- * Project (10) + : : +- * BroadcastHashJoin Inner BuildRight (9) + : : :- * Filter (3) + : : : +- * ColumnarToRow (2) + : : : +- Scan parquet default.store_sales (1) + : : +- BroadcastExchange (8) + : : +- * Project (7) + : : +- * Filter (6) + : : +- * ColumnarToRow (5) + : : +- Scan parquet default.date_dim (4) + : +- BroadcastExchange (36) + : +- * BroadcastHashJoin LeftSemi BuildRight (35) + : :- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.store (11) + : +- BroadcastExchange (34) + : +- * Project (33) + : +- * Filter (32) + : +- Window (31) + : +- * Sort (30) + : +- Exchange (29) + : +- * HashAggregate (28) + : +- Exchange (27) + : +- * HashAggregate (26) + : +- * Project (25) + : +- * BroadcastHashJoin Inner BuildRight (24) + : :- * Project (19) + : : +- * BroadcastHashJoin Inner BuildRight (18) + : : :- * Filter (16) + : : : +- * ColumnarToRow (15) + : : : +- Scan parquet default.store_sales (14) + : : +- ReusedExchange (17) + : +- BroadcastExchange (23) + : +- * Filter (22) + : +- * ColumnarToRow (21) + : +- Scan parquet default.store (20) + :- * HashAggregate (46) + : +- Exchange (45) + : +- * HashAggregate (44) + : +- * HashAggregate (43) + : +- ReusedExchange (42) + +- * HashAggregate (51) + +- Exchange (50) + +- * HashAggregate (49) + +- * HashAggregate (48) + +- ReusedExchange (47) (1) Scan parquet default.store_sales @@ -186,7 +182,7 @@ Results [2]: [s_state#9, sum#12] (27) Exchange Input [2]: [s_state#9, sum#12] -Arguments: hashpartitioning(s_state#9, 5), true, [id=#13] +Arguments: hashpartitioning(s_state#9, 5), ENSURE_REQUIREMENTS, [id=#13] (28) HashAggregate [codegen id : 5] Input [2]: [s_state#9, sum#12] @@ -197,7 +193,7 @@ Results [3]: [s_state#9 AS s_state#15, s_state#9, MakeDecimal(sum(UnscaledValue( (29) Exchange Input [3]: [s_state#15, s_state#9, _w2#16] -Arguments: hashpartitioning(s_state#9, 5), true, [id=#17] +Arguments: hashpartitioning(s_state#9, 5), ENSURE_REQUIREMENTS, [id=#17] (30) Sort [codegen id : 6] Input [3]: [s_state#15, s_state#9, _w2#16] @@ -246,7 +242,7 @@ Results [3]: [s_state#9, s_county#8, sum#22] (40) Exchange Input [3]: [s_state#9, s_county#8, sum#22] -Arguments: hashpartitioning(s_state#9, s_county#8, 5), true, [id=#23] +Arguments: hashpartitioning(s_state#9, s_county#8, 5), ENSURE_REQUIREMENTS, [id=#23] (41) HashAggregate [codegen id : 10] Input [3]: [s_state#9, s_county#8, sum#22] @@ -274,7 +270,7 @@ Results [3]: [s_state#9, sum#34, isEmpty#35] (45) Exchange Input [3]: [s_state#9, sum#34, isEmpty#35] -Arguments: hashpartitioning(s_state#9, 5), true, [id=#36] +Arguments: hashpartitioning(s_state#9, 5), ENSURE_REQUIREMENTS, [id=#36] (46) HashAggregate [codegen id : 21] Input [3]: [s_state#9, sum#34, isEmpty#35] @@ -283,91 +279,71 @@ Functions [1]: [sum(total_sum#31)] Aggregate Attributes [1]: [sum(total_sum#31)#37] Results [6]: [sum(total_sum#31)#37 AS total_sum#38, s_state#9, null AS s_county#39, 0 AS g_state#40, 1 AS g_county#41, 1 AS lochierarchy#42] -(47) Union +(47) ReusedExchange [Reuses operator id: 40] +Output [3]: [s_state#9, s_county#8, sum#43] -(48) HashAggregate [codegen id : 22] -Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Functions: [] 
-Aggregate Attributes: [] -Results [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] - -(49) Exchange -Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Arguments: hashpartitioning(total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28, 5), true, [id=#43] - -(50) HashAggregate [codegen id : 23] -Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Functions: [] -Aggregate Attributes: [] -Results [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] - -(51) ReusedExchange [Reuses operator id: 40] -Output [3]: [s_state#9, s_county#8, sum#44] - -(52) HashAggregate [codegen id : 33] -Input [3]: [s_state#9, s_county#8, sum#44] +(48) HashAggregate [codegen id : 31] +Input [3]: [s_state#9, s_county#8, sum#43] Keys [2]: [s_state#9, s_county#8] Functions [1]: [sum(UnscaledValue(ss_net_profit#3))] -Aggregate Attributes [1]: [sum(UnscaledValue(ss_net_profit#3))#45] -Results [1]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#3))#45,17,2) AS total_sum#31] +Aggregate Attributes [1]: [sum(UnscaledValue(ss_net_profit#3))#44] +Results [1]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#3))#44,17,2) AS total_sum#31] -(53) HashAggregate [codegen id : 33] +(49) HashAggregate [codegen id : 31] Input [1]: [total_sum#31] Keys: [] Functions [1]: [partial_sum(total_sum#31)] -Aggregate Attributes [2]: [sum#46, isEmpty#47] -Results [2]: [sum#48, isEmpty#49] +Aggregate Attributes [2]: [sum#45, isEmpty#46] +Results [2]: [sum#47, isEmpty#48] -(54) Exchange -Input [2]: [sum#48, isEmpty#49] -Arguments: SinglePartition, true, [id=#50] +(50) Exchange +Input [2]: [sum#47, isEmpty#48] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#49] -(55) HashAggregate [codegen id : 34] -Input [2]: [sum#48, isEmpty#49] +(51) HashAggregate [codegen id : 32] +Input [2]: [sum#47, isEmpty#48] Keys: [] Functions [1]: [sum(total_sum#31)] -Aggregate Attributes [1]: [sum(total_sum#31)#51] -Results [6]: [sum(total_sum#31)#51 AS total_sum#52, null AS s_state#53, null AS s_county#54, 1 AS g_state#55, 1 AS g_county#56, 2 AS lochierarchy#57] +Aggregate Attributes [1]: [sum(total_sum#31)#50] +Results [6]: [sum(total_sum#31)#50 AS total_sum#51, null AS s_state#52, null AS s_county#53, 1 AS g_state#54, 1 AS g_county#55, 2 AS lochierarchy#56] -(56) Union +(52) Union -(57) HashAggregate [codegen id : 35] +(53) HashAggregate [codegen id : 33] Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Functions: [] Aggregate Attributes: [] Results [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -(58) Exchange +(54) Exchange Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Arguments: hashpartitioning(total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28, 5), true, [id=#58] +Arguments: hashpartitioning(total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28, 5), ENSURE_REQUIREMENTS, [id=#57] -(59) HashAggregate [codegen id : 36] +(55) HashAggregate [codegen id : 34] Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Functions: [] 
Aggregate Attributes: [] -Results [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, CASE WHEN (g_county#27 = 0) THEN s_state#9 END AS _w0#59] +Results [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, CASE WHEN (g_county#27 = 0) THEN s_state#9 END AS _w0#58] -(60) Exchange -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59] -Arguments: hashpartitioning(lochierarchy#28, _w0#59, 5), true, [id=#60] +(56) Exchange +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58] +Arguments: hashpartitioning(lochierarchy#28, _w0#58, 5), ENSURE_REQUIREMENTS, [id=#59] -(61) Sort [codegen id : 37] -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59] -Arguments: [lochierarchy#28 ASC NULLS FIRST, _w0#59 ASC NULLS FIRST, total_sum#25 DESC NULLS LAST], false, 0 +(57) Sort [codegen id : 35] +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58] +Arguments: [lochierarchy#28 ASC NULLS FIRST, _w0#58 ASC NULLS FIRST, total_sum#25 DESC NULLS LAST], false, 0 -(62) Window -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59] -Arguments: [rank(total_sum#25) windowspecdefinition(lochierarchy#28, _w0#59, total_sum#25 DESC NULLS LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#61], [lochierarchy#28, _w0#59], [total_sum#25 DESC NULLS LAST] +(58) Window +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58] +Arguments: [rank(total_sum#25) windowspecdefinition(lochierarchy#28, _w0#58, total_sum#25 DESC NULLS LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#60], [lochierarchy#28, _w0#58], [total_sum#25 DESC NULLS LAST] -(63) Project [codegen id : 38] -Output [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#61] -Input [6]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59, rank_within_parent#61] +(59) Project [codegen id : 36] +Output [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#60] +Input [6]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58, rank_within_parent#60] -(64) TakeOrderedAndProject -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#61] -Arguments: 100, [lochierarchy#28 DESC NULLS LAST, CASE WHEN (lochierarchy#28 = 0) THEN s_state#9 END ASC NULLS FIRST, rank_within_parent#61 ASC NULLS FIRST], [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#61] +(60) TakeOrderedAndProject +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#60] +Arguments: 100, [lochierarchy#28 DESC NULLS LAST, CASE WHEN (lochierarchy#28 = 0) THEN s_state#9 END ASC NULLS FIRST, rank_within_parent#60 ASC NULLS FIRST], [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#60] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/simplified.txt index b3dbc1612539a..6b02f5692b0eb 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a.sf100/simplified.txt @@ -1,107 +1,99 @@ TakeOrderedAndProject [lochierarchy,s_state,rank_within_parent,total_sum,s_county] - WholeStageCodegen (38) + WholeStageCodegen (36) Project 
[total_sum,s_state,s_county,lochierarchy,rank_within_parent] InputAdapter Window [total_sum,lochierarchy,_w0] - WholeStageCodegen (37) + WholeStageCodegen (35) Sort [lochierarchy,_w0,total_sum] InputAdapter Exchange [lochierarchy,_w0] #1 - WholeStageCodegen (36) + WholeStageCodegen (34) HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] [_w0] InputAdapter Exchange [total_sum,s_state,s_county,g_state,g_county,lochierarchy] #2 - WholeStageCodegen (35) + WholeStageCodegen (33) HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] InputAdapter Union - WholeStageCodegen (23) - HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] + WholeStageCodegen (10) + HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,g_state,g_county,lochierarchy,sum] InputAdapter - Exchange [total_sum,s_state,s_county,g_state,g_county,lochierarchy] #3 - WholeStageCodegen (22) - HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] - InputAdapter - Union - WholeStageCodegen (10) - HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,g_state,g_county,lochierarchy,sum] + Exchange [s_state,s_county] #3 + WholeStageCodegen (9) + HashAggregate [s_state,s_county,ss_net_profit] [sum,sum] + Project [ss_net_profit,s_county,s_state] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] InputAdapter - Exchange [s_state,s_county] #4 - WholeStageCodegen (9) - HashAggregate [s_state,s_county,ss_net_profit] [sum,sum] - Project [ss_net_profit,s_county,s_state] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_store_sk,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (8) - BroadcastHashJoin [s_state,s_state] - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_county,s_state] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (7) - Project [s_state] - Filter [ranking] + Scan parquet default.date_dim [d_date_sk,d_month_seq] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (8) + BroadcastHashJoin [s_state,s_state] + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_county,s_state] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (7) + Project [s_state] + Filter [ranking] + InputAdapter + Window [_w2,s_state] + WholeStageCodegen (6) + Sort [s_state,_w2] + InputAdapter + Exchange [s_state] #7 + WholeStageCodegen (5) + HashAggregate [s_state,sum] [sum(UnscaledValue(ss_net_profit)),s_state,_w2,sum] InputAdapter - Window [_w2,s_state] - WholeStageCodegen (6) - Sort [s_state,_w2] - InputAdapter - Exchange [s_state] #8 - WholeStageCodegen (5) - HashAggregate [s_state,sum] [sum(UnscaledValue(ss_net_profit)),s_state,_w2,sum] + Exchange 
[s_state] #8 + WholeStageCodegen (4) + HashAggregate [s_state,ss_net_profit] [sum,sum] + Project [ss_net_profit,s_state] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_store_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] InputAdapter - Exchange [s_state] #9 - WholeStageCodegen (4) - HashAggregate [s_state,ss_net_profit] [sum,sum] - Project [ss_net_profit,s_state] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_store_sk,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_store_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] - InputAdapter - ReusedExchange [d_date_sk] #5 - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (3) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_state] - WholeStageCodegen (21) - HashAggregate [s_state,sum,isEmpty] [sum(total_sum),total_sum,s_county,g_state,g_county,lochierarchy,sum,isEmpty] - InputAdapter - Exchange [s_state] #11 - WholeStageCodegen (20) - HashAggregate [s_state,total_sum] [sum,isEmpty,sum,isEmpty] - HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,sum] - InputAdapter - ReusedExchange [s_state,s_county,sum] #4 - WholeStageCodegen (34) + ReusedExchange [d_date_sk] #4 + InputAdapter + BroadcastExchange #9 + WholeStageCodegen (3) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_state] + WholeStageCodegen (21) + HashAggregate [s_state,sum,isEmpty] [sum(total_sum),total_sum,s_county,g_state,g_county,lochierarchy,sum,isEmpty] + InputAdapter + Exchange [s_state] #10 + WholeStageCodegen (20) + HashAggregate [s_state,total_sum] [sum,isEmpty,sum,isEmpty] + HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,sum] + InputAdapter + ReusedExchange [s_state,s_county,sum] #3 + WholeStageCodegen (32) HashAggregate [sum,isEmpty] [sum(total_sum),total_sum,s_state,s_county,g_state,g_county,lochierarchy,sum,isEmpty] InputAdapter - Exchange #12 - WholeStageCodegen (33) + Exchange #11 + WholeStageCodegen (31) HashAggregate [total_sum] [sum,isEmpty,sum,isEmpty] HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,sum] InputAdapter - ReusedExchange [s_state,s_county,sum] #4 + ReusedExchange [s_state,s_county,sum] #3 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt index 705d1b3f91342..e41dc814cbd2e 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/explain.txt @@ -1,68 +1,64 @@ == Physical Plan == -TakeOrderedAndProject (64) -+- * Project (63) - +- Window (62) - +- * Sort (61) - +- Exchange (60) - +- * HashAggregate (59) - +- Exchange (58) - +- * HashAggregate (57) - +- Union (56) - :- * HashAggregate (50) - : +- Exchange (49) - : +- * HashAggregate (48) - : +- Union (47) - : :- * HashAggregate (41) - : : +- Exchange (40) - : : +- * HashAggregate (39) - : : +- * Project (38) - : : +- * BroadcastHashJoin Inner BuildRight (37) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : 
: : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.store_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.date_dim (4) - : : +- BroadcastExchange (36) - : : +- * BroadcastHashJoin LeftSemi BuildRight (35) - : : :- * Filter (13) - : : : +- * ColumnarToRow (12) - : : : +- Scan parquet default.store (11) - : : +- BroadcastExchange (34) - : : +- * Project (33) - : : +- * Filter (32) - : : +- Window (31) - : : +- * Sort (30) - : : +- Exchange (29) - : : +- * HashAggregate (28) - : : +- Exchange (27) - : : +- * HashAggregate (26) - : : +- * Project (25) - : : +- * BroadcastHashJoin Inner BuildRight (24) - : : :- * Project (22) - : : : +- * BroadcastHashJoin Inner BuildRight (21) - : : : :- * Filter (16) - : : : : +- * ColumnarToRow (15) - : : : : +- Scan parquet default.store_sales (14) - : : : +- BroadcastExchange (20) - : : : +- * Filter (19) - : : : +- * ColumnarToRow (18) - : : : +- Scan parquet default.store (17) - : : +- ReusedExchange (23) - : +- * HashAggregate (46) - : +- Exchange (45) - : +- * HashAggregate (44) - : +- * HashAggregate (43) - : +- ReusedExchange (42) - +- * HashAggregate (55) - +- Exchange (54) - +- * HashAggregate (53) - +- * HashAggregate (52) - +- ReusedExchange (51) +TakeOrderedAndProject (60) ++- * Project (59) + +- Window (58) + +- * Sort (57) + +- Exchange (56) + +- * HashAggregate (55) + +- Exchange (54) + +- * HashAggregate (53) + +- Union (52) + :- * HashAggregate (41) + : +- Exchange (40) + : +- * HashAggregate (39) + : +- * Project (38) + : +- * BroadcastHashJoin Inner BuildRight (37) + : :- * Project (10) + : : +- * BroadcastHashJoin Inner BuildRight (9) + : : :- * Filter (3) + : : : +- * ColumnarToRow (2) + : : : +- Scan parquet default.store_sales (1) + : : +- BroadcastExchange (8) + : : +- * Project (7) + : : +- * Filter (6) + : : +- * ColumnarToRow (5) + : : +- Scan parquet default.date_dim (4) + : +- BroadcastExchange (36) + : +- * BroadcastHashJoin LeftSemi BuildRight (35) + : :- * Filter (13) + : : +- * ColumnarToRow (12) + : : +- Scan parquet default.store (11) + : +- BroadcastExchange (34) + : +- * Project (33) + : +- * Filter (32) + : +- Window (31) + : +- * Sort (30) + : +- Exchange (29) + : +- * HashAggregate (28) + : +- Exchange (27) + : +- * HashAggregate (26) + : +- * Project (25) + : +- * BroadcastHashJoin Inner BuildRight (24) + : :- * Project (22) + : : +- * BroadcastHashJoin Inner BuildRight (21) + : : :- * Filter (16) + : : : +- * ColumnarToRow (15) + : : : +- Scan parquet default.store_sales (14) + : : +- BroadcastExchange (20) + : : +- * Filter (19) + : : +- * ColumnarToRow (18) + : : +- Scan parquet default.store (17) + : +- ReusedExchange (23) + :- * HashAggregate (46) + : +- Exchange (45) + : +- * HashAggregate (44) + : +- * HashAggregate (43) + : +- ReusedExchange (42) + +- * HashAggregate (51) + +- Exchange (50) + +- * HashAggregate (49) + +- * HashAggregate (48) + +- ReusedExchange (47) (1) Scan parquet default.store_sales @@ -186,7 +182,7 @@ Results [2]: [s_state#9, sum#12] (27) Exchange Input [2]: [s_state#9, sum#12] -Arguments: hashpartitioning(s_state#9, 5), true, [id=#13] +Arguments: hashpartitioning(s_state#9, 5), ENSURE_REQUIREMENTS, [id=#13] (28) HashAggregate [codegen id : 5] Input [2]: [s_state#9, sum#12] @@ -197,7 +193,7 @@ Results [3]: [s_state#9 AS s_state#15, s_state#9, MakeDecimal(sum(UnscaledValue( (29) Exchange Input [3]: [s_state#15, s_state#9, _w2#16] -Arguments: hashpartitioning(s_state#9, 5), 
true, [id=#17] +Arguments: hashpartitioning(s_state#9, 5), ENSURE_REQUIREMENTS, [id=#17] (30) Sort [codegen id : 6] Input [3]: [s_state#15, s_state#9, _w2#16] @@ -246,7 +242,7 @@ Results [3]: [s_state#9, s_county#8, sum#22] (40) Exchange Input [3]: [s_state#9, s_county#8, sum#22] -Arguments: hashpartitioning(s_state#9, s_county#8, 5), true, [id=#23] +Arguments: hashpartitioning(s_state#9, s_county#8, 5), ENSURE_REQUIREMENTS, [id=#23] (41) HashAggregate [codegen id : 10] Input [3]: [s_state#9, s_county#8, sum#22] @@ -274,7 +270,7 @@ Results [3]: [s_state#9, sum#34, isEmpty#35] (45) Exchange Input [3]: [s_state#9, sum#34, isEmpty#35] -Arguments: hashpartitioning(s_state#9, 5), true, [id=#36] +Arguments: hashpartitioning(s_state#9, 5), ENSURE_REQUIREMENTS, [id=#36] (46) HashAggregate [codegen id : 21] Input [3]: [s_state#9, sum#34, isEmpty#35] @@ -283,91 +279,71 @@ Functions [1]: [sum(total_sum#31)] Aggregate Attributes [1]: [sum(total_sum#31)#37] Results [6]: [sum(total_sum#31)#37 AS total_sum#38, s_state#9, null AS s_county#39, 0 AS g_state#40, 1 AS g_county#41, 1 AS lochierarchy#42] -(47) Union +(47) ReusedExchange [Reuses operator id: 40] +Output [3]: [s_state#9, s_county#8, sum#43] -(48) HashAggregate [codegen id : 22] -Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Functions: [] -Aggregate Attributes: [] -Results [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] - -(49) Exchange -Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Arguments: hashpartitioning(total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28, 5), true, [id=#43] - -(50) HashAggregate [codegen id : 23] -Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Functions: [] -Aggregate Attributes: [] -Results [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] - -(51) ReusedExchange [Reuses operator id: 40] -Output [3]: [s_state#9, s_county#8, sum#44] - -(52) HashAggregate [codegen id : 33] -Input [3]: [s_state#9, s_county#8, sum#44] +(48) HashAggregate [codegen id : 31] +Input [3]: [s_state#9, s_county#8, sum#43] Keys [2]: [s_state#9, s_county#8] Functions [1]: [sum(UnscaledValue(ss_net_profit#3))] -Aggregate Attributes [1]: [sum(UnscaledValue(ss_net_profit#3))#45] -Results [1]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#3))#45,17,2) AS total_sum#31] +Aggregate Attributes [1]: [sum(UnscaledValue(ss_net_profit#3))#44] +Results [1]: [MakeDecimal(sum(UnscaledValue(ss_net_profit#3))#44,17,2) AS total_sum#31] -(53) HashAggregate [codegen id : 33] +(49) HashAggregate [codegen id : 31] Input [1]: [total_sum#31] Keys: [] Functions [1]: [partial_sum(total_sum#31)] -Aggregate Attributes [2]: [sum#46, isEmpty#47] -Results [2]: [sum#48, isEmpty#49] +Aggregate Attributes [2]: [sum#45, isEmpty#46] +Results [2]: [sum#47, isEmpty#48] -(54) Exchange -Input [2]: [sum#48, isEmpty#49] -Arguments: SinglePartition, true, [id=#50] +(50) Exchange +Input [2]: [sum#47, isEmpty#48] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#49] -(55) HashAggregate [codegen id : 34] -Input [2]: [sum#48, isEmpty#49] +(51) HashAggregate [codegen id : 32] +Input [2]: [sum#47, isEmpty#48] Keys: [] Functions [1]: [sum(total_sum#31)] -Aggregate Attributes [1]: 
[sum(total_sum#31)#51] -Results [6]: [sum(total_sum#31)#51 AS total_sum#52, null AS s_state#53, null AS s_county#54, 1 AS g_state#55, 1 AS g_county#56, 2 AS lochierarchy#57] +Aggregate Attributes [1]: [sum(total_sum#31)#50] +Results [6]: [sum(total_sum#31)#50 AS total_sum#51, null AS s_state#52, null AS s_county#53, 1 AS g_state#54, 1 AS g_county#55, 2 AS lochierarchy#56] -(56) Union +(52) Union -(57) HashAggregate [codegen id : 35] +(53) HashAggregate [codegen id : 33] Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Functions: [] Aggregate Attributes: [] Results [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -(58) Exchange +(54) Exchange Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] -Arguments: hashpartitioning(total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28, 5), true, [id=#58] +Arguments: hashpartitioning(total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28, 5), ENSURE_REQUIREMENTS, [id=#57] -(59) HashAggregate [codegen id : 36] +(55) HashAggregate [codegen id : 34] Input [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Keys [6]: [total_sum#25, s_state#9, s_county#8, g_state#26, g_county#27, lochierarchy#28] Functions: [] Aggregate Attributes: [] -Results [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, CASE WHEN (g_county#27 = 0) THEN s_state#9 END AS _w0#59] +Results [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, CASE WHEN (g_county#27 = 0) THEN s_state#9 END AS _w0#58] -(60) Exchange -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59] -Arguments: hashpartitioning(lochierarchy#28, _w0#59, 5), true, [id=#60] +(56) Exchange +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58] +Arguments: hashpartitioning(lochierarchy#28, _w0#58, 5), ENSURE_REQUIREMENTS, [id=#59] -(61) Sort [codegen id : 37] -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59] -Arguments: [lochierarchy#28 ASC NULLS FIRST, _w0#59 ASC NULLS FIRST, total_sum#25 DESC NULLS LAST], false, 0 +(57) Sort [codegen id : 35] +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58] +Arguments: [lochierarchy#28 ASC NULLS FIRST, _w0#58 ASC NULLS FIRST, total_sum#25 DESC NULLS LAST], false, 0 -(62) Window -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59] -Arguments: [rank(total_sum#25) windowspecdefinition(lochierarchy#28, _w0#59, total_sum#25 DESC NULLS LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#61], [lochierarchy#28, _w0#59], [total_sum#25 DESC NULLS LAST] +(58) Window +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58] +Arguments: [rank(total_sum#25) windowspecdefinition(lochierarchy#28, _w0#58, total_sum#25 DESC NULLS LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#60], [lochierarchy#28, _w0#58], [total_sum#25 DESC NULLS LAST] -(63) Project [codegen id : 38] -Output [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#61] -Input [6]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#59, rank_within_parent#61] +(59) Project [codegen id : 36] +Output [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#60] +Input [6]: 
[total_sum#25, s_state#9, s_county#8, lochierarchy#28, _w0#58, rank_within_parent#60] -(64) TakeOrderedAndProject -Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#61] -Arguments: 100, [lochierarchy#28 DESC NULLS LAST, CASE WHEN (lochierarchy#28 = 0) THEN s_state#9 END ASC NULLS FIRST, rank_within_parent#61 ASC NULLS FIRST], [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#61] +(60) TakeOrderedAndProject +Input [5]: [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#60] +Arguments: 100, [lochierarchy#28 DESC NULLS LAST, CASE WHEN (lochierarchy#28 = 0) THEN s_state#9 END ASC NULLS FIRST, rank_within_parent#60 ASC NULLS FIRST], [total_sum#25, s_state#9, s_county#8, lochierarchy#28, rank_within_parent#60] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/simplified.txt index bd0bd7e87251f..b32ed8ecf2857 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q70a/simplified.txt @@ -1,107 +1,99 @@ TakeOrderedAndProject [lochierarchy,s_state,rank_within_parent,total_sum,s_county] - WholeStageCodegen (38) + WholeStageCodegen (36) Project [total_sum,s_state,s_county,lochierarchy,rank_within_parent] InputAdapter Window [total_sum,lochierarchy,_w0] - WholeStageCodegen (37) + WholeStageCodegen (35) Sort [lochierarchy,_w0,total_sum] InputAdapter Exchange [lochierarchy,_w0] #1 - WholeStageCodegen (36) + WholeStageCodegen (34) HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] [_w0] InputAdapter Exchange [total_sum,s_state,s_county,g_state,g_county,lochierarchy] #2 - WholeStageCodegen (35) + WholeStageCodegen (33) HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] InputAdapter Union - WholeStageCodegen (23) - HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] + WholeStageCodegen (10) + HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,g_state,g_county,lochierarchy,sum] InputAdapter - Exchange [total_sum,s_state,s_county,g_state,g_county,lochierarchy] #3 - WholeStageCodegen (22) - HashAggregate [total_sum,s_state,s_county,g_state,g_county,lochierarchy] - InputAdapter - Union - WholeStageCodegen (10) - HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,g_state,g_county,lochierarchy,sum] + Exchange [s_state,s_county] #3 + WholeStageCodegen (9) + HashAggregate [s_state,s_county,ss_net_profit] [sum,sum] + Project [ss_net_profit,s_county,s_state] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] InputAdapter - Exchange [s_state,s_county] #4 - WholeStageCodegen (9) - HashAggregate [s_state,s_county,ss_net_profit] [sum,sum] - Project [ss_net_profit,s_county,s_state] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_store_sk,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter 
[d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (8) - BroadcastHashJoin [s_state,s_state] - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_county,s_state] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (7) - Project [s_state] - Filter [ranking] + Scan parquet default.date_dim [d_date_sk,d_month_seq] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (8) + BroadcastHashJoin [s_state,s_state] + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_county,s_state] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (7) + Project [s_state] + Filter [ranking] + InputAdapter + Window [_w2,s_state] + WholeStageCodegen (6) + Sort [s_state,_w2] + InputAdapter + Exchange [s_state] #7 + WholeStageCodegen (5) + HashAggregate [s_state,sum] [sum(UnscaledValue(ss_net_profit)),s_state,_w2,sum] InputAdapter - Window [_w2,s_state] - WholeStageCodegen (6) - Sort [s_state,_w2] - InputAdapter - Exchange [s_state] #8 - WholeStageCodegen (5) - HashAggregate [s_state,sum] [sum(UnscaledValue(ss_net_profit)),s_state,_w2,sum] + Exchange [s_state] #8 + WholeStageCodegen (4) + HashAggregate [s_state,ss_net_profit] [sum,sum] + Project [ss_net_profit,s_state] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_net_profit,s_state] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Filter [ss_store_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] InputAdapter - Exchange [s_state] #9 - WholeStageCodegen (4) - HashAggregate [s_state,ss_net_profit] [sum,sum] - Project [ss_net_profit,s_state] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_net_profit,s_state] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Filter [ss_store_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_net_profit] - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (2) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_state] - InputAdapter - ReusedExchange [d_date_sk] #5 - WholeStageCodegen (21) - HashAggregate [s_state,sum,isEmpty] [sum(total_sum),total_sum,s_county,g_state,g_county,lochierarchy,sum,isEmpty] - InputAdapter - Exchange [s_state] #11 - WholeStageCodegen (20) - HashAggregate [s_state,total_sum] [sum,isEmpty,sum,isEmpty] - HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,sum] - InputAdapter - ReusedExchange [s_state,s_county,sum] #4 - WholeStageCodegen (34) + BroadcastExchange #9 + WholeStageCodegen (2) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_state] + InputAdapter + ReusedExchange [d_date_sk] #4 + WholeStageCodegen (21) + HashAggregate [s_state,sum,isEmpty] [sum(total_sum),total_sum,s_county,g_state,g_county,lochierarchy,sum,isEmpty] + InputAdapter + Exchange [s_state] #10 + WholeStageCodegen (20) + HashAggregate [s_state,total_sum] [sum,isEmpty,sum,isEmpty] + HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,sum] + InputAdapter + ReusedExchange [s_state,s_county,sum] #3 + WholeStageCodegen (32) HashAggregate [sum,isEmpty] 
[sum(total_sum),total_sum,s_state,s_county,g_state,g_county,lochierarchy,sum,isEmpty] InputAdapter - Exchange #12 - WholeStageCodegen (33) + Exchange #11 + WholeStageCodegen (31) HashAggregate [total_sum] [sum,isEmpty,sum,isEmpty] HashAggregate [s_state,s_county,sum] [sum(UnscaledValue(ss_net_profit)),total_sum,sum] InputAdapter - ReusedExchange [s_state,s_county,sum] #4 + ReusedExchange [s_state,s_county,sum] #3 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt index c2627bd7e4cc9..04ff822b1ce52 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/explain.txt @@ -11,64 +11,64 @@ TakeOrderedAndProject (79) : +- * BroadcastHashJoin LeftOuter BuildRight (65) : :- * Project (60) : : +- * SortMergeJoin Inner (59) - : : :- * Sort (47) - : : : +- Exchange (46) - : : : +- * Project (45) - : : : +- * BroadcastHashJoin Inner BuildRight (44) - : : : :- * Project (32) - : : : : +- * SortMergeJoin Inner (31) - : : : : :- * Sort (25) - : : : : : +- Exchange (24) - : : : : : +- * Project (23) - : : : : : +- * BroadcastHashJoin Inner BuildRight (22) - : : : : : :- * Project (17) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (16) - : : : : : : :- * Project (10) - : : : : : : : +- * BroadcastHashJoin Inner BuildLeft (9) - : : : : : : : :- BroadcastExchange (5) - : : : : : : : : +- * Project (4) - : : : : : : : : +- * Filter (3) - : : : : : : : : +- * ColumnarToRow (2) - : : : : : : : : +- Scan parquet default.household_demographics (1) - : : : : : : : +- * Filter (8) - : : : : : : : +- * ColumnarToRow (7) - : : : : : : : +- Scan parquet default.catalog_sales (6) - : : : : : : +- BroadcastExchange (15) - : : : : : : +- * Project (14) - : : : : : : +- * Filter (13) - : : : : : : +- * ColumnarToRow (12) - : : : : : : +- Scan parquet default.customer_demographics (11) - : : : : : +- BroadcastExchange (21) - : : : : : +- * Filter (20) - : : : : : +- * ColumnarToRow (19) - : : : : : +- Scan parquet default.date_dim (18) - : : : : +- * Sort (30) - : : : : +- Exchange (29) - : : : : +- * Filter (28) - : : : : +- * ColumnarToRow (27) - : : : : +- Scan parquet default.item (26) - : : : +- BroadcastExchange (43) - : : : +- * Project (42) - : : : +- * BroadcastHashJoin Inner BuildRight (41) - : : : :- * Filter (35) - : : : : +- * ColumnarToRow (34) - : : : : +- Scan parquet default.date_dim (33) - : : : +- BroadcastExchange (40) - : : : +- * Project (39) - : : : +- * Filter (38) - : : : +- * ColumnarToRow (37) - : : : +- Scan parquet default.date_dim (36) + : : :- * Sort (34) + : : : +- Exchange (33) + : : : +- * Project (32) + : : : +- * SortMergeJoin Inner (31) + : : : :- * Sort (25) + : : : : +- Exchange (24) + : : : : +- * Project (23) + : : : : +- * BroadcastHashJoin Inner BuildRight (22) + : : : : :- * Project (17) + : : : : : +- * BroadcastHashJoin Inner BuildRight (16) + : : : : : :- * Project (10) + : : : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : : : :- * Filter (3) + : : : : : : : +- * ColumnarToRow (2) + : : : : : : : +- Scan parquet default.catalog_sales (1) + : : : : : : +- BroadcastExchange (8) + : : : : : : +- * Project (7) + : : : : : : +- * Filter (6) + : : : : : : +- * ColumnarToRow (5) + : : : : : : +- Scan parquet default.household_demographics (4) + : : : : : +- BroadcastExchange (15) + : : : : 
: +- * Project (14) + : : : : : +- * Filter (13) + : : : : : +- * ColumnarToRow (12) + : : : : : +- Scan parquet default.customer_demographics (11) + : : : : +- BroadcastExchange (21) + : : : : +- * Filter (20) + : : : : +- * ColumnarToRow (19) + : : : : +- Scan parquet default.date_dim (18) + : : : +- * Sort (30) + : : : +- Exchange (29) + : : : +- * Filter (28) + : : : +- * ColumnarToRow (27) + : : : +- Scan parquet default.item (26) : : +- * Sort (58) : : +- Exchange (57) : : +- * Project (56) - : : +- * BroadcastHashJoin Inner BuildLeft (55) - : : :- BroadcastExchange (51) - : : : +- * Filter (50) - : : : +- * ColumnarToRow (49) - : : : +- Scan parquet default.warehouse (48) - : : +- * Filter (54) - : : +- * ColumnarToRow (53) - : : +- Scan parquet default.inventory (52) + : : +- * BroadcastHashJoin Inner BuildRight (55) + : : :- * Project (50) + : : : +- * BroadcastHashJoin Inner BuildLeft (49) + : : : :- BroadcastExchange (45) + : : : : +- * Project (44) + : : : : +- * BroadcastHashJoin Inner BuildLeft (43) + : : : : :- BroadcastExchange (39) + : : : : : +- * Project (38) + : : : : : +- * Filter (37) + : : : : : +- * ColumnarToRow (36) + : : : : : +- Scan parquet default.date_dim (35) + : : : : +- * Filter (42) + : : : : +- * ColumnarToRow (41) + : : : : +- Scan parquet default.date_dim (40) + : : : +- * Filter (48) + : : : +- * ColumnarToRow (47) + : : : +- Scan parquet default.inventory (46) + : : +- BroadcastExchange (54) + : : +- * Filter (53) + : : +- * ColumnarToRow (52) + : : +- Scan parquet default.warehouse (51) : +- BroadcastExchange (64) : +- * Filter (63) : +- * ColumnarToRow (62) @@ -80,50 +80,50 @@ TakeOrderedAndProject (79) +- Scan parquet default.catalog_returns (69) -(1) Scan parquet default.household_demographics -Output [2]: [hd_demo_sk#1, hd_buy_potential#2] +(1) Scan parquet default.catalog_sales +Output [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] Batched: true -Location [not included in comparison]/{warehouse_dir}/household_demographics] -PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,1001-5000), IsNotNull(hd_demo_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/catalog_sales] +PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)] +ReadSchema: struct -(2) ColumnarToRow [codegen id : 1] -Input [2]: [hd_demo_sk#1, hd_buy_potential#2] +(2) ColumnarToRow [codegen id : 4] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] -(3) Filter [codegen id : 1] -Input [2]: [hd_demo_sk#1, hd_buy_potential#2] -Condition : ((isnotnull(hd_buy_potential#2) AND (hd_buy_potential#2 = 1001-5000)) AND isnotnull(hd_demo_sk#1)) +(3) Filter [codegen id : 4] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] +Condition : (((((isnotnull(cs_quantity#8) AND isnotnull(cs_item_sk#5)) AND isnotnull(cs_bill_cdemo_sk#3)) AND isnotnull(cs_bill_hdemo_sk#4)) AND isnotnull(cs_sold_date_sk#1)) AND isnotnull(cs_ship_date_sk#2)) -(4) Project [codegen id : 1] -Output [1]: [hd_demo_sk#1] -Input [2]: [hd_demo_sk#1, hd_buy_potential#2] +(4) Scan parquet default.household_demographics +Output [2]: [hd_demo_sk#9, hd_buy_potential#10] 
+Batched: true +Location [not included in comparison]/{warehouse_dir}/household_demographics] +PushedFilters: [IsNotNull(hd_buy_potential), EqualTo(hd_buy_potential,1001-5000), IsNotNull(hd_demo_sk)] +ReadSchema: struct -(5) BroadcastExchange -Input [1]: [hd_demo_sk#1] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#3] +(5) ColumnarToRow [codegen id : 1] +Input [2]: [hd_demo_sk#9, hd_buy_potential#10] -(6) Scan parquet default.catalog_sales -Output [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Batched: true -Location [not included in comparison]/{warehouse_dir}/catalog_sales] -PushedFilters: [IsNotNull(cs_quantity), IsNotNull(cs_item_sk), IsNotNull(cs_bill_cdemo_sk), IsNotNull(cs_bill_hdemo_sk), IsNotNull(cs_sold_date_sk), IsNotNull(cs_ship_date_sk)] -ReadSchema: struct +(6) Filter [codegen id : 1] +Input [2]: [hd_demo_sk#9, hd_buy_potential#10] +Condition : ((isnotnull(hd_buy_potential#10) AND (hd_buy_potential#10 = 1001-5000)) AND isnotnull(hd_demo_sk#9)) -(7) ColumnarToRow -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] +(7) Project [codegen id : 1] +Output [1]: [hd_demo_sk#9] +Input [2]: [hd_demo_sk#9, hd_buy_potential#10] -(8) Filter -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Condition : (((((isnotnull(cs_quantity#11) AND isnotnull(cs_item_sk#8)) AND isnotnull(cs_bill_cdemo_sk#6)) AND isnotnull(cs_bill_hdemo_sk#7)) AND isnotnull(cs_sold_date_sk#4)) AND isnotnull(cs_ship_date_sk#5)) +(8) BroadcastExchange +Input [1]: [hd_demo_sk#9] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#11] (9) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [hd_demo_sk#1] -Right keys [1]: [cs_bill_hdemo_sk#7] +Left keys [1]: [cs_bill_hdemo_sk#4] +Right keys [1]: [hd_demo_sk#9] Join condition: None (10) Project [codegen id : 4] -Output [7]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Input [9]: [hd_demo_sk#1, cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_bill_hdemo_sk#7, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] +Output [7]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8] +Input [9]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_bill_hdemo_sk#4, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, hd_demo_sk#9] (11) Scan parquet default.customer_demographics Output [2]: [cd_demo_sk#12, cd_marital_status#13] @@ -148,13 +148,13 @@ Input [1]: [cd_demo_sk#12] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#14] (16) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [cs_bill_cdemo_sk#6] +Left keys [1]: [cs_bill_cdemo_sk#3] Right keys [1]: [cd_demo_sk#12] Join condition: None (17) Project [codegen id : 4] -Output [6]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11] -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_bill_cdemo_sk#6, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, cd_demo_sk#12] +Output [6]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_item_sk#5, 
cs_promo_sk#6, cs_order_number#7, cs_quantity#8] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_bill_cdemo_sk#3, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, cd_demo_sk#12] (18) Scan parquet default.date_dim Output [2]: [d_date_sk#15, d_date#16] @@ -175,21 +175,21 @@ Input [2]: [d_date_sk#15, d_date#16] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#17] (22) BroadcastHashJoin [codegen id : 4] -Left keys [1]: [cs_ship_date_sk#5] +Left keys [1]: [cs_ship_date_sk#2] Right keys [1]: [d_date_sk#15] Join condition: None (23) Project [codegen id : 4] -Output [6]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16] -Input [8]: [cs_sold_date_sk#4, cs_ship_date_sk#5, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date_sk#15, d_date#16] +Output [6]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16] +Input [8]: [cs_sold_date_sk#1, cs_ship_date_sk#2, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date_sk#15, d_date#16] (24) Exchange -Input [6]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16] -Arguments: hashpartitioning(cs_item_sk#8, 5), true, [id=#18] +Input [6]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16] +Arguments: hashpartitioning(cs_item_sk#5, 5), ENSURE_REQUIREMENTS, [id=#18] (25) Sort [codegen id : 5] -Input [6]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16] -Arguments: [cs_item_sk#8 ASC NULLS FIRST], false, 0 +Input [6]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16] +Arguments: [cs_item_sk#5 ASC NULLS FIRST], false, 0 (26) Scan parquet default.item Output [2]: [i_item_sk#19, i_item_desc#20] @@ -207,144 +207,144 @@ Condition : isnotnull(i_item_sk#19) (29) Exchange Input [2]: [i_item_sk#19, i_item_desc#20] -Arguments: hashpartitioning(i_item_sk#19, 5), true, [id=#21] +Arguments: hashpartitioning(i_item_sk#19, 5), ENSURE_REQUIREMENTS, [id=#21] (30) Sort [codegen id : 7] Input [2]: [i_item_sk#19, i_item_desc#20] Arguments: [i_item_sk#19 ASC NULLS FIRST], false, 0 -(31) SortMergeJoin [codegen id : 10] -Left keys [1]: [cs_item_sk#8] +(31) SortMergeJoin [codegen id : 8] +Left keys [1]: [cs_item_sk#5] Right keys [1]: [i_item_sk#19] Join condition: None -(32) Project [codegen id : 10] -Output [7]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16, i_item_desc#20] -Input [8]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16, i_item_sk#19, i_item_desc#20] - -(33) Scan parquet default.date_dim -Output [2]: [d_date_sk#22, d_week_seq#23] -Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] -ReadSchema: struct +(32) Project [codegen id : 8] +Output [7]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20] +Input [8]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_sk#19, i_item_desc#20] -(34) ColumnarToRow [codegen id : 9] -Input [2]: [d_date_sk#22, d_week_seq#23] +(33) Exchange +Input [7]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20] +Arguments: 
hashpartitioning(cs_item_sk#5, cs_sold_date_sk#1, 5), ENSURE_REQUIREMENTS, [id=#22] -(35) Filter [codegen id : 9] -Input [2]: [d_date_sk#22, d_week_seq#23] -Condition : (isnotnull(d_week_seq#23) AND isnotnull(d_date_sk#22)) +(34) Sort [codegen id : 9] +Input [7]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20] +Arguments: [cs_item_sk#5 ASC NULLS FIRST, cs_sold_date_sk#1 ASC NULLS FIRST], false, 0 -(36) Scan parquet default.date_dim -Output [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] +(35) Scan parquet default.date_dim +Output [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk), IsNotNull(d_week_seq), IsNotNull(d_date)] ReadSchema: struct -(37) ColumnarToRow [codegen id : 8] -Input [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] +(36) ColumnarToRow [codegen id : 10] +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] -(38) Filter [codegen id : 8] -Input [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] -Condition : ((((isnotnull(d_year#27) AND (d_year#27 = 2001)) AND isnotnull(d_date_sk#24)) AND isnotnull(d_week_seq#26)) AND isnotnull(d_date#25)) +(37) Filter [codegen id : 10] +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] +Condition : ((((isnotnull(d_year#26) AND (d_year#26 = 2001)) AND isnotnull(d_date_sk#23)) AND isnotnull(d_week_seq#25)) AND isnotnull(d_date#24)) -(39) Project [codegen id : 8] -Output [3]: [d_date_sk#24, d_date#25, d_week_seq#26] -Input [4]: [d_date_sk#24, d_date#25, d_week_seq#26, d_year#27] +(38) Project [codegen id : 10] +Output [3]: [d_date_sk#23, d_date#24, d_week_seq#25] +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_year#26] -(40) BroadcastExchange -Input [3]: [d_date_sk#24, d_date#25, d_week_seq#26] -Arguments: HashedRelationBroadcastMode(List(cast(input[2, int, true] as bigint)),false), [id=#28] +(39) BroadcastExchange +Input [3]: [d_date_sk#23, d_date#24, d_week_seq#25] +Arguments: HashedRelationBroadcastMode(List(cast(input[2, int, true] as bigint)),false), [id=#27] -(41) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [d_week_seq#23] -Right keys [1]: [d_week_seq#26] +(40) Scan parquet default.date_dim +Output [2]: [d_date_sk#28, d_week_seq#29] +Batched: true +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_week_seq), IsNotNull(d_date_sk)] +ReadSchema: struct + +(41) ColumnarToRow +Input [2]: [d_date_sk#28, d_week_seq#29] + +(42) Filter +Input [2]: [d_date_sk#28, d_week_seq#29] +Condition : (isnotnull(d_week_seq#29) AND isnotnull(d_date_sk#28)) + +(43) BroadcastHashJoin [codegen id : 11] +Left keys [1]: [d_week_seq#25] +Right keys [1]: [d_week_seq#29] Join condition: None -(42) Project [codegen id : 9] -Output [4]: [d_date_sk#22, d_date_sk#24, d_date#25, d_week_seq#26] -Input [5]: [d_date_sk#22, d_week_seq#23, d_date_sk#24, d_date#25, d_week_seq#26] +(44) Project [codegen id : 11] +Output [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28] +Input [5]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28, d_week_seq#29] -(43) BroadcastExchange -Input [4]: [d_date_sk#22, d_date_sk#24, d_date#25, d_week_seq#26] -Arguments: HashedRelationBroadcastMode(List(cast(input[1, int, true] as bigint)),false), [id=#29] +(45) BroadcastExchange +Input [4]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28] +Arguments: 
HashedRelationBroadcastMode(List(cast(input[3, int, true] as bigint)),false), [id=#30] -(44) BroadcastHashJoin [codegen id : 10] -Left keys [1]: [cs_sold_date_sk#4] -Right keys [1]: [d_date_sk#24] -Join condition: (d_date#16 > d_date#25 + 5 days) +(46) Scan parquet default.inventory +Output [4]: [inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] +Batched: true +Location [not included in comparison]/{warehouse_dir}/inventory] +PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] +ReadSchema: struct -(45) Project [codegen id : 10] -Output [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26] -Input [11]: [cs_sold_date_sk#4, cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, d_date#16, i_item_desc#20, d_date_sk#22, d_date_sk#24, d_date#25, d_week_seq#26] +(47) ColumnarToRow +Input [4]: [inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] -(46) Exchange -Input [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26] -Arguments: hashpartitioning(cs_item_sk#8, d_date_sk#22, 5), true, [id=#30] +(48) Filter +Input [4]: [inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] +Condition : (((isnotnull(inv_quantity_on_hand#34) AND isnotnull(inv_item_sk#32)) AND isnotnull(inv_warehouse_sk#33)) AND isnotnull(inv_date_sk#31)) -(47) Sort [codegen id : 11] -Input [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26] -Arguments: [cs_item_sk#8 ASC NULLS FIRST, d_date_sk#22 ASC NULLS FIRST], false, 0 +(49) BroadcastHashJoin [codegen id : 13] +Left keys [1]: [d_date_sk#28] +Right keys [1]: [inv_date_sk#31] +Join condition: None + +(50) Project [codegen id : 13] +Output [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] +Input [8]: [d_date_sk#23, d_date#24, d_week_seq#25, d_date_sk#28, inv_date_sk#31, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34] -(48) Scan parquet default.warehouse -Output [2]: [w_warehouse_sk#31, w_warehouse_name#32] +(51) Scan parquet default.warehouse +Output [2]: [w_warehouse_sk#35, w_warehouse_name#36] Batched: true Location [not included in comparison]/{warehouse_dir}/warehouse] PushedFilters: [IsNotNull(w_warehouse_sk)] ReadSchema: struct -(49) ColumnarToRow [codegen id : 12] -Input [2]: [w_warehouse_sk#31, w_warehouse_name#32] - -(50) Filter [codegen id : 12] -Input [2]: [w_warehouse_sk#31, w_warehouse_name#32] -Condition : isnotnull(w_warehouse_sk#31) - -(51) BroadcastExchange -Input [2]: [w_warehouse_sk#31, w_warehouse_name#32] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#33] - -(52) Scan parquet default.inventory -Output [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] -Batched: true -Location [not included in comparison]/{warehouse_dir}/inventory] -PushedFilters: [IsNotNull(inv_quantity_on_hand), IsNotNull(inv_item_sk), IsNotNull(inv_warehouse_sk), IsNotNull(inv_date_sk)] -ReadSchema: struct +(52) ColumnarToRow [codegen id : 12] +Input [2]: [w_warehouse_sk#35, w_warehouse_name#36] -(53) ColumnarToRow -Input [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] +(53) Filter [codegen id : 12] +Input [2]: [w_warehouse_sk#35, w_warehouse_name#36] +Condition : 
isnotnull(w_warehouse_sk#35) -(54) Filter -Input [4]: [inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] -Condition : (((isnotnull(inv_quantity_on_hand#37) AND isnotnull(inv_item_sk#35)) AND isnotnull(inv_warehouse_sk#36)) AND isnotnull(inv_date_sk#34)) +(54) BroadcastExchange +Input [2]: [w_warehouse_sk#35, w_warehouse_name#36] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#37] (55) BroadcastHashJoin [codegen id : 13] -Left keys [1]: [w_warehouse_sk#31] -Right keys [1]: [inv_warehouse_sk#36] +Left keys [1]: [inv_warehouse_sk#33] +Right keys [1]: [w_warehouse_sk#35] Join condition: None (56) Project [codegen id : 13] -Output [4]: [w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] -Input [6]: [w_warehouse_sk#31, w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_warehouse_sk#36, inv_quantity_on_hand#37] +Output [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] +Input [8]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_warehouse_sk#33, inv_quantity_on_hand#34, w_warehouse_sk#35, w_warehouse_name#36] (57) Exchange -Input [4]: [w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] -Arguments: hashpartitioning(inv_item_sk#35, inv_date_sk#34, 5), true, [id=#38] +Input [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] +Arguments: hashpartitioning(inv_item_sk#32, d_date_sk#23, 5), ENSURE_REQUIREMENTS, [id=#38] (58) Sort [codegen id : 14] -Input [4]: [w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] -Arguments: [inv_item_sk#35 ASC NULLS FIRST, inv_date_sk#34 ASC NULLS FIRST], false, 0 +Input [6]: [d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] +Arguments: [inv_item_sk#32 ASC NULLS FIRST, d_date_sk#23 ASC NULLS FIRST], false, 0 (59) SortMergeJoin [codegen id : 16] -Left keys [2]: [cs_item_sk#8, d_date_sk#22] -Right keys [2]: [inv_item_sk#35, inv_date_sk#34] -Join condition: (inv_quantity_on_hand#37 < cs_quantity#11) +Left keys [2]: [cs_item_sk#5, cs_sold_date_sk#1] +Right keys [2]: [inv_item_sk#32, d_date_sk#23] +Join condition: ((inv_quantity_on_hand#34 < cs_quantity#8) AND (d_date#16 > d_date#24 + 5 days)) (60) Project [codegen id : 16] -Output [6]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Input [11]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, cs_quantity#11, i_item_desc#20, d_date_sk#22, d_week_seq#26, w_warehouse_name#32, inv_date_sk#34, inv_item_sk#35, inv_quantity_on_hand#37] +Output [6]: [cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Input [13]: [cs_sold_date_sk#1, cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, cs_quantity#8, d_date#16, i_item_desc#20, d_date_sk#23, d_date#24, d_week_seq#25, inv_item_sk#32, inv_quantity_on_hand#34, w_warehouse_name#36] (61) Scan parquet default.promotion Output [1]: [p_promo_sk#39] @@ -365,21 +365,21 @@ Input [1]: [p_promo_sk#39] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#40] (65) BroadcastHashJoin [codegen id : 16] -Left keys [1]: [cs_promo_sk#9] +Left keys [1]: [cs_promo_sk#6] Right keys [1]: [p_promo_sk#39] Join condition: None (66) Project [codegen id : 16] -Output [5]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, 
i_item_desc#20, d_week_seq#26] -Input [7]: [cs_item_sk#8, cs_promo_sk#9, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26, p_promo_sk#39] +Output [5]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Input [7]: [cs_item_sk#5, cs_promo_sk#6, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25, p_promo_sk#39] (67) Exchange -Input [5]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Arguments: hashpartitioning(cs_item_sk#8, cs_order_number#10, 5), true, [id=#41] +Input [5]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Arguments: hashpartitioning(cs_item_sk#5, cs_order_number#7, 5), ENSURE_REQUIREMENTS, [id=#41] (68) Sort [codegen id : 17] -Input [5]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Arguments: [cs_item_sk#8 ASC NULLS FIRST, cs_order_number#10 ASC NULLS FIRST], false, 0 +Input [5]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Arguments: [cs_item_sk#5 ASC NULLS FIRST, cs_order_number#7 ASC NULLS FIRST], false, 0 (69) Scan parquet default.catalog_returns Output [2]: [cr_item_sk#42, cr_order_number#43] @@ -397,40 +397,40 @@ Condition : (isnotnull(cr_item_sk#42) AND isnotnull(cr_order_number#43)) (72) Exchange Input [2]: [cr_item_sk#42, cr_order_number#43] -Arguments: hashpartitioning(cr_item_sk#42, cr_order_number#43, 5), true, [id=#44] +Arguments: hashpartitioning(cr_item_sk#42, cr_order_number#43, 5), ENSURE_REQUIREMENTS, [id=#44] (73) Sort [codegen id : 19] Input [2]: [cr_item_sk#42, cr_order_number#43] Arguments: [cr_item_sk#42 ASC NULLS FIRST, cr_order_number#43 ASC NULLS FIRST], false, 0 (74) SortMergeJoin -Left keys [2]: [cs_item_sk#8, cs_order_number#10] +Left keys [2]: [cs_item_sk#5, cs_order_number#7] Right keys [2]: [cr_item_sk#42, cr_order_number#43] Join condition: None (75) Project [codegen id : 20] -Output [3]: [w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Input [7]: [cs_item_sk#8, cs_order_number#10, w_warehouse_name#32, i_item_desc#20, d_week_seq#26, cr_item_sk#42, cr_order_number#43] +Output [3]: [w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Input [7]: [cs_item_sk#5, cs_order_number#7, w_warehouse_name#36, i_item_desc#20, d_week_seq#25, cr_item_sk#42, cr_order_number#43] (76) HashAggregate [codegen id : 20] -Input [3]: [w_warehouse_name#32, i_item_desc#20, d_week_seq#26] -Keys [3]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26] +Input [3]: [w_warehouse_name#36, i_item_desc#20, d_week_seq#25] +Keys [3]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25] Functions [1]: [partial_count(1)] Aggregate Attributes [1]: [count#45] -Results [4]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count#46] +Results [4]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count#46] (77) Exchange -Input [4]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count#46] -Arguments: hashpartitioning(i_item_desc#20, w_warehouse_name#32, d_week_seq#26, 5), true, [id=#47] +Input [4]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count#46] +Arguments: hashpartitioning(i_item_desc#20, w_warehouse_name#36, d_week_seq#25, 5), ENSURE_REQUIREMENTS, [id=#47] (78) HashAggregate [codegen id : 21] -Input [4]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count#46] -Keys [3]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26] +Input [4]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count#46] +Keys [3]: 
[i_item_desc#20, w_warehouse_name#36, d_week_seq#25] Functions [1]: [count(1)] Aggregate Attributes [1]: [count(1)#48] -Results [6]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, count(1)#48 AS no_promo#49, count(1)#48 AS promo#50, count(1)#48 AS total_cnt#51] +Results [6]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, count(1)#48 AS no_promo#49, count(1)#48 AS promo#50, count(1)#48 AS total_cnt#51] (79) TakeOrderedAndProject -Input [6]: [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, no_promo#49, promo#50, total_cnt#51] -Arguments: 100, [total_cnt#51 DESC NULLS LAST, i_item_desc#20 ASC NULLS FIRST, w_warehouse_name#32 ASC NULLS FIRST, d_week_seq#26 ASC NULLS FIRST], [i_item_desc#20, w_warehouse_name#32, d_week_seq#26, no_promo#49, promo#50, total_cnt#51] +Input [6]: [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, no_promo#49, promo#50, total_cnt#51] +Arguments: 100, [total_cnt#51 DESC NULLS LAST, i_item_desc#20 ASC NULLS FIRST, w_warehouse_name#36 ASC NULLS FIRST, d_week_seq#25 ASC NULLS FIRST], [i_item_desc#20, w_warehouse_name#36, d_week_seq#25, no_promo#49, promo#50, total_cnt#51] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/simplified.txt index 39dba3af02359..b88505ad7b9bc 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q72.sf100/simplified.txt @@ -16,91 +16,95 @@ TakeOrderedAndProject [total_cnt,i_item_desc,w_warehouse_name,d_week_seq,no_prom Project [cs_item_sk,cs_order_number,w_warehouse_name,i_item_desc,d_week_seq] BroadcastHashJoin [cs_promo_sk,p_promo_sk] Project [cs_item_sk,cs_promo_sk,cs_order_number,w_warehouse_name,i_item_desc,d_week_seq] - SortMergeJoin [cs_item_sk,d_date_sk,inv_item_sk,inv_date_sk,inv_quantity_on_hand,cs_quantity] + SortMergeJoin [cs_item_sk,cs_sold_date_sk,inv_item_sk,d_date_sk,inv_quantity_on_hand,cs_quantity,d_date,d_date] InputAdapter - WholeStageCodegen (11) - Sort [cs_item_sk,d_date_sk] + WholeStageCodegen (9) + Sort [cs_item_sk,cs_sold_date_sk] InputAdapter - Exchange [cs_item_sk,d_date_sk] #3 - WholeStageCodegen (10) - Project [cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,i_item_desc,d_date_sk,d_week_seq] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk,d_date,d_date] - Project [cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date,i_item_desc] - SortMergeJoin [cs_item_sk,i_item_sk] - InputAdapter - WholeStageCodegen (5) - Sort [cs_item_sk] - InputAdapter - Exchange [cs_item_sk] #4 - WholeStageCodegen (4) - Project [cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date] - BroadcastHashJoin [cs_ship_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_ship_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] - BroadcastHashJoin [cs_bill_cdemo_sk,cd_demo_sk] - Project [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] - BroadcastHashJoin [hd_demo_sk,cs_bill_hdemo_sk] + Exchange [cs_item_sk,cs_sold_date_sk] #3 + WholeStageCodegen (8) + Project [cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date,i_item_desc] + SortMergeJoin [cs_item_sk,i_item_sk] + InputAdapter + WholeStageCodegen (5) + Sort [cs_item_sk] + InputAdapter + Exchange [cs_item_sk] #4 + WholeStageCodegen (4) + Project 
[cs_sold_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity,d_date] + BroadcastHashJoin [cs_ship_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_ship_date_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] + BroadcastHashJoin [cs_bill_cdemo_sk,cd_demo_sk] + Project [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] + BroadcastHashJoin [cs_bill_hdemo_sk,hd_demo_sk] + Filter [cs_quantity,cs_item_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_sold_date_sk,cs_ship_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [hd_demo_sk] - Filter [hd_buy_potential,hd_demo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential] - Filter [cs_quantity,cs_item_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_sold_date_sk,cs_ship_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_ship_date_sk,cs_bill_cdemo_sk,cs_bill_hdemo_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_quantity] InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Project [cd_demo_sk] - Filter [cd_marital_status,cd_demo_sk] + BroadcastExchange #5 + WholeStageCodegen (1) + Project [hd_demo_sk] + Filter [hd_buy_potential,hd_demo_sk] ColumnarToRow InputAdapter - Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (3) - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] - InputAdapter - WholeStageCodegen (7) - Sort [i_item_sk] - InputAdapter - Exchange [i_item_sk] #8 - WholeStageCodegen (6) - Filter [i_item_sk] - ColumnarToRow + Scan parquet default.household_demographics [hd_demo_sk,hd_buy_potential] InputAdapter - Scan parquet default.item [i_item_sk,i_item_desc] + BroadcastExchange #6 + WholeStageCodegen (2) + Project [cd_demo_sk] + Filter [cd_marital_status,cd_demo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.customer_demographics [cd_demo_sk,cd_marital_status] + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (3) + Filter [d_date,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date] InputAdapter - BroadcastExchange #9 - WholeStageCodegen (9) - Project [d_date_sk,d_date_sk,d_date,d_week_seq] - BroadcastHashJoin [d_week_seq,d_week_seq] - Filter [d_week_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_week_seq] - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (8) - Project [d_date_sk,d_date,d_week_seq] - Filter [d_year,d_date_sk,d_week_seq,d_date] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date,d_week_seq,d_year] + WholeStageCodegen (7) + Sort [i_item_sk] + InputAdapter + Exchange [i_item_sk] #8 + WholeStageCodegen (6) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_item_desc] InputAdapter WholeStageCodegen (14) - Sort [inv_item_sk,inv_date_sk] + Sort [inv_item_sk,d_date_sk] InputAdapter - Exchange [inv_item_sk,inv_date_sk] #11 + Exchange [inv_item_sk,d_date_sk] #9 WholeStageCodegen (13) - Project [w_warehouse_name,inv_date_sk,inv_item_sk,inv_quantity_on_hand] - BroadcastHashJoin [w_warehouse_sk,inv_warehouse_sk] + Project 
[d_date_sk,d_date,d_week_seq,inv_item_sk,inv_quantity_on_hand,w_warehouse_name] + BroadcastHashJoin [inv_warehouse_sk,w_warehouse_sk] + Project [d_date_sk,d_date,d_week_seq,inv_item_sk,inv_warehouse_sk,inv_quantity_on_hand] + BroadcastHashJoin [d_date_sk,inv_date_sk] + InputAdapter + BroadcastExchange #10 + WholeStageCodegen (11) + Project [d_date_sk,d_date,d_week_seq,d_date_sk] + BroadcastHashJoin [d_week_seq,d_week_seq] + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (10) + Project [d_date_sk,d_date,d_week_seq] + Filter [d_year,d_date_sk,d_week_seq,d_date] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_date,d_week_seq,d_year] + Filter [d_week_seq,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_week_seq] + Filter [inv_quantity_on_hand,inv_item_sk,inv_warehouse_sk,inv_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.inventory [inv_date_sk,inv_item_sk,inv_warehouse_sk,inv_quantity_on_hand] InputAdapter BroadcastExchange #12 WholeStageCodegen (12) @@ -108,10 +112,6 @@ TakeOrderedAndProject [total_cnt,i_item_desc,w_warehouse_name,d_week_seq,no_prom ColumnarToRow InputAdapter Scan parquet default.warehouse [w_warehouse_sk,w_warehouse_name] - Filter [inv_quantity_on_hand,inv_item_sk,inv_warehouse_sk,inv_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.inventory [inv_date_sk,inv_item_sk,inv_warehouse_sk,inv_quantity_on_hand] InputAdapter BroadcastExchange #13 WholeStageCodegen (15) diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt index 3f452dc9272dc..ce1206c0ba906 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/explain.txt @@ -1,142 +1,134 @@ == Physical Plan == -TakeOrderedAndProject (138) -+- * Project (137) - +- * SortMergeJoin Inner (136) - :- * Sort (74) - : +- Exchange (73) - : +- * HashAggregate (72) - : +- Exchange (71) - : +- * HashAggregate (70) - : +- * HashAggregate (69) - : +- Exchange (68) - : +- * HashAggregate (67) - : +- Union (66) - : :- * HashAggregate (47) - : : +- Exchange (46) - : : +- * HashAggregate (45) - : : +- Union (44) - : : :- * Project (25) - : : : +- SortMergeJoin LeftOuter (24) - : : : :- * Sort (18) - : : : : +- Exchange (17) - : : : : +- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- * Project (10) - : : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : : :- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.catalog_sales (1) - : : : : : +- BroadcastExchange (8) - : : : : : +- * Project (7) - : : : : : +- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.item (4) - : : : : +- BroadcastExchange (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.date_dim (11) - : : : +- * Sort (23) - : : : +- Exchange (22) - : : : +- * Filter (21) - : : : +- * ColumnarToRow (20) - : : : +- Scan parquet default.catalog_returns (19) - : : +- * Project (43) - : : +- SortMergeJoin LeftOuter (42) - : : :- * Sort (36) - : : : +- Exchange (35) - : : : +- * Project (34) - : : : +- * BroadcastHashJoin Inner BuildRight (33) - : : : :- * Project (31) - : : : : +- * BroadcastHashJoin Inner BuildRight (30) - : : : : :- * Filter (28) - : : : 
: : +- * ColumnarToRow (27) - : : : : : +- Scan parquet default.store_sales (26) - : : : : +- ReusedExchange (29) - : : : +- ReusedExchange (32) - : : +- * Sort (41) - : : +- Exchange (40) - : : +- * Filter (39) - : : +- * ColumnarToRow (38) - : : +- Scan parquet default.store_returns (37) - : +- * Project (65) - : +- SortMergeJoin LeftOuter (64) - : :- * Sort (58) - : : +- Exchange (57) - : : +- * Project (56) - : : +- * BroadcastHashJoin Inner BuildRight (55) - : : :- * Project (53) - : : : +- * BroadcastHashJoin Inner BuildRight (52) - : : : :- * Filter (50) - : : : : +- * ColumnarToRow (49) - : : : : +- Scan parquet default.web_sales (48) - : : : +- ReusedExchange (51) - : : +- ReusedExchange (54) - : +- * Sort (63) - : +- Exchange (62) - : +- * Filter (61) - : +- * ColumnarToRow (60) - : +- Scan parquet default.web_returns (59) - +- * Sort (135) - +- Exchange (134) - +- * HashAggregate (133) - +- Exchange (132) - +- * HashAggregate (131) - +- * HashAggregate (130) - +- Exchange (129) - +- * HashAggregate (128) - +- Union (127) - :- * HashAggregate (111) - : +- Exchange (110) - : +- * HashAggregate (109) - : +- Union (108) - : :- * Project (92) - : : +- SortMergeJoin LeftOuter (91) - : : :- * Sort (88) - : : : +- Exchange (87) - : : : +- * Project (86) - : : : +- * BroadcastHashJoin Inner BuildRight (85) - : : : :- * Project (80) - : : : : +- * BroadcastHashJoin Inner BuildRight (79) - : : : : :- * Filter (77) - : : : : : +- * ColumnarToRow (76) - : : : : : +- Scan parquet default.catalog_sales (75) - : : : : +- ReusedExchange (78) - : : : +- BroadcastExchange (84) - : : : +- * Filter (83) - : : : +- * ColumnarToRow (82) - : : : +- Scan parquet default.date_dim (81) - : : +- * Sort (90) - : : +- ReusedExchange (89) - : +- * Project (107) - : +- SortMergeJoin LeftOuter (106) - : :- * Sort (103) - : : +- Exchange (102) - : : +- * Project (101) - : : +- * BroadcastHashJoin Inner BuildRight (100) - : : :- * Project (98) - : : : +- * BroadcastHashJoin Inner BuildRight (97) - : : : :- * Filter (95) - : : : : +- * ColumnarToRow (94) - : : : : +- Scan parquet default.store_sales (93) - : : : +- ReusedExchange (96) - : : +- ReusedExchange (99) - : +- * Sort (105) - : +- ReusedExchange (104) - +- * Project (126) - +- SortMergeJoin LeftOuter (125) - :- * Sort (122) - : +- Exchange (121) - : +- * Project (120) - : +- * BroadcastHashJoin Inner BuildRight (119) - : :- * Project (117) - : : +- * BroadcastHashJoin Inner BuildRight (116) - : : :- * Filter (114) - : : : +- * ColumnarToRow (113) - : : : +- Scan parquet default.web_sales (112) - : : +- ReusedExchange (115) - : +- ReusedExchange (118) - +- * Sort (124) - +- ReusedExchange (123) +TakeOrderedAndProject (130) ++- * Project (129) + +- * SortMergeJoin Inner (128) + :- * Sort (70) + : +- Exchange (69) + : +- * HashAggregate (68) + : +- Exchange (67) + : +- * HashAggregate (66) + : +- * HashAggregate (65) + : +- Exchange (64) + : +- * HashAggregate (63) + : +- Union (62) + : :- * Project (25) + : : +- SortMergeJoin LeftOuter (24) + : : :- * Sort (18) + : : : +- Exchange (17) + : : : +- * Project (16) + : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : :- * Project (10) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.catalog_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.item (4) + : : : +- BroadcastExchange (14) + : : : 
+- * Filter (13) + : : : +- * ColumnarToRow (12) + : : : +- Scan parquet default.date_dim (11) + : : +- * Sort (23) + : : +- Exchange (22) + : : +- * Filter (21) + : : +- * ColumnarToRow (20) + : : +- Scan parquet default.catalog_returns (19) + : :- * Project (43) + : : +- SortMergeJoin LeftOuter (42) + : : :- * Sort (36) + : : : +- Exchange (35) + : : : +- * Project (34) + : : : +- * BroadcastHashJoin Inner BuildRight (33) + : : : :- * Project (31) + : : : : +- * BroadcastHashJoin Inner BuildRight (30) + : : : : :- * Filter (28) + : : : : : +- * ColumnarToRow (27) + : : : : : +- Scan parquet default.store_sales (26) + : : : : +- ReusedExchange (29) + : : : +- ReusedExchange (32) + : : +- * Sort (41) + : : +- Exchange (40) + : : +- * Filter (39) + : : +- * ColumnarToRow (38) + : : +- Scan parquet default.store_returns (37) + : +- * Project (61) + : +- SortMergeJoin LeftOuter (60) + : :- * Sort (54) + : : +- Exchange (53) + : : +- * Project (52) + : : +- * BroadcastHashJoin Inner BuildRight (51) + : : :- * Project (49) + : : : +- * BroadcastHashJoin Inner BuildRight (48) + : : : :- * Filter (46) + : : : : +- * ColumnarToRow (45) + : : : : +- Scan parquet default.web_sales (44) + : : : +- ReusedExchange (47) + : : +- ReusedExchange (50) + : +- * Sort (59) + : +- Exchange (58) + : +- * Filter (57) + : +- * ColumnarToRow (56) + : +- Scan parquet default.web_returns (55) + +- * Sort (127) + +- Exchange (126) + +- * HashAggregate (125) + +- Exchange (124) + +- * HashAggregate (123) + +- * HashAggregate (122) + +- Exchange (121) + +- * HashAggregate (120) + +- Union (119) + :- * Project (88) + : +- SortMergeJoin LeftOuter (87) + : :- * Sort (84) + : : +- Exchange (83) + : : +- * Project (82) + : : +- * BroadcastHashJoin Inner BuildRight (81) + : : :- * Project (76) + : : : +- * BroadcastHashJoin Inner BuildRight (75) + : : : :- * Filter (73) + : : : : +- * ColumnarToRow (72) + : : : : +- Scan parquet default.catalog_sales (71) + : : : +- ReusedExchange (74) + : : +- BroadcastExchange (80) + : : +- * Filter (79) + : : +- * ColumnarToRow (78) + : : +- Scan parquet default.date_dim (77) + : +- * Sort (86) + : +- ReusedExchange (85) + :- * Project (103) + : +- SortMergeJoin LeftOuter (102) + : :- * Sort (99) + : : +- Exchange (98) + : : +- * Project (97) + : : +- * BroadcastHashJoin Inner BuildRight (96) + : : :- * Project (94) + : : : +- * BroadcastHashJoin Inner BuildRight (93) + : : : :- * Filter (91) + : : : : +- * ColumnarToRow (90) + : : : : +- Scan parquet default.store_sales (89) + : : : +- ReusedExchange (92) + : : +- ReusedExchange (95) + : +- * Sort (101) + : +- ReusedExchange (100) + +- * Project (118) + +- SortMergeJoin LeftOuter (117) + :- * Sort (114) + : +- Exchange (113) + : +- * Project (112) + : +- * BroadcastHashJoin Inner BuildRight (111) + : :- * Project (109) + : : +- * BroadcastHashJoin Inner BuildRight (108) + : : :- * Filter (106) + : : : +- * ColumnarToRow (105) + : : : +- Scan parquet default.web_sales (104) + : : +- ReusedExchange (107) + : +- ReusedExchange (110) + +- * Sort (116) + +- ReusedExchange (115) (1) Scan parquet default.catalog_sales @@ -213,7 +205,7 @@ Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, (17) Exchange Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), true, [id=#16] +Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), 
ENSURE_REQUIREMENTS, [id=#16] (18) Sort [codegen id : 4] Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] @@ -235,7 +227,7 @@ Condition : (isnotnull(cr_order_number#18) AND isnotnull(cr_item_sk#17)) (22) Exchange Input [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] -Arguments: hashpartitioning(cr_order_number#18, cr_item_sk#17, 5), true, [id=#21] +Arguments: hashpartitioning(cr_order_number#18, cr_item_sk#17, 5), ENSURE_REQUIREMENTS, [id=#21] (23) Sort [codegen id : 6] Input [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] @@ -290,7 +282,7 @@ Input [11]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity (35) Exchange Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), true, [id=#29] +Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), ENSURE_REQUIREMENTS, [id=#29] (36) Sort [codegen id : 11] Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] @@ -312,7 +304,7 @@ Condition : (isnotnull(sr_ticket_number#31) AND isnotnull(sr_item_sk#30)) (40) Exchange Input [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), true, [id=#34] +Arguments: hashpartitioning(sr_ticket_number#31, sr_item_sk#30, 5), ENSURE_REQUIREMENTS, [id=#34] (41) Sort [codegen id : 13] Input [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] @@ -327,426 +319,386 @@ Join condition: None Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ss_quantity#27 - coalesce(sr_return_quantity#32, 0)) AS sales_cnt#35, CheckOverflow((promote_precision(cast(ss_ext_sales_price#28 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#33, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#36] Input [13]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -(44) Union - -(45) HashAggregate [codegen id : 15] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] - -(46) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23, 5), true, [id=#37] - -(47) HashAggregate [codegen id : 16] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] 
-Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] - -(48) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(44) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(49) ColumnarToRow [codegen id : 19] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(45) ColumnarToRow [codegen id : 17] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] -(50) Filter [codegen id : 19] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] -Condition : (isnotnull(ws_item_sk#39) AND isnotnull(ws_sold_date_sk#38)) +(46) Filter [codegen id : 17] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] +Condition : (isnotnull(ws_item_sk#38) AND isnotnull(ws_sold_date_sk#37)) -(51) ReusedExchange [Reuses operator id: 8] +(47) ReusedExchange [Reuses operator id: 8] Output [5]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(52) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [ws_item_sk#39] +(48) BroadcastHashJoin [codegen id : 17] +Left keys [1]: [ws_item_sk#38] Right keys [1]: [i_item_sk#6] Join condition: None -(53) Project [codegen id : 19] -Output [9]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Input [10]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +(49) Project [codegen id : 17] +Output [9]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +Input [10]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(54) ReusedExchange [Reuses operator id: 14] +(50) ReusedExchange [Reuses operator id: 14] Output [2]: [d_date_sk#13, d_year#14] -(55) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [ws_sold_date_sk#38] +(51) BroadcastHashJoin [codegen id : 17] +Left keys [1]: [ws_sold_date_sk#37] Right keys [1]: [d_date_sk#13] Join condition: None -(56) Project [codegen id : 19] -Output [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Input [11]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] +(52) Project [codegen id : 17] +Output [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Input [11]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, 
i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] -(57) Exchange -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: hashpartitioning(cast(ws_order_number#40 as bigint), cast(ws_item_sk#39 as bigint), 5), true, [id=#43] +(53) Exchange +Input [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Arguments: hashpartitioning(cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint), 5), ENSURE_REQUIREMENTS, [id=#42] -(58) Sort [codegen id : 20] -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Arguments: [cast(ws_order_number#40 as bigint) ASC NULLS FIRST, cast(ws_item_sk#39 as bigint) ASC NULLS FIRST], false, 0 +(54) Sort [codegen id : 18] +Input [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Arguments: [cast(ws_order_number#39 as bigint) ASC NULLS FIRST, cast(ws_item_sk#38 as bigint) ASC NULLS FIRST], false, 0 -(59) Scan parquet default.web_returns -Output [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(55) Scan parquet default.web_returns +Output [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] Batched: true Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct -(60) ColumnarToRow [codegen id : 21] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(56) ColumnarToRow [codegen id : 19] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(61) Filter [codegen id : 21] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Condition : (isnotnull(wr_order_number#45) AND isnotnull(wr_item_sk#44)) +(57) Filter [codegen id : 19] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Condition : (isnotnull(wr_order_number#44) AND isnotnull(wr_item_sk#43)) -(62) Exchange -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Arguments: hashpartitioning(wr_order_number#45, wr_item_sk#44, 5), true, [id=#48] +(58) Exchange +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Arguments: hashpartitioning(wr_order_number#44, wr_item_sk#43, 5), ENSURE_REQUIREMENTS, [id=#47] -(63) Sort [codegen id : 22] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Arguments: [wr_order_number#45 ASC NULLS FIRST, wr_item_sk#44 ASC NULLS FIRST], false, 0 +(59) Sort [codegen id : 20] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Arguments: [wr_order_number#44 ASC NULLS FIRST, wr_item_sk#43 ASC NULLS FIRST], false, 0 -(64) SortMergeJoin -Left keys [2]: [cast(ws_order_number#40 as bigint), cast(ws_item_sk#39 as bigint)] -Right keys [2]: [wr_order_number#45, wr_item_sk#44] +(60) SortMergeJoin +Left keys [2]: [cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint)] +Right keys [2]: [wr_order_number#44, wr_item_sk#43] Join condition: None -(65) Project [codegen id : 23] -Output [7]: [d_year#14, 
i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ws_quantity#41 - coalesce(wr_return_quantity#46, 0)) AS sales_cnt#49, CheckOverflow((promote_precision(cast(ws_ext_sales_price#42 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#47, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#50] -Input [13]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(61) Project [codegen id : 21] +Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ws_quantity#40 - coalesce(wr_return_quantity#45, 0)) AS sales_cnt#48, CheckOverflow((promote_precision(cast(ws_ext_sales_price#41 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#46, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#49] +Input [13]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(66) Union +(62) Union -(67) HashAggregate [codegen id : 24] +(63) HashAggregate [codegen id : 22] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -(68) Exchange +(64) Exchange Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23, 5), true, [id=#51] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23, 5), ENSURE_REQUIREMENTS, [id=#50] -(69) HashAggregate [codegen id : 25] +(65) HashAggregate [codegen id : 23] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] -(70) HashAggregate [codegen id : 25] +(66) HashAggregate [codegen id : 23] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#22, sales_amt#23] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [partial_sum(cast(sales_cnt#22 as bigint)), partial_sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum#52, sum#53] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#54, sum#55] +Aggregate Attributes [2]: [sum#51, sum#52] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#53, sum#54] -(71) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#54, sum#55] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), true, [id=#56] +(67) Exchange +Input [7]: [d_year#14, 
i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#53, sum#54] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), ENSURE_REQUIREMENTS, [id=#55] -(72) HashAggregate [codegen id : 26] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#54, sum#55] +(68) HashAggregate [codegen id : 24] +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#53, sum#54] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [sum(cast(sales_cnt#22 as bigint)), sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#57, sum(UnscaledValue(sales_amt#23))#58] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#22 as bigint))#57 AS sales_cnt#59, MakeDecimal(sum(UnscaledValue(sales_amt#23))#58,18,2) AS sales_amt#60] +Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#56, sum(UnscaledValue(sales_amt#23))#57] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#22 as bigint))#56 AS sales_cnt#58, MakeDecimal(sum(UnscaledValue(sales_amt#23))#57,18,2) AS sales_amt#59] -(73) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#59, sales_amt#60] -Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), true, [id=#61] +(69) Exchange +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#58, sales_amt#59] +Arguments: hashpartitioning(i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), ENSURE_REQUIREMENTS, [id=#60] -(74) Sort [codegen id : 27] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#59, sales_amt#60] +(70) Sort [codegen id : 25] +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#58, sales_amt#59] Arguments: [i_brand_id#7 ASC NULLS FIRST, i_class_id#8 ASC NULLS FIRST, i_category_id#9 ASC NULLS FIRST, i_manufact_id#11 ASC NULLS FIRST], false, 0 -(75) Scan parquet default.catalog_sales +(71) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(76) ColumnarToRow [codegen id : 30] +(72) ColumnarToRow [codegen id : 28] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] -(77) Filter [codegen id : 30] +(73) Filter [codegen id : 28] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) -(78) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(74) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(79) BroadcastHashJoin [codegen id : 30] +(75) BroadcastHashJoin [codegen id : 28] Left keys [1]: [cs_item_sk#2] -Right keys [1]: [i_item_sk#62] +Right keys [1]: [i_item_sk#61] Join condition: None -(80) Project [codegen id : 30] -Output [9]: [cs_sold_date_sk#1, 
cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] -Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(76) Project [codegen id : 28] +Output [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(81) Scan parquet default.date_dim -Output [2]: [d_date_sk#67, d_year#68] +(77) Scan parquet default.date_dim +Output [2]: [d_date_sk#66, d_year#67] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct -(82) ColumnarToRow [codegen id : 29] -Input [2]: [d_date_sk#67, d_year#68] +(78) ColumnarToRow [codegen id : 27] +Input [2]: [d_date_sk#66, d_year#67] -(83) Filter [codegen id : 29] -Input [2]: [d_date_sk#67, d_year#68] -Condition : ((isnotnull(d_year#68) AND (d_year#68 = 2001)) AND isnotnull(d_date_sk#67)) +(79) Filter [codegen id : 27] +Input [2]: [d_date_sk#66, d_year#67] +Condition : ((isnotnull(d_year#67) AND (d_year#67 = 2001)) AND isnotnull(d_date_sk#66)) -(84) BroadcastExchange -Input [2]: [d_date_sk#67, d_year#68] -Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#69] +(80) BroadcastExchange +Input [2]: [d_date_sk#66, d_year#67] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#68] -(85) BroadcastHashJoin [codegen id : 30] +(81) BroadcastHashJoin [codegen id : 28] Left keys [1]: [cs_sold_date_sk#1] -Right keys [1]: [d_date_sk#67] +Right keys [1]: [d_date_sk#66] Join condition: None -(86) Project [codegen id : 30] -Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_date_sk#67, d_year#68] +(82) Project [codegen id : 28] +Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_date_sk#66, d_year#67] -(87) Exchange -Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), true, [id=#70] +(83) Exchange +Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: hashpartitioning(cs_order_number#3, cs_item_sk#2, 5), ENSURE_REQUIREMENTS, [id=#69] -(88) Sort [codegen id : 31] -Input [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] +(84) Sort [codegen id : 29] +Input 
[9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] Arguments: [cs_order_number#3 ASC NULLS FIRST, cs_item_sk#2 ASC NULLS FIRST], false, 0 -(89) ReusedExchange [Reuses operator id: 22] +(85) ReusedExchange [Reuses operator id: 22] Output [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] -(90) Sort [codegen id : 33] +(86) Sort [codegen id : 31] Input [4]: [cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] Arguments: [cr_order_number#18 ASC NULLS FIRST, cr_item_sk#17 ASC NULLS FIRST], false, 0 -(91) SortMergeJoin +(87) SortMergeJoin Left keys [2]: [cs_order_number#3, cs_item_sk#2] Right keys [2]: [cr_order_number#18, cr_item_sk#17] Join condition: None -(92) Project [codegen id : 34] -Output [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, (cs_quantity#4 - coalesce(cr_return_quantity#19, 0)) AS sales_cnt#22, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#20, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#23] -Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68, cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] +(88) Project [codegen id : 32] +Output [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, (cs_quantity#4 - coalesce(cr_return_quantity#19, 0)) AS sales_cnt#22, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#20, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#23] +Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67, cr_item_sk#17, cr_order_number#18, cr_return_quantity#19, cr_return_amount#20] -(93) Scan parquet default.store_sales +(89) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(94) ColumnarToRow [codegen id : 37] +(90) ColumnarToRow [codegen id : 35] Input [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] -(95) Filter [codegen id : 37] +(91) Filter [codegen id : 35] Input [5]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28] Condition : (isnotnull(ss_item_sk#25) AND isnotnull(ss_sold_date_sk#24)) -(96) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(92) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(97) BroadcastHashJoin [codegen id : 37] +(93) BroadcastHashJoin [codegen id : 35] Left keys [1]: [ss_item_sk#25] -Right keys [1]: [i_item_sk#62] +Right keys [1]: [i_item_sk#61] Join condition: None -(98) Project [codegen id : 37] -Output [9]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] 
-Input [10]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(94) Project [codegen id : 35] +Output [9]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Input [10]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(99) ReusedExchange [Reuses operator id: 84] -Output [2]: [d_date_sk#67, d_year#68] +(95) ReusedExchange [Reuses operator id: 80] +Output [2]: [d_date_sk#66, d_year#67] -(100) BroadcastHashJoin [codegen id : 37] +(96) BroadcastHashJoin [codegen id : 35] Left keys [1]: [ss_sold_date_sk#24] -Right keys [1]: [d_date_sk#67] +Right keys [1]: [d_date_sk#66] Join condition: None -(101) Project [codegen id : 37] -Output [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Input [11]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_date_sk#67, d_year#68] +(97) Project [codegen id : 35] +Output [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Input [11]: [ss_sold_date_sk#24, ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_date_sk#66, d_year#67] -(102) Exchange -Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), true, [id=#71] +(98) Exchange +Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: hashpartitioning(cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint), 5), ENSURE_REQUIREMENTS, [id=#70] -(103) Sort [codegen id : 38] -Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] +(99) Sort [codegen id : 36] +Input [9]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] Arguments: [cast(ss_ticket_number#26 as bigint) ASC NULLS FIRST, cast(ss_item_sk#25 as bigint) ASC NULLS FIRST], false, 0 -(104) ReusedExchange [Reuses operator id: 40] +(100) ReusedExchange [Reuses operator id: 40] Output [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -(105) Sort [codegen id : 40] +(101) Sort [codegen id : 38] Input [4]: [sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] Arguments: [sr_ticket_number#31 ASC NULLS FIRST, sr_item_sk#30 ASC NULLS FIRST], false, 0 -(106) SortMergeJoin +(102) SortMergeJoin Left keys [2]: [cast(ss_ticket_number#26 as bigint), cast(ss_item_sk#25 as bigint)] Right keys [2]: [sr_ticket_number#31, sr_item_sk#30] Join condition: None -(107) Project [codegen id : 41] -Output [7]: 
[d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, (ss_quantity#27 - coalesce(sr_return_quantity#32, 0)) AS sales_cnt#72, CheckOverflow((promote_precision(cast(ss_ext_sales_price#28 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#33, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#73] -Input [13]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68, sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] - -(108) Union - -(109) HashAggregate [codegen id : 42] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] - -(110) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Arguments: hashpartitioning(d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23, 5), true, [id=#74] - -(111) HashAggregate [codegen id : 43] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +(103) Project [codegen id : 39] +Output [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, (ss_quantity#27 - coalesce(sr_return_quantity#32, 0)) AS sales_cnt#71, CheckOverflow((promote_precision(cast(ss_ext_sales_price#28 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#33, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#72] +Input [13]: [ss_item_sk#25, ss_ticket_number#26, ss_quantity#27, ss_ext_sales_price#28, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67, sr_item_sk#30, sr_ticket_number#31, sr_return_quantity#32, sr_return_amt#33] -(112) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(104) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(113) ColumnarToRow [codegen id : 46] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] +(105) ColumnarToRow [codegen id : 42] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] -(114) Filter [codegen id : 46] -Input [5]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42] -Condition : (isnotnull(ws_item_sk#39) AND isnotnull(ws_sold_date_sk#38)) +(106) Filter [codegen id : 42] +Input [5]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41] +Condition : (isnotnull(ws_item_sk#38) AND 
isnotnull(ws_sold_date_sk#37)) -(115) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(107) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(116) BroadcastHashJoin [codegen id : 46] -Left keys [1]: [ws_item_sk#39] -Right keys [1]: [i_item_sk#62] +(108) BroadcastHashJoin [codegen id : 42] +Left keys [1]: [ws_item_sk#38] +Right keys [1]: [i_item_sk#61] Join condition: None -(117) Project [codegen id : 46] -Output [9]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] -Input [10]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_item_sk#62, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(109) Project [codegen id : 42] +Output [9]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Input [10]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_item_sk#61, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] -(118) ReusedExchange [Reuses operator id: 84] -Output [2]: [d_date_sk#67, d_year#68] +(110) ReusedExchange [Reuses operator id: 80] +Output [2]: [d_date_sk#66, d_year#67] -(119) BroadcastHashJoin [codegen id : 46] -Left keys [1]: [ws_sold_date_sk#38] -Right keys [1]: [d_date_sk#67] +(111) BroadcastHashJoin [codegen id : 42] +Left keys [1]: [ws_sold_date_sk#37] +Right keys [1]: [d_date_sk#66] Join condition: None -(120) Project [codegen id : 46] -Output [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Input [11]: [ws_sold_date_sk#38, ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_date_sk#67, d_year#68] +(112) Project [codegen id : 42] +Output [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Input [11]: [ws_sold_date_sk#37, ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_date_sk#66, d_year#67] -(121) Exchange -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: hashpartitioning(cast(ws_order_number#40 as bigint), cast(ws_item_sk#39 as bigint), 5), true, [id=#75] +(113) Exchange +Input [9]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: hashpartitioning(cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint), 5), ENSURE_REQUIREMENTS, [id=#73] -(122) Sort [codegen id : 47] -Input [9]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68] -Arguments: [cast(ws_order_number#40 as bigint) ASC NULLS FIRST, cast(ws_item_sk#39 as bigint) ASC NULLS FIRST], false, 0 +(114) Sort [codegen id : 43] +Input [9]: [ws_item_sk#38, ws_order_number#39, 
ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67] +Arguments: [cast(ws_order_number#39 as bigint) ASC NULLS FIRST, cast(ws_item_sk#38 as bigint) ASC NULLS FIRST], false, 0 -(123) ReusedExchange [Reuses operator id: 62] -Output [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(115) ReusedExchange [Reuses operator id: 58] +Output [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(124) Sort [codegen id : 49] -Input [4]: [wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] -Arguments: [wr_order_number#45 ASC NULLS FIRST, wr_item_sk#44 ASC NULLS FIRST], false, 0 +(116) Sort [codegen id : 45] +Input [4]: [wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] +Arguments: [wr_order_number#44 ASC NULLS FIRST, wr_item_sk#43 ASC NULLS FIRST], false, 0 -(125) SortMergeJoin -Left keys [2]: [cast(ws_order_number#40 as bigint), cast(ws_item_sk#39 as bigint)] -Right keys [2]: [wr_order_number#45, wr_item_sk#44] +(117) SortMergeJoin +Left keys [2]: [cast(ws_order_number#39 as bigint), cast(ws_item_sk#38 as bigint)] +Right keys [2]: [wr_order_number#44, wr_item_sk#43] Join condition: None -(126) Project [codegen id : 50] -Output [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, (ws_quantity#41 - coalesce(wr_return_quantity#46, 0)) AS sales_cnt#76, CheckOverflow((promote_precision(cast(ws_ext_sales_price#42 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#47, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#77] -Input [13]: [ws_item_sk#39, ws_order_number#40, ws_quantity#41, ws_ext_sales_price#42, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, d_year#68, wr_item_sk#44, wr_order_number#45, wr_return_quantity#46, wr_return_amt#47] +(118) Project [codegen id : 46] +Output [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, (ws_quantity#40 - coalesce(wr_return_quantity#45, 0)) AS sales_cnt#74, CheckOverflow((promote_precision(cast(ws_ext_sales_price#41 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#46, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#75] +Input [13]: [ws_item_sk#38, ws_order_number#39, ws_quantity#40, ws_ext_sales_price#41, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, d_year#67, wr_item_sk#43, wr_order_number#44, wr_return_quantity#45, wr_return_amt#46] -(127) Union +(119) Union -(128) HashAggregate [codegen id : 51] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +(120) HashAggregate [codegen id : 47] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Keys [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] -(129) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Arguments: 
hashpartitioning(d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23, 5), true, [id=#78] +(121) Exchange +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Arguments: hashpartitioning(d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23, 5), ENSURE_REQUIREMENTS, [id=#76] -(130) HashAggregate [codegen id : 52] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +(122) HashAggregate [codegen id : 48] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Keys [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] -(131) HashAggregate [codegen id : 52] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#22, sales_amt#23] -Keys [5]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(123) HashAggregate [codegen id : 48] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#22, sales_amt#23] +Keys [5]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] Functions [2]: [partial_sum(cast(sales_cnt#22 as bigint)), partial_sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum#79, sum#80] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sum#81, sum#82] +Aggregate Attributes [2]: [sum#77, sum#78] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum#79, sum#80] -(132) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sum#81, sum#82] -Arguments: hashpartitioning(d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, 5), true, [id=#83] +(124) Exchange +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum#79, sum#80] +Arguments: hashpartitioning(d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, 5), ENSURE_REQUIREMENTS, [id=#81] -(133) HashAggregate [codegen id : 53] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sum#81, sum#82] -Keys [5]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] +(125) HashAggregate [codegen id : 49] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum#79, sum#80] +Keys [5]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] Functions [2]: [sum(cast(sales_cnt#22 as bigint)), sum(UnscaledValue(sales_amt#23))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#84, sum(UnscaledValue(sales_amt#23))#85] -Results [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sum(cast(sales_cnt#22 as bigint))#84 AS sales_cnt#86, MakeDecimal(sum(UnscaledValue(sales_amt#23))#85,18,2) AS sales_amt#87] 
+Aggregate Attributes [2]: [sum(cast(sales_cnt#22 as bigint))#82, sum(UnscaledValue(sales_amt#23))#83] +Results [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sum(cast(sales_cnt#22 as bigint))#82 AS sales_cnt#84, MakeDecimal(sum(UnscaledValue(sales_amt#23))#83,18,2) AS sales_amt#85] -(134) Exchange -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#86, sales_amt#87] -Arguments: hashpartitioning(i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, 5), true, [id=#88] +(126) Exchange +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#84, sales_amt#85] +Arguments: hashpartitioning(i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, 5), ENSURE_REQUIREMENTS, [id=#86] -(135) Sort [codegen id : 54] -Input [7]: [d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#86, sales_amt#87] -Arguments: [i_brand_id#63 ASC NULLS FIRST, i_class_id#64 ASC NULLS FIRST, i_category_id#65 ASC NULLS FIRST, i_manufact_id#66 ASC NULLS FIRST], false, 0 +(127) Sort [codegen id : 50] +Input [7]: [d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#84, sales_amt#85] +Arguments: [i_brand_id#62 ASC NULLS FIRST, i_class_id#63 ASC NULLS FIRST, i_category_id#64 ASC NULLS FIRST, i_manufact_id#65 ASC NULLS FIRST], false, 0 -(136) SortMergeJoin [codegen id : 55] +(128) SortMergeJoin [codegen id : 51] Left keys [4]: [i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Right keys [4]: [i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66] -Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#59 as decimal(17,2))) / promote_precision(cast(sales_cnt#86 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) +Right keys [4]: [i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65] +Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#58 as decimal(17,2))) / promote_precision(cast(sales_cnt#84 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) -(137) Project [codegen id : 55] -Output [10]: [d_year#68 AS prev_year#89, d_year#14 AS year#90, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#86 AS prev_yr_cnt#91, sales_cnt#59 AS curr_yr_cnt#92, (sales_cnt#59 - sales_cnt#86) AS sales_cnt_diff#93, CheckOverflow((promote_precision(cast(sales_amt#60 as decimal(19,2))) - promote_precision(cast(sales_amt#87 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#94] -Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#59, sales_amt#60, d_year#68, i_brand_id#63, i_class_id#64, i_category_id#65, i_manufact_id#66, sales_cnt#86, sales_amt#87] +(129) Project [codegen id : 51] +Output [10]: [d_year#67 AS prev_year#87, d_year#14 AS year#88, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#84 AS prev_yr_cnt#89, sales_cnt#58 AS curr_yr_cnt#90, (sales_cnt#58 - sales_cnt#84) AS sales_cnt_diff#91, CheckOverflow((promote_precision(cast(sales_amt#59 as decimal(19,2))) - promote_precision(cast(sales_amt#85 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#92] +Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#58, sales_amt#59, d_year#67, i_brand_id#62, i_class_id#63, i_category_id#64, i_manufact_id#65, sales_cnt#84, sales_amt#85] -(138) TakeOrderedAndProject -Input [10]: 
[prev_year#89, year#90, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#91, curr_yr_cnt#92, sales_cnt_diff#93, sales_amt_diff#94] -Arguments: 100, [sales_cnt_diff#93 ASC NULLS FIRST, sales_amt_diff#94 ASC NULLS FIRST], [prev_year#89, year#90, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#91, curr_yr_cnt#92, sales_cnt_diff#93, sales_amt_diff#94] +(130) TakeOrderedAndProject +Input [10]: [prev_year#87, year#88, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#89, curr_yr_cnt#90, sales_cnt_diff#91, sales_amt_diff#92] +Arguments: 100, [sales_cnt_diff#91 ASC NULLS FIRST, sales_amt_diff#92 ASC NULLS FIRST], [prev_year#87, year#88, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#89, curr_yr_cnt#90, sales_cnt_diff#91, sales_amt_diff#92] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/simplified.txt index 69f8b6a5b6789..b44ed2a7a3894 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75.sf100/simplified.txt @@ -1,113 +1,105 @@ TakeOrderedAndProject [sales_cnt_diff,sales_amt_diff,prev_year,year,i_brand_id,i_class_id,i_category_id,i_manufact_id,prev_yr_cnt,curr_yr_cnt] - WholeStageCodegen (55) + WholeStageCodegen (51) Project [d_year,d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt,sales_amt,sales_amt] SortMergeJoin [i_brand_id,i_class_id,i_category_id,i_manufact_id,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt] InputAdapter - WholeStageCodegen (27) + WholeStageCodegen (25) Sort [i_brand_id,i_class_id,i_category_id,i_manufact_id] InputAdapter Exchange [i_brand_id,i_class_id,i_category_id,i_manufact_id] #1 - WholeStageCodegen (26) + WholeStageCodegen (24) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #2 - WholeStageCodegen (25) + WholeStageCodegen (23) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #3 - WholeStageCodegen (24) + WholeStageCodegen (22) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union - WholeStageCodegen (16) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + WholeStageCodegen (7) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #4 - WholeStageCodegen (15) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + WholeStageCodegen (4) + Sort [cs_order_number,cs_item_sk] InputAdapter - Union - WholeStageCodegen (7) - Project 
[d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - InputAdapter - SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - WholeStageCodegen (4) - Sort [cs_order_number,cs_item_sk] - InputAdapter - Exchange [cs_order_number,cs_item_sk] #5 - WholeStageCodegen (3) - Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (1) - Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (2) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (6) - Sort [cr_order_number,cr_item_sk] - InputAdapter - Exchange [cr_order_number,cr_item_sk] #8 - WholeStageCodegen (5) - Filter [cr_order_number,cr_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] - WholeStageCodegen (14) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - InputAdapter - SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - WholeStageCodegen (11) - Sort [ss_ticket_number,ss_item_sk] - InputAdapter - Exchange [ss_ticket_number,ss_item_sk] #9 - WholeStageCodegen (10) - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 - InputAdapter - ReusedExchange [d_date_sk,d_year] #7 - WholeStageCodegen (13) - Sort [sr_ticket_number,sr_item_sk] - InputAdapter - Exchange [sr_ticket_number,sr_item_sk] #10 - WholeStageCodegen (12) - Filter [sr_ticket_number,sr_item_sk] + Exchange [cs_order_number,cs_item_sk] #4 + WholeStageCodegen (3) + Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales 
[cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (1) + Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] + Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] ColumnarToRow InputAdapter - Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] - WholeStageCodegen (23) + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (2) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (6) + Sort [cr_order_number,cr_item_sk] + InputAdapter + Exchange [cr_order_number,cr_item_sk] #7 + WholeStageCodegen (5) + Filter [cr_order_number,cr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] + WholeStageCodegen (14) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + InputAdapter + SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + WholeStageCodegen (11) + Sort [ss_ticket_number,ss_item_sk] + InputAdapter + Exchange [ss_ticket_number,ss_item_sk] #8 + WholeStageCodegen (10) + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 + InputAdapter + ReusedExchange [d_date_sk,d_year] #6 + WholeStageCodegen (13) + Sort [sr_ticket_number,sr_item_sk] + InputAdapter + Exchange [sr_ticket_number,sr_item_sk] #9 + WholeStageCodegen (12) + Filter [sr_ticket_number,sr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] + WholeStageCodegen (21) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] InputAdapter SortMergeJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] - WholeStageCodegen (20) + WholeStageCodegen (18) Sort [ws_order_number,ws_item_sk] InputAdapter - Exchange [ws_order_number,ws_item_sk] #11 - WholeStageCodegen (19) + Exchange [ws_order_number,ws_item_sk] #10 + WholeStageCodegen (17) Project [ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] BroadcastHashJoin [ws_sold_date_sk,d_date_sk] Project [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] @@ -117,108 +109,100 @@ TakeOrderedAndProject [sales_cnt_diff,sales_amt_diff,prev_year,year,i_brand_id,i InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 + ReusedExchange 
[i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 InputAdapter - ReusedExchange [d_date_sk,d_year] #7 - WholeStageCodegen (22) + ReusedExchange [d_date_sk,d_year] #6 + WholeStageCodegen (20) Sort [wr_order_number,wr_item_sk] InputAdapter - Exchange [wr_order_number,wr_item_sk] #12 - WholeStageCodegen (21) + Exchange [wr_order_number,wr_item_sk] #11 + WholeStageCodegen (19) Filter [wr_order_number,wr_item_sk] ColumnarToRow InputAdapter Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] InputAdapter - WholeStageCodegen (54) + WholeStageCodegen (50) Sort [i_brand_id,i_class_id,i_category_id,i_manufact_id] InputAdapter - Exchange [i_brand_id,i_class_id,i_category_id,i_manufact_id] #13 - WholeStageCodegen (53) + Exchange [i_brand_id,i_class_id,i_category_id,i_manufact_id] #12 + WholeStageCodegen (49) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #14 - WholeStageCodegen (52) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #13 + WholeStageCodegen (48) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #15 - WholeStageCodegen (51) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #14 + WholeStageCodegen (47) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union - WholeStageCodegen (43) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + WholeStageCodegen (32) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #16 - WholeStageCodegen (42) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] + SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + WholeStageCodegen (29) + Sort [cs_order_number,cs_item_sk] InputAdapter - Union - WholeStageCodegen (34) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - InputAdapter - SortMergeJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - WholeStageCodegen (31) - Sort [cs_order_number,cs_item_sk] - InputAdapter - Exchange [cs_order_number,cs_item_sk] #17 - WholeStageCodegen (30) - Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 - InputAdapter - BroadcastExchange #18 - 
WholeStageCodegen (29) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - WholeStageCodegen (33) - Sort [cr_order_number,cr_item_sk] - InputAdapter - ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #8 - WholeStageCodegen (41) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - InputAdapter - SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - WholeStageCodegen (38) - Sort [ss_ticket_number,ss_item_sk] - InputAdapter - Exchange [ss_ticket_number,ss_item_sk] #19 - WholeStageCodegen (37) - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 - InputAdapter - ReusedExchange [d_date_sk,d_year] #18 - WholeStageCodegen (40) - Sort [sr_ticket_number,sr_item_sk] - InputAdapter - ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #10 - WholeStageCodegen (50) + Exchange [cs_order_number,cs_item_sk] #15 + WholeStageCodegen (28) + Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 + InputAdapter + BroadcastExchange #16 + WholeStageCodegen (27) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + WholeStageCodegen (31) + Sort [cr_order_number,cr_item_sk] + InputAdapter + ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #7 + WholeStageCodegen (39) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + InputAdapter + SortMergeJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + WholeStageCodegen (36) + Sort [ss_ticket_number,ss_item_sk] + InputAdapter + Exchange [ss_ticket_number,ss_item_sk] #17 + WholeStageCodegen (35) + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + 
ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 + InputAdapter + ReusedExchange [d_date_sk,d_year] #16 + WholeStageCodegen (38) + Sort [sr_ticket_number,sr_item_sk] + InputAdapter + ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #9 + WholeStageCodegen (46) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] InputAdapter SortMergeJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] - WholeStageCodegen (47) + WholeStageCodegen (43) Sort [ws_order_number,ws_item_sk] InputAdapter - Exchange [ws_order_number,ws_item_sk] #20 - WholeStageCodegen (46) + Exchange [ws_order_number,ws_item_sk] #18 + WholeStageCodegen (42) Project [ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] BroadcastHashJoin [ws_sold_date_sk,d_date_sk] Project [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] @@ -228,10 +212,10 @@ TakeOrderedAndProject [sales_cnt_diff,sales_amt_diff,prev_year,year,i_brand_id,i InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #6 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #5 InputAdapter - ReusedExchange [d_date_sk,d_year] #18 - WholeStageCodegen (49) + ReusedExchange [d_date_sk,d_year] #16 + WholeStageCodegen (45) Sort [wr_order_number,wr_item_sk] InputAdapter - ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #12 + ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #11 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt index 1d8aab417f188..ae7442399ebd4 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/explain.txt @@ -1,121 +1,113 @@ == Physical Plan == -TakeOrderedAndProject (117) -+- * Project (116) - +- * BroadcastHashJoin Inner BuildRight (115) - :- * HashAggregate (63) - : +- Exchange (62) - : +- * HashAggregate (61) - : +- * HashAggregate (60) - : +- Exchange (59) - : +- * HashAggregate (58) - : +- Union (57) - : :- * HashAggregate (41) - : : +- Exchange (40) - : : +- * HashAggregate (39) - : : +- Union (38) - : : :- * Project (22) - : : : +- * BroadcastHashJoin LeftOuter BuildRight (21) - : : : :- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- * Project (10) - : : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : : :- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.catalog_sales (1) - : : : : : +- BroadcastExchange (8) - : : : : : +- * Project (7) - : : : : : +- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.item (4) - : : : : +- BroadcastExchange (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.date_dim (11) - : : : +- BroadcastExchange (20) - : : : +- * Filter (19) - : : : +- * ColumnarToRow (18) - : : : +- Scan parquet default.catalog_returns (17) - : : +- * Project (37) - : : +- * BroadcastHashJoin LeftOuter BuildRight 
(36) - : : :- * Project (31) - : : : +- * BroadcastHashJoin Inner BuildRight (30) - : : : :- * Project (28) - : : : : +- * BroadcastHashJoin Inner BuildRight (27) - : : : : :- * Filter (25) - : : : : : +- * ColumnarToRow (24) - : : : : : +- Scan parquet default.store_sales (23) - : : : : +- ReusedExchange (26) - : : : +- ReusedExchange (29) - : : +- BroadcastExchange (35) - : : +- * Filter (34) - : : +- * ColumnarToRow (33) - : : +- Scan parquet default.store_returns (32) - : +- * Project (56) - : +- * BroadcastHashJoin LeftOuter BuildRight (55) - : :- * Project (50) - : : +- * BroadcastHashJoin Inner BuildRight (49) - : : :- * Project (47) - : : : +- * BroadcastHashJoin Inner BuildRight (46) - : : : :- * Filter (44) - : : : : +- * ColumnarToRow (43) - : : : : +- Scan parquet default.web_sales (42) - : : : +- ReusedExchange (45) - : : +- ReusedExchange (48) - : +- BroadcastExchange (54) - : +- * Filter (53) - : +- * ColumnarToRow (52) - : +- Scan parquet default.web_returns (51) - +- BroadcastExchange (114) - +- * HashAggregate (113) - +- Exchange (112) - +- * HashAggregate (111) - +- * HashAggregate (110) - +- Exchange (109) - +- * HashAggregate (108) - +- Union (107) - :- * HashAggregate (94) - : +- Exchange (93) - : +- * HashAggregate (92) - : +- Union (91) - : :- * Project (78) - : : +- * BroadcastHashJoin LeftOuter BuildRight (77) - : : :- * Project (75) - : : : +- * BroadcastHashJoin Inner BuildRight (74) - : : : :- * Project (69) - : : : : +- * BroadcastHashJoin Inner BuildRight (68) - : : : : :- * Filter (66) - : : : : : +- * ColumnarToRow (65) - : : : : : +- Scan parquet default.catalog_sales (64) - : : : : +- ReusedExchange (67) - : : : +- BroadcastExchange (73) - : : : +- * Filter (72) - : : : +- * ColumnarToRow (71) - : : : +- Scan parquet default.date_dim (70) - : : +- ReusedExchange (76) - : +- * Project (90) - : +- * BroadcastHashJoin LeftOuter BuildRight (89) - : :- * Project (87) - : : +- * BroadcastHashJoin Inner BuildRight (86) - : : :- * Project (84) - : : : +- * BroadcastHashJoin Inner BuildRight (83) - : : : :- * Filter (81) - : : : : +- * ColumnarToRow (80) - : : : : +- Scan parquet default.store_sales (79) - : : : +- ReusedExchange (82) - : : +- ReusedExchange (85) - : +- ReusedExchange (88) - +- * Project (106) - +- * BroadcastHashJoin LeftOuter BuildRight (105) - :- * Project (103) - : +- * BroadcastHashJoin Inner BuildRight (102) - : :- * Project (100) - : : +- * BroadcastHashJoin Inner BuildRight (99) - : : :- * Filter (97) - : : : +- * ColumnarToRow (96) - : : : +- Scan parquet default.web_sales (95) - : : +- ReusedExchange (98) - : +- ReusedExchange (101) - +- ReusedExchange (104) +TakeOrderedAndProject (109) ++- * Project (108) + +- * BroadcastHashJoin Inner BuildRight (107) + :- * HashAggregate (59) + : +- Exchange (58) + : +- * HashAggregate (57) + : +- * HashAggregate (56) + : +- Exchange (55) + : +- * HashAggregate (54) + : +- Union (53) + : :- * Project (22) + : : +- * BroadcastHashJoin LeftOuter BuildRight (21) + : : :- * Project (16) + : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : :- * Project (10) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.catalog_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.item (4) + : : : +- BroadcastExchange (14) + : : : +- * Filter (13) + : : : +- * ColumnarToRow (12) + : : : +- Scan parquet 
default.date_dim (11) + : : +- BroadcastExchange (20) + : : +- * Filter (19) + : : +- * ColumnarToRow (18) + : : +- Scan parquet default.catalog_returns (17) + : :- * Project (37) + : : +- * BroadcastHashJoin LeftOuter BuildRight (36) + : : :- * Project (31) + : : : +- * BroadcastHashJoin Inner BuildRight (30) + : : : :- * Project (28) + : : : : +- * BroadcastHashJoin Inner BuildRight (27) + : : : : :- * Filter (25) + : : : : : +- * ColumnarToRow (24) + : : : : : +- Scan parquet default.store_sales (23) + : : : : +- ReusedExchange (26) + : : : +- ReusedExchange (29) + : : +- BroadcastExchange (35) + : : +- * Filter (34) + : : +- * ColumnarToRow (33) + : : +- Scan parquet default.store_returns (32) + : +- * Project (52) + : +- * BroadcastHashJoin LeftOuter BuildRight (51) + : :- * Project (46) + : : +- * BroadcastHashJoin Inner BuildRight (45) + : : :- * Project (43) + : : : +- * BroadcastHashJoin Inner BuildRight (42) + : : : :- * Filter (40) + : : : : +- * ColumnarToRow (39) + : : : : +- Scan parquet default.web_sales (38) + : : : +- ReusedExchange (41) + : : +- ReusedExchange (44) + : +- BroadcastExchange (50) + : +- * Filter (49) + : +- * ColumnarToRow (48) + : +- Scan parquet default.web_returns (47) + +- BroadcastExchange (106) + +- * HashAggregate (105) + +- Exchange (104) + +- * HashAggregate (103) + +- * HashAggregate (102) + +- Exchange (101) + +- * HashAggregate (100) + +- Union (99) + :- * Project (74) + : +- * BroadcastHashJoin LeftOuter BuildRight (73) + : :- * Project (71) + : : +- * BroadcastHashJoin Inner BuildRight (70) + : : :- * Project (65) + : : : +- * BroadcastHashJoin Inner BuildRight (64) + : : : :- * Filter (62) + : : : : +- * ColumnarToRow (61) + : : : : +- Scan parquet default.catalog_sales (60) + : : : +- ReusedExchange (63) + : : +- BroadcastExchange (69) + : : +- * Filter (68) + : : +- * ColumnarToRow (67) + : : +- Scan parquet default.date_dim (66) + : +- ReusedExchange (72) + :- * Project (86) + : +- * BroadcastHashJoin LeftOuter BuildRight (85) + : :- * Project (83) + : : +- * BroadcastHashJoin Inner BuildRight (82) + : : :- * Project (80) + : : : +- * BroadcastHashJoin Inner BuildRight (79) + : : : :- * Filter (77) + : : : : +- * ColumnarToRow (76) + : : : : +- Scan parquet default.store_sales (75) + : : : +- ReusedExchange (78) + : : +- ReusedExchange (81) + : +- ReusedExchange (84) + +- * Project (98) + +- * BroadcastHashJoin LeftOuter BuildRight (97) + :- * Project (95) + : +- * BroadcastHashJoin Inner BuildRight (94) + : :- * Project (92) + : : +- * BroadcastHashJoin Inner BuildRight (91) + : : :- * Filter (89) + : : : +- * ColumnarToRow (88) + : : : +- Scan parquet default.web_sales (87) + : : +- ReusedExchange (90) + : +- ReusedExchange (93) + +- ReusedExchange (96) (1) Scan parquet default.catalog_sales @@ -282,366 +274,326 @@ Join condition: None Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ss_quantity#26 - coalesce(sr_return_quantity#30, 0)) AS sales_cnt#33, CheckOverflow((promote_precision(cast(ss_ext_sales_price#27 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#31, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#34] Input [13]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] -(38) Union - -(39) HashAggregate [codegen id : 9] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, 
i_manufact_id#11, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] - -(40) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22, 5), true, [id=#35] - -(41) HashAggregate [codegen id : 10] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] - -(42) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(38) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(43) ColumnarToRow [codegen id : 14] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(39) ColumnarToRow [codegen id : 12] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] -(44) Filter [codegen id : 14] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] -Condition : (isnotnull(ws_item_sk#37) AND isnotnull(ws_sold_date_sk#36)) +(40) Filter [codegen id : 12] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] +Condition : (isnotnull(ws_item_sk#36) AND isnotnull(ws_sold_date_sk#35)) -(45) ReusedExchange [Reuses operator id: 8] +(41) ReusedExchange [Reuses operator id: 8] Output [5]: [i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(46) BroadcastHashJoin [codegen id : 14] -Left keys [1]: [ws_item_sk#37] +(42) BroadcastHashJoin [codegen id : 12] +Left keys [1]: [ws_item_sk#36] Right keys [1]: [i_item_sk#6] Join condition: None -(47) Project [codegen id : 14] -Output [9]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Input [10]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +(43) Project [codegen id : 12] +Output [9]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] +Input [10]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_item_sk#6, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -(48) ReusedExchange [Reuses operator id: 14] +(44) ReusedExchange [Reuses operator id: 14] Output [2]: [d_date_sk#13, d_year#14] -(49) BroadcastHashJoin [codegen id : 14] -Left keys [1]: 
[ws_sold_date_sk#36] +(45) BroadcastHashJoin [codegen id : 12] +Left keys [1]: [ws_sold_date_sk#35] Right keys [1]: [d_date_sk#13] Join condition: None -(50) Project [codegen id : 14] -Output [9]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] -Input [11]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] +(46) Project [codegen id : 12] +Output [9]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14] +Input [11]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_date_sk#13, d_year#14] -(51) Scan parquet default.web_returns -Output [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(47) Scan parquet default.web_returns +Output [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] Batched: true Location [not included in comparison]/{warehouse_dir}/web_returns] PushedFilters: [IsNotNull(wr_order_number), IsNotNull(wr_item_sk)] ReadSchema: struct -(52) ColumnarToRow [codegen id : 13] -Input [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(48) ColumnarToRow [codegen id : 11] +Input [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(53) Filter [codegen id : 13] -Input [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] -Condition : (isnotnull(wr_order_number#42) AND isnotnull(wr_item_sk#41)) +(49) Filter [codegen id : 11] +Input [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] +Condition : (isnotnull(wr_order_number#41) AND isnotnull(wr_item_sk#40)) -(54) BroadcastExchange -Input [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] -Arguments: HashedRelationBroadcastMode(List(input[1, bigint, false], input[0, bigint, false]),false), [id=#45] +(50) BroadcastExchange +Input [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] +Arguments: HashedRelationBroadcastMode(List(input[1, bigint, false], input[0, bigint, false]),false), [id=#44] -(55) BroadcastHashJoin [codegen id : 14] -Left keys [2]: [cast(ws_order_number#38 as bigint), cast(ws_item_sk#37 as bigint)] -Right keys [2]: [wr_order_number#42, wr_item_sk#41] +(51) BroadcastHashJoin [codegen id : 12] +Left keys [2]: [cast(ws_order_number#37 as bigint), cast(ws_item_sk#36 as bigint)] +Right keys [2]: [wr_order_number#41, wr_item_sk#40] Join condition: None -(56) Project [codegen id : 14] -Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, (ws_quantity#39 - coalesce(wr_return_quantity#43, 0)) AS sales_cnt#46, CheckOverflow((promote_precision(cast(ws_ext_sales_price#40 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#44, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#47] -Input [13]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(52) Project [codegen id : 12] +Output [7]: [d_year#14, i_brand_id#7, i_class_id#8, 
i_category_id#9, i_manufact_id#11, (ws_quantity#38 - coalesce(wr_return_quantity#42, 0)) AS sales_cnt#45, CheckOverflow((promote_precision(cast(ws_ext_sales_price#39 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#43, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#46] +Input [13]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, d_year#14, wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(57) Union +(53) Union -(58) HashAggregate [codegen id : 15] +(54) HashAggregate [codegen id : 13] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -(59) Exchange +(55) Exchange Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22, 5), true, [id=#48] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22, 5), ENSURE_REQUIREMENTS, [id=#47] -(60) HashAggregate [codegen id : 16] +(56) HashAggregate [codegen id : 14] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Keys [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] -(61) HashAggregate [codegen id : 16] +(57) HashAggregate [codegen id : 14] Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#21, sales_amt#22] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [partial_sum(cast(sales_cnt#21 as bigint)), partial_sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum#49, sum#50] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#51, sum#52] +Aggregate Attributes [2]: [sum#48, sum#49] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#50, sum#51] -(62) Exchange -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#51, sum#52] -Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), true, [id=#53] +(58) Exchange +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#50, sum#51] +Arguments: hashpartitioning(d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, 5), ENSURE_REQUIREMENTS, [id=#52] -(63) HashAggregate [codegen id : 34] -Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#51, sum#52] +(59) HashAggregate [codegen id : 30] +Input [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum#50, sum#51] Keys [5]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] Functions [2]: [sum(cast(sales_cnt#21 as bigint)), 
sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#54, sum(UnscaledValue(sales_amt#22))#55] -Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#21 as bigint))#54 AS sales_cnt#56, MakeDecimal(sum(UnscaledValue(sales_amt#22))#55,18,2) AS sales_amt#57] +Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#53, sum(UnscaledValue(sales_amt#22))#54] +Results [7]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sum(cast(sales_cnt#21 as bigint))#53 AS sales_cnt#55, MakeDecimal(sum(UnscaledValue(sales_amt#22))#54,18,2) AS sales_amt#56] -(64) Scan parquet default.catalog_sales +(60) Scan parquet default.catalog_sales Output [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Batched: true Location [not included in comparison]/{warehouse_dir}/catalog_sales] PushedFilters: [IsNotNull(cs_item_sk), IsNotNull(cs_sold_date_sk)] ReadSchema: struct -(65) ColumnarToRow [codegen id : 20] +(61) ColumnarToRow [codegen id : 18] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] -(66) Filter [codegen id : 20] +(62) Filter [codegen id : 18] Input [5]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5] Condition : (isnotnull(cs_item_sk#2) AND isnotnull(cs_sold_date_sk#1)) -(67) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(63) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(68) BroadcastHashJoin [codegen id : 20] +(64) BroadcastHashJoin [codegen id : 18] Left keys [1]: [cs_item_sk#2] -Right keys [1]: [i_item_sk#58] +Right keys [1]: [i_item_sk#57] Join condition: None -(69) Project [codegen id : 20] -Output [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(65) Project [codegen id : 18] +Output [9]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Input [10]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(70) Scan parquet default.date_dim -Output [2]: [d_date_sk#63, d_year#64] +(66) Scan parquet default.date_dim +Output [2]: [d_date_sk#62, d_year#63] Batched: true Location [not included in comparison]/{warehouse_dir}/date_dim] PushedFilters: [IsNotNull(d_year), EqualTo(d_year,2001), IsNotNull(d_date_sk)] ReadSchema: struct -(71) ColumnarToRow [codegen id : 18] -Input [2]: [d_date_sk#63, d_year#64] +(67) ColumnarToRow [codegen id : 16] +Input [2]: [d_date_sk#62, d_year#63] -(72) Filter [codegen id : 18] -Input [2]: [d_date_sk#63, d_year#64] -Condition : ((isnotnull(d_year#64) AND (d_year#64 = 2001)) AND isnotnull(d_date_sk#63)) +(68) Filter [codegen id : 16] +Input [2]: [d_date_sk#62, d_year#63] +Condition : ((isnotnull(d_year#63) AND (d_year#63 = 2001)) AND isnotnull(d_date_sk#62)) -(73) BroadcastExchange -Input [2]: [d_date_sk#63, d_year#64] -Arguments: 
HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#65] +(69) BroadcastExchange +Input [2]: [d_date_sk#62, d_year#63] +Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, false] as bigint)),false), [id=#64] -(74) BroadcastHashJoin [codegen id : 20] +(70) BroadcastHashJoin [codegen id : 18] Left keys [1]: [cs_sold_date_sk#1] -Right keys [1]: [d_date_sk#63] +Right keys [1]: [d_date_sk#62] Join condition: None -(75) Project [codegen id : 20] -Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64] -Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_date_sk#63, d_year#64] +(71) Project [codegen id : 18] +Output [9]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63] +Input [11]: [cs_sold_date_sk#1, cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_date_sk#62, d_year#63] -(76) ReusedExchange [Reuses operator id: 20] +(72) ReusedExchange [Reuses operator id: 20] Output [4]: [cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] -(77) BroadcastHashJoin [codegen id : 20] +(73) BroadcastHashJoin [codegen id : 18] Left keys [2]: [cs_order_number#3, cs_item_sk#2] Right keys [2]: [cr_order_number#17, cr_item_sk#16] Join condition: None -(78) Project [codegen id : 20] -Output [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, (cs_quantity#4 - coalesce(cr_return_quantity#18, 0)) AS sales_cnt#21, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#19, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#22] -Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64, cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] +(74) Project [codegen id : 18] +Output [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, (cs_quantity#4 - coalesce(cr_return_quantity#18, 0)) AS sales_cnt#21, CheckOverflow((promote_precision(cast(cs_ext_sales_price#5 as decimal(8,2))) - promote_precision(cast(coalesce(cr_return_amount#19, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#22] +Input [13]: [cs_item_sk#2, cs_order_number#3, cs_quantity#4, cs_ext_sales_price#5, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63, cr_item_sk#16, cr_order_number#17, cr_return_quantity#18, cr_return_amount#19] -(79) Scan parquet default.store_sales +(75) Scan parquet default.store_sales Output [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] Batched: true Location [not included in comparison]/{warehouse_dir}/store_sales] PushedFilters: [IsNotNull(ss_item_sk), IsNotNull(ss_sold_date_sk)] ReadSchema: struct -(80) ColumnarToRow [codegen id : 24] +(76) ColumnarToRow [codegen id : 22] Input [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27] -(81) Filter [codegen id : 24] +(77) Filter [codegen id : 22] Input [5]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, 
ss_quantity#26, ss_ext_sales_price#27] Condition : (isnotnull(ss_item_sk#24) AND isnotnull(ss_sold_date_sk#23)) -(82) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(78) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(83) BroadcastHashJoin [codegen id : 24] +(79) BroadcastHashJoin [codegen id : 22] Left keys [1]: [ss_item_sk#24] -Right keys [1]: [i_item_sk#58] +Right keys [1]: [i_item_sk#57] Join condition: None -(84) Project [codegen id : 24] -Output [9]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Input [10]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(80) Project [codegen id : 22] +Output [9]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Input [10]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(85) ReusedExchange [Reuses operator id: 73] -Output [2]: [d_date_sk#63, d_year#64] +(81) ReusedExchange [Reuses operator id: 69] +Output [2]: [d_date_sk#62, d_year#63] -(86) BroadcastHashJoin [codegen id : 24] +(82) BroadcastHashJoin [codegen id : 22] Left keys [1]: [ss_sold_date_sk#23] -Right keys [1]: [d_date_sk#63] +Right keys [1]: [d_date_sk#62] Join condition: None -(87) Project [codegen id : 24] -Output [9]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64] -Input [11]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_date_sk#63, d_year#64] +(83) Project [codegen id : 22] +Output [9]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63] +Input [11]: [ss_sold_date_sk#23, ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_date_sk#62, d_year#63] -(88) ReusedExchange [Reuses operator id: 35] +(84) ReusedExchange [Reuses operator id: 35] Output [4]: [sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] -(89) BroadcastHashJoin [codegen id : 24] +(85) BroadcastHashJoin [codegen id : 22] Left keys [2]: [cast(ss_ticket_number#25 as bigint), cast(ss_item_sk#24 as bigint)] Right keys [2]: [sr_ticket_number#29, sr_item_sk#28] Join condition: None -(90) Project [codegen id : 24] -Output [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, (ss_quantity#26 - coalesce(sr_return_quantity#30, 0)) AS sales_cnt#66, CheckOverflow((promote_precision(cast(ss_ext_sales_price#27 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#31, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#67] -Input [13]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64, sr_item_sk#28, 
sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] - -(91) Union - -(92) HashAggregate [codegen id : 25] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] - -(93) Exchange -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22, 5), true, [id=#68] - -(94) HashAggregate [codegen id : 26] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Functions: [] -Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +(86) Project [codegen id : 22] +Output [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, (ss_quantity#26 - coalesce(sr_return_quantity#30, 0)) AS sales_cnt#65, CheckOverflow((promote_precision(cast(ss_ext_sales_price#27 as decimal(8,2))) - promote_precision(cast(coalesce(sr_return_amt#31, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#66] +Input [13]: [ss_item_sk#24, ss_ticket_number#25, ss_quantity#26, ss_ext_sales_price#27, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63, sr_item_sk#28, sr_ticket_number#29, sr_return_quantity#30, sr_return_amt#31] -(95) Scan parquet default.web_sales -Output [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(87) Scan parquet default.web_sales +Output [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] Batched: true Location [not included in comparison]/{warehouse_dir}/web_sales] PushedFilters: [IsNotNull(ws_item_sk), IsNotNull(ws_sold_date_sk)] ReadSchema: struct -(96) ColumnarToRow [codegen id : 30] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] +(88) ColumnarToRow [codegen id : 26] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] -(97) Filter [codegen id : 30] -Input [5]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40] -Condition : (isnotnull(ws_item_sk#37) AND isnotnull(ws_sold_date_sk#36)) +(89) Filter [codegen id : 26] +Input [5]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39] +Condition : (isnotnull(ws_item_sk#36) AND isnotnull(ws_sold_date_sk#35)) -(98) ReusedExchange [Reuses operator id: 8] -Output [5]: [i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(90) ReusedExchange [Reuses operator id: 8] +Output [5]: [i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(99) BroadcastHashJoin [codegen id : 30] -Left keys [1]: [ws_item_sk#37] -Right keys [1]: [i_item_sk#58] +(91) BroadcastHashJoin [codegen id : 26] +Left keys [1]: [ws_item_sk#36] +Right keys [1]: [i_item_sk#57] Join condition: None 
-(100) Project [codegen id : 30] -Output [9]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Input [10]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_item_sk#58, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(92) Project [codegen id : 26] +Output [9]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Input [10]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_item_sk#57, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] -(101) ReusedExchange [Reuses operator id: 73] -Output [2]: [d_date_sk#63, d_year#64] +(93) ReusedExchange [Reuses operator id: 69] +Output [2]: [d_date_sk#62, d_year#63] -(102) BroadcastHashJoin [codegen id : 30] -Left keys [1]: [ws_sold_date_sk#36] -Right keys [1]: [d_date_sk#63] +(94) BroadcastHashJoin [codegen id : 26] +Left keys [1]: [ws_sold_date_sk#35] +Right keys [1]: [d_date_sk#62] Join condition: None -(103) Project [codegen id : 30] -Output [9]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64] -Input [11]: [ws_sold_date_sk#36, ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_date_sk#63, d_year#64] +(95) Project [codegen id : 26] +Output [9]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63] +Input [11]: [ws_sold_date_sk#35, ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_date_sk#62, d_year#63] -(104) ReusedExchange [Reuses operator id: 54] -Output [4]: [wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(96) ReusedExchange [Reuses operator id: 50] +Output [4]: [wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(105) BroadcastHashJoin [codegen id : 30] -Left keys [2]: [cast(ws_order_number#38 as bigint), cast(ws_item_sk#37 as bigint)] -Right keys [2]: [wr_order_number#42, wr_item_sk#41] +(97) BroadcastHashJoin [codegen id : 26] +Left keys [2]: [cast(ws_order_number#37 as bigint), cast(ws_item_sk#36 as bigint)] +Right keys [2]: [wr_order_number#41, wr_item_sk#40] Join condition: None -(106) Project [codegen id : 30] -Output [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, (ws_quantity#39 - coalesce(wr_return_quantity#43, 0)) AS sales_cnt#69, CheckOverflow((promote_precision(cast(ws_ext_sales_price#40 as decimal(8,2))) - promote_precision(cast(coalesce(wr_return_amt#44, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#70] -Input [13]: [ws_item_sk#37, ws_order_number#38, ws_quantity#39, ws_ext_sales_price#40, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, d_year#64, wr_item_sk#41, wr_order_number#42, wr_return_quantity#43, wr_return_amt#44] +(98) Project [codegen id : 26] +Output [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, (ws_quantity#38 - coalesce(wr_return_quantity#42, 0)) AS sales_cnt#67, CheckOverflow((promote_precision(cast(ws_ext_sales_price#39 as decimal(8,2))) 
- promote_precision(cast(coalesce(wr_return_amt#43, 0.00) as decimal(8,2)))), DecimalType(8,2), true) AS sales_amt#68] +Input [13]: [ws_item_sk#36, ws_order_number#37, ws_quantity#38, ws_ext_sales_price#39, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, d_year#63, wr_item_sk#40, wr_order_number#41, wr_return_quantity#42, wr_return_amt#43] -(107) Union +(99) Union -(108) HashAggregate [codegen id : 31] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +(100) HashAggregate [codegen id : 27] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Keys [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] -(109) Exchange -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Arguments: hashpartitioning(d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22, 5), true, [id=#71] +(101) Exchange +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Arguments: hashpartitioning(d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22, 5), ENSURE_REQUIREMENTS, [id=#69] -(110) HashAggregate [codegen id : 32] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +(102) HashAggregate [codegen id : 28] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Keys [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] Functions: [] Aggregate Attributes: [] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] -(111) HashAggregate [codegen id : 32] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#21, sales_amt#22] -Keys [5]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(103) HashAggregate [codegen id : 28] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#21, sales_amt#22] +Keys [5]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] Functions [2]: [partial_sum(cast(sales_cnt#21 as bigint)), partial_sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum#72, sum#73] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum#74, sum#75] +Aggregate Attributes [2]: [sum#70, sum#71] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum#72, sum#73] -(112) Exchange -Input [7]: 
[d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum#74, sum#75] -Arguments: hashpartitioning(d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, 5), true, [id=#76] +(104) Exchange +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum#72, sum#73] +Arguments: hashpartitioning(d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, 5), ENSURE_REQUIREMENTS, [id=#74] -(113) HashAggregate [codegen id : 33] -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum#74, sum#75] -Keys [5]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] +(105) HashAggregate [codegen id : 29] +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum#72, sum#73] +Keys [5]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] Functions [2]: [sum(cast(sales_cnt#21 as bigint)), sum(UnscaledValue(sales_amt#22))] -Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#77, sum(UnscaledValue(sales_amt#22))#78] -Results [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sum(cast(sales_cnt#21 as bigint))#77 AS sales_cnt#79, MakeDecimal(sum(UnscaledValue(sales_amt#22))#78,18,2) AS sales_amt#80] +Aggregate Attributes [2]: [sum(cast(sales_cnt#21 as bigint))#75, sum(UnscaledValue(sales_amt#22))#76] +Results [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sum(cast(sales_cnt#21 as bigint))#75 AS sales_cnt#77, MakeDecimal(sum(UnscaledValue(sales_amt#22))#76,18,2) AS sales_amt#78] -(114) BroadcastExchange -Input [7]: [d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#79, sales_amt#80] -Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true], input[4, int, true]),false), [id=#81] +(106) BroadcastExchange +Input [7]: [d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#77, sales_amt#78] +Arguments: HashedRelationBroadcastMode(List(input[1, int, true], input[2, int, true], input[3, int, true], input[4, int, true]),false), [id=#79] -(115) BroadcastHashJoin [codegen id : 34] +(107) BroadcastHashJoin [codegen id : 30] Left keys [4]: [i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11] -Right keys [4]: [i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62] -Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#56 as decimal(17,2))) / promote_precision(cast(sales_cnt#79 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) +Right keys [4]: [i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61] +Join condition: (CheckOverflow((promote_precision(cast(sales_cnt#55 as decimal(17,2))) / promote_precision(cast(sales_cnt#77 as decimal(17,2)))), DecimalType(37,20), true) < 0.90000000000000000000) -(116) Project [codegen id : 34] -Output [10]: [d_year#64 AS prev_year#82, d_year#14 AS year#83, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#79 AS prev_yr_cnt#84, sales_cnt#56 AS curr_yr_cnt#85, (sales_cnt#56 - sales_cnt#79) AS sales_cnt_diff#86, CheckOverflow((promote_precision(cast(sales_amt#57 as decimal(19,2))) - promote_precision(cast(sales_amt#80 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#87] -Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#56, sales_amt#57, 
d_year#64, i_brand_id#59, i_class_id#60, i_category_id#61, i_manufact_id#62, sales_cnt#79, sales_amt#80] +(108) Project [codegen id : 30] +Output [10]: [d_year#63 AS prev_year#80, d_year#14 AS year#81, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#77 AS prev_yr_cnt#82, sales_cnt#55 AS curr_yr_cnt#83, (sales_cnt#55 - sales_cnt#77) AS sales_cnt_diff#84, CheckOverflow((promote_precision(cast(sales_amt#56 as decimal(19,2))) - promote_precision(cast(sales_amt#78 as decimal(19,2)))), DecimalType(19,2), true) AS sales_amt_diff#85] +Input [14]: [d_year#14, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, sales_cnt#55, sales_amt#56, d_year#63, i_brand_id#58, i_class_id#59, i_category_id#60, i_manufact_id#61, sales_cnt#77, sales_amt#78] -(117) TakeOrderedAndProject -Input [10]: [prev_year#82, year#83, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#84, curr_yr_cnt#85, sales_cnt_diff#86, sales_amt_diff#87] -Arguments: 100, [sales_cnt_diff#86 ASC NULLS FIRST, sales_amt_diff#87 ASC NULLS FIRST], [prev_year#82, year#83, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#84, curr_yr_cnt#85, sales_cnt_diff#86, sales_amt_diff#87] +(109) TakeOrderedAndProject +Input [10]: [prev_year#80, year#81, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#82, curr_yr_cnt#83, sales_cnt_diff#84, sales_amt_diff#85] +Arguments: 100, [sales_cnt_diff#84 ASC NULLS FIRST, sales_amt_diff#85 ASC NULLS FIRST], [prev_year#80, year#81, i_brand_id#7, i_class_id#8, i_category_id#9, i_manufact_id#11, prev_yr_cnt#82, curr_yr_cnt#83, sales_cnt_diff#84, sales_amt_diff#85] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/simplified.txt index d1c20801ec5fd..068187c44771a 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q75/simplified.txt @@ -1,83 +1,75 @@ TakeOrderedAndProject [sales_cnt_diff,sales_amt_diff,prev_year,year,i_brand_id,i_class_id,i_category_id,i_manufact_id,prev_yr_cnt,curr_yr_cnt] - WholeStageCodegen (34) + WholeStageCodegen (30) Project [d_year,d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt,sales_amt,sales_amt] BroadcastHashJoin [i_brand_id,i_class_id,i_category_id,i_manufact_id,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_cnt] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #1 - WholeStageCodegen (16) + WholeStageCodegen (14) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #2 - WholeStageCodegen (15) + WholeStageCodegen (13) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union - WholeStageCodegen (10) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] 
#3 - WholeStageCodegen (9) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Union - WholeStageCodegen (4) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - BroadcastExchange #4 - WholeStageCodegen (1) - Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (2) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (3) - Filter [cr_order_number,cr_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] - WholeStageCodegen (8) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 + WholeStageCodegen (4) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] + BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + BroadcastExchange #3 + WholeStageCodegen (1) + Project [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] + Filter [i_category,i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] + ColumnarToRow 
InputAdapter - ReusedExchange [d_date_sk,d_year] #5 - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (7) - Filter [sr_ticket_number,sr_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] - WholeStageCodegen (14) + Scan parquet default.item [i_item_sk,i_brand_id,i_class_id,i_category_id,i_category,i_manufact_id] + InputAdapter + BroadcastExchange #4 + WholeStageCodegen (2) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (3) + Filter [cr_order_number,cr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] + WholeStageCodegen (8) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 + InputAdapter + ReusedExchange [d_date_sk,d_year] #4 + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (7) + Filter [sr_ticket_number,sr_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] + WholeStageCodegen (12) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] BroadcastHashJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] Project [ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] @@ -89,79 +81,71 @@ TakeOrderedAndProject [sales_cnt_diff,sales_amt_diff,prev_year,year,i_brand_id,i InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 InputAdapter - ReusedExchange [d_date_sk,d_year] #5 + ReusedExchange [d_date_sk,d_year] #4 InputAdapter - BroadcastExchange #8 - WholeStageCodegen (13) + BroadcastExchange #7 + WholeStageCodegen (11) Filter [wr_order_number,wr_item_sk] ColumnarToRow InputAdapter Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] InputAdapter - BroadcastExchange #9 - WholeStageCodegen (33) + BroadcastExchange #8 + WholeStageCodegen (29) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sum,sum] [sum(cast(sales_cnt as bigint)),sum(UnscaledValue(sales_amt)),sales_cnt,sales_amt,sum,sum] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #10 - WholeStageCodegen (32) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id] #9 + 
WholeStageCodegen (28) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] [sum,sum,sum,sum] HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #11 - WholeStageCodegen (31) + Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #10 + WholeStageCodegen (27) HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] InputAdapter Union + WholeStageCodegen (18) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] + BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] + Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Filter [cs_item_sk,cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (16) + Filter [d_year,d_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.date_dim [d_date_sk,d_year] + InputAdapter + ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #5 + WholeStageCodegen (22) + Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] + BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] + Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Filter [ss_item_sk,ss_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] + InputAdapter + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 + InputAdapter + ReusedExchange [d_date_sk,d_year] #11 + InputAdapter + ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #6 WholeStageCodegen (26) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Exchange [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] #12 - WholeStageCodegen (25) - HashAggregate [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,sales_cnt,sales_amt] - InputAdapter - Union - WholeStageCodegen (20) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,cs_quantity,cr_return_quantity,cs_ext_sales_price,cr_return_amount] - BroadcastHashJoin [cs_order_number,cs_item_sk,cr_order_number,cr_item_sk] - Project [cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project 
[cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Filter [cs_item_sk,cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_item_sk,cs_order_number,cs_quantity,cs_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 - InputAdapter - BroadcastExchange #13 - WholeStageCodegen (18) - Filter [d_year,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_year] - InputAdapter - ReusedExchange [cr_item_sk,cr_order_number,cr_return_quantity,cr_return_amount] #6 - WholeStageCodegen (24) - Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ss_quantity,sr_return_quantity,ss_ext_sales_price,sr_return_amt] - BroadcastHashJoin [ss_ticket_number,ss_item_sk,sr_ticket_number,sr_item_sk] - Project [ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Filter [ss_item_sk,ss_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_ticket_number,ss_quantity,ss_ext_sales_price] - InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 - InputAdapter - ReusedExchange [d_date_sk,d_year] #13 - InputAdapter - ReusedExchange [sr_item_sk,sr_ticket_number,sr_return_quantity,sr_return_amt] #7 - WholeStageCodegen (30) Project [d_year,i_brand_id,i_class_id,i_category_id,i_manufact_id,ws_quantity,wr_return_quantity,ws_ext_sales_price,wr_return_amt] BroadcastHashJoin [ws_order_number,ws_item_sk,wr_order_number,wr_item_sk] Project [ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price,i_brand_id,i_class_id,i_category_id,i_manufact_id,d_year] @@ -173,8 +157,8 @@ TakeOrderedAndProject [sales_cnt_diff,sales_amt_diff,prev_year,year,i_brand_id,i InputAdapter Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_order_number,ws_quantity,ws_ext_sales_price] InputAdapter - ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #4 + ReusedExchange [i_item_sk,i_brand_id,i_class_id,i_category_id,i_manufact_id] #3 InputAdapter - ReusedExchange [d_date_sk,d_year] #13 + ReusedExchange [d_date_sk,d_year] #11 InputAdapter - ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #8 + ReusedExchange [wr_item_sk,wr_order_number,wr_return_quantity,wr_return_amt] #7 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt index ac49cc0548c08..56a010e2ddb91 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/explain.txt @@ -1,112 +1,108 @@ == Physical Plan == -TakeOrderedAndProject (108) -+- * HashAggregate (107) - +- Exchange (106) - +- * HashAggregate (105) - +- Union (104) - :- * HashAggregate (98) - : +- Exchange (97) - : +- * HashAggregate (96) - : +- Union (95) - : :- * HashAggregate (89) - : : +- Exchange (88) - : : +- * HashAggregate (87) - : : +- Union 
(86) - : : :- * Project (34) - : : : +- * BroadcastHashJoin LeftOuter BuildRight (33) - : : : :- * HashAggregate (19) - : : : : +- Exchange (18) - : : : : +- * HashAggregate (17) - : : : : +- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- * Project (10) - : : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : : :- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.store_sales (1) - : : : : : +- BroadcastExchange (8) - : : : : : +- * Project (7) - : : : : : +- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.date_dim (4) - : : : : +- BroadcastExchange (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.store (11) - : : : +- BroadcastExchange (32) - : : : +- * HashAggregate (31) - : : : +- Exchange (30) - : : : +- * HashAggregate (29) - : : : +- * Project (28) - : : : +- * BroadcastHashJoin Inner BuildRight (27) - : : : :- * Project (25) - : : : : +- * BroadcastHashJoin Inner BuildRight (24) - : : : : :- * Filter (22) - : : : : : +- * ColumnarToRow (21) - : : : : : +- Scan parquet default.store_returns (20) - : : : : +- ReusedExchange (23) - : : : +- ReusedExchange (26) - : : :- * Project (55) - : : : +- BroadcastNestedLoopJoin Inner BuildRight (54) - : : : :- * HashAggregate (43) - : : : : +- Exchange (42) - : : : : +- * HashAggregate (41) - : : : : +- * Project (40) - : : : : +- * BroadcastHashJoin Inner BuildRight (39) - : : : : :- * Filter (37) - : : : : : +- * ColumnarToRow (36) - : : : : : +- Scan parquet default.catalog_sales (35) - : : : : +- ReusedExchange (38) - : : : +- BroadcastExchange (53) - : : : +- * HashAggregate (52) - : : : +- Exchange (51) - : : : +- * HashAggregate (50) - : : : +- * Project (49) - : : : +- * BroadcastHashJoin Inner BuildRight (48) - : : : :- * Filter (46) - : : : : +- * ColumnarToRow (45) - : : : : +- Scan parquet default.catalog_returns (44) - : : : +- ReusedExchange (47) - : : +- * Project (85) - : : +- * BroadcastHashJoin LeftOuter BuildRight (84) - : : :- * HashAggregate (70) - : : : +- Exchange (69) - : : : +- * HashAggregate (68) - : : : +- * Project (67) - : : : +- * BroadcastHashJoin Inner BuildRight (66) - : : : :- * Project (61) - : : : : +- * BroadcastHashJoin Inner BuildRight (60) - : : : : :- * Filter (58) - : : : : : +- * ColumnarToRow (57) - : : : : : +- Scan parquet default.web_sales (56) - : : : : +- ReusedExchange (59) - : : : +- BroadcastExchange (65) - : : : +- * Filter (64) - : : : +- * ColumnarToRow (63) - : : : +- Scan parquet default.web_page (62) - : : +- BroadcastExchange (83) - : : +- * HashAggregate (82) - : : +- Exchange (81) - : : +- * HashAggregate (80) - : : +- * Project (79) - : : +- * BroadcastHashJoin Inner BuildRight (78) - : : :- * Project (76) - : : : +- * BroadcastHashJoin Inner BuildRight (75) - : : : :- * Filter (73) - : : : : +- * ColumnarToRow (72) - : : : : +- Scan parquet default.web_returns (71) - : : : +- ReusedExchange (74) - : : +- ReusedExchange (77) - : +- * HashAggregate (94) - : +- Exchange (93) - : +- * HashAggregate (92) - : +- * HashAggregate (91) - : +- ReusedExchange (90) - +- * HashAggregate (103) - +- Exchange (102) - +- * HashAggregate (101) - +- * HashAggregate (100) - +- ReusedExchange (99) +TakeOrderedAndProject (104) ++- * HashAggregate (103) + +- Exchange (102) + +- * HashAggregate (101) + +- Union (100) + :- * HashAggregate (89) + : +- Exchange (88) + : +- * HashAggregate (87) + : +- Union (86) + : :- * Project (34) + : : +- * 
BroadcastHashJoin LeftOuter BuildRight (33) + : : :- * HashAggregate (19) + : : : +- Exchange (18) + : : : +- * HashAggregate (17) + : : : +- * Project (16) + : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : :- * Project (10) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.date_dim (4) + : : : +- BroadcastExchange (14) + : : : +- * Filter (13) + : : : +- * ColumnarToRow (12) + : : : +- Scan parquet default.store (11) + : : +- BroadcastExchange (32) + : : +- * HashAggregate (31) + : : +- Exchange (30) + : : +- * HashAggregate (29) + : : +- * Project (28) + : : +- * BroadcastHashJoin Inner BuildRight (27) + : : :- * Project (25) + : : : +- * BroadcastHashJoin Inner BuildRight (24) + : : : :- * Filter (22) + : : : : +- * ColumnarToRow (21) + : : : : +- Scan parquet default.store_returns (20) + : : : +- ReusedExchange (23) + : : +- ReusedExchange (26) + : :- * Project (55) + : : +- BroadcastNestedLoopJoin Inner BuildRight (54) + : : :- * HashAggregate (43) + : : : +- Exchange (42) + : : : +- * HashAggregate (41) + : : : +- * Project (40) + : : : +- * BroadcastHashJoin Inner BuildRight (39) + : : : :- * Filter (37) + : : : : +- * ColumnarToRow (36) + : : : : +- Scan parquet default.catalog_sales (35) + : : : +- ReusedExchange (38) + : : +- BroadcastExchange (53) + : : +- * HashAggregate (52) + : : +- Exchange (51) + : : +- * HashAggregate (50) + : : +- * Project (49) + : : +- * BroadcastHashJoin Inner BuildRight (48) + : : :- * Filter (46) + : : : +- * ColumnarToRow (45) + : : : +- Scan parquet default.catalog_returns (44) + : : +- ReusedExchange (47) + : +- * Project (85) + : +- * BroadcastHashJoin LeftOuter BuildRight (84) + : :- * HashAggregate (70) + : : +- Exchange (69) + : : +- * HashAggregate (68) + : : +- * Project (67) + : : +- * BroadcastHashJoin Inner BuildRight (66) + : : :- * Project (61) + : : : +- * BroadcastHashJoin Inner BuildRight (60) + : : : :- * Filter (58) + : : : : +- * ColumnarToRow (57) + : : : : +- Scan parquet default.web_sales (56) + : : : +- ReusedExchange (59) + : : +- BroadcastExchange (65) + : : +- * Filter (64) + : : +- * ColumnarToRow (63) + : : +- Scan parquet default.web_page (62) + : +- BroadcastExchange (83) + : +- * HashAggregate (82) + : +- Exchange (81) + : +- * HashAggregate (80) + : +- * Project (79) + : +- * BroadcastHashJoin Inner BuildRight (78) + : :- * Project (76) + : : +- * BroadcastHashJoin Inner BuildRight (75) + : : :- * Filter (73) + : : : +- * ColumnarToRow (72) + : : : +- Scan parquet default.web_returns (71) + : : +- ReusedExchange (74) + : +- ReusedExchange (77) + :- * HashAggregate (94) + : +- Exchange (93) + : +- * HashAggregate (92) + : +- * HashAggregate (91) + : +- ReusedExchange (90) + +- * HashAggregate (99) + +- Exchange (98) + +- * HashAggregate (97) + +- * HashAggregate (96) + +- ReusedExchange (95) (1) Scan parquet default.store_sales @@ -190,7 +186,7 @@ Results [3]: [s_store_sk#8, sum#12, sum#13] (18) Exchange Input [3]: [s_store_sk#8, sum#12, sum#13] -Arguments: hashpartitioning(s_store_sk#8, 5), true, [id=#14] +Arguments: hashpartitioning(s_store_sk#8, 5), ENSURE_REQUIREMENTS, [id=#14] (19) HashAggregate [codegen id : 8] Input [3]: [s_store_sk#8, sum#12, sum#13] @@ -246,7 +242,7 @@ Results [3]: [s_store_sk#23, sum#26, sum#27] (30) Exchange Input 
[3]: [s_store_sk#23, sum#26, sum#27] -Arguments: hashpartitioning(s_store_sk#23, 5), true, [id=#28] +Arguments: hashpartitioning(s_store_sk#23, 5), ENSURE_REQUIREMENTS, [id=#28] (31) HashAggregate [codegen id : 7] Input [3]: [s_store_sk#23, sum#26, sum#27] @@ -303,7 +299,7 @@ Results [3]: [cs_call_center_sk#39, sum#44, sum#45] (42) Exchange Input [3]: [cs_call_center_sk#39, sum#44, sum#45] -Arguments: hashpartitioning(cs_call_center_sk#39, 5), true, [id=#46] +Arguments: hashpartitioning(cs_call_center_sk#39, 5), ENSURE_REQUIREMENTS, [id=#46] (43) HashAggregate [codegen id : 11] Input [3]: [cs_call_center_sk#39, sum#44, sum#45] @@ -347,7 +343,7 @@ Results [2]: [sum#56, sum#57] (51) Exchange Input [2]: [sum#56, sum#57] -Arguments: SinglePartition, true, [id=#58] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#58] (52) HashAggregate [codegen id : 14] Input [2]: [sum#56, sum#57] @@ -429,7 +425,7 @@ Results [3]: [wp_web_page_sk#71, sum#75, sum#76] (69) Exchange Input [3]: [wp_web_page_sk#71, sum#75, sum#76] -Arguments: hashpartitioning(wp_web_page_sk#71, 5), true, [id=#77] +Arguments: hashpartitioning(wp_web_page_sk#71, 5), ENSURE_REQUIREMENTS, [id=#77] (70) HashAggregate [codegen id : 23] Input [3]: [wp_web_page_sk#71, sum#75, sum#76] @@ -485,7 +481,7 @@ Results [3]: [wp_web_page_sk#86, sum#89, sum#90] (81) Exchange Input [3]: [wp_web_page_sk#86, sum#89, sum#90] -Arguments: hashpartitioning(wp_web_page_sk#86, 5), true, [id=#91] +Arguments: hashpartitioning(wp_web_page_sk#86, 5), ENSURE_REQUIREMENTS, [id=#91] (82) HashAggregate [codegen id : 22] Input [3]: [wp_web_page_sk#86, sum#89, sum#90] @@ -518,7 +514,7 @@ Results [8]: [channel#34, id#35, sum#107, isEmpty#108, sum#109, isEmpty#110, sum (88) Exchange Input [8]: [channel#34, id#35, sum#107, isEmpty#108, sum#109, isEmpty#110, sum#111, isEmpty#112] -Arguments: hashpartitioning(channel#34, id#35, 5), true, [id=#113] +Arguments: hashpartitioning(channel#34, id#35, 5), ENSURE_REQUIREMENTS, [id=#113] (89) HashAggregate [codegen id : 25] Input [8]: [channel#34, id#35, sum#107, isEmpty#108, sum#109, isEmpty#110, sum#111, isEmpty#112] @@ -546,7 +542,7 @@ Results [7]: [channel#34, sum#139, isEmpty#140, sum#141, isEmpty#142, sum#143, i (93) Exchange Input [7]: [channel#34, sum#139, isEmpty#140, sum#141, isEmpty#142, sum#143, isEmpty#144] -Arguments: hashpartitioning(channel#34, 5), true, [id=#145] +Arguments: hashpartitioning(channel#34, 5), ENSURE_REQUIREMENTS, [id=#145] (94) HashAggregate [codegen id : 51] Input [7]: [channel#34, sum#139, isEmpty#140, sum#141, isEmpty#142, sum#143, isEmpty#144] @@ -555,75 +551,55 @@ Functions [3]: [sum(sales#130), sum(returns#131), sum(profit#132)] Aggregate Attributes [3]: [sum(sales#130)#146, sum(returns#131)#147, sum(profit#132)#148] Results [5]: [channel#34, null AS id#149, sum(sales#130)#146 AS sales#150, sum(returns#131)#147 AS returns#151, sum(profit#132)#148 AS profit#152] -(95) Union +(95) ReusedExchange [Reuses operator id: 88] +Output [8]: [channel#34, id#35, sum#153, isEmpty#154, sum#155, isEmpty#156, sum#157, isEmpty#158] -(96) HashAggregate [codegen id : 52] -Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] - -(97) Exchange -Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Arguments: hashpartitioning(channel#34, id#35, sales#117, returns#118, profit#119, 5), true, [id=#153] - -(98) 
HashAggregate [codegen id : 53] -Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] - -(99) ReusedExchange [Reuses operator id: 88] -Output [8]: [channel#34, id#35, sum#154, isEmpty#155, sum#156, isEmpty#157, sum#158, isEmpty#159] - -(100) HashAggregate [codegen id : 78] -Input [8]: [channel#34, id#35, sum#154, isEmpty#155, sum#156, isEmpty#157, sum#158, isEmpty#159] +(96) HashAggregate [codegen id : 76] +Input [8]: [channel#34, id#35, sum#153, isEmpty#154, sum#155, isEmpty#156, sum#157, isEmpty#158] Keys [2]: [channel#34, id#35] -Functions [3]: [sum(sales#17), sum(returns#36), sum(profit#160)] -Aggregate Attributes [3]: [sum(sales#17)#161, sum(returns#36)#162, sum(profit#160)#163] -Results [3]: [sum(sales#17)#161 AS sales#130, sum(returns#36)#162 AS returns#131, sum(profit#160)#163 AS profit#132] +Functions [3]: [sum(sales#17), sum(returns#36), sum(profit#159)] +Aggregate Attributes [3]: [sum(sales#17)#160, sum(returns#36)#161, sum(profit#159)#162] +Results [3]: [sum(sales#17)#160 AS sales#130, sum(returns#36)#161 AS returns#131, sum(profit#159)#162 AS profit#132] -(101) HashAggregate [codegen id : 78] +(97) HashAggregate [codegen id : 76] Input [3]: [sales#130, returns#131, profit#132] Keys: [] Functions [3]: [partial_sum(sales#130), partial_sum(returns#131), partial_sum(profit#132)] -Aggregate Attributes [6]: [sum#164, isEmpty#165, sum#166, isEmpty#167, sum#168, isEmpty#169] -Results [6]: [sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] +Aggregate Attributes [6]: [sum#163, isEmpty#164, sum#165, isEmpty#166, sum#167, isEmpty#168] +Results [6]: [sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, isEmpty#174] -(102) Exchange -Input [6]: [sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] -Arguments: SinglePartition, true, [id=#176] +(98) Exchange +Input [6]: [sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, isEmpty#174] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#175] -(103) HashAggregate [codegen id : 79] -Input [6]: [sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] +(99) HashAggregate [codegen id : 77] +Input [6]: [sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, isEmpty#174] Keys: [] Functions [3]: [sum(sales#130), sum(returns#131), sum(profit#132)] -Aggregate Attributes [3]: [sum(sales#130)#177, sum(returns#131)#178, sum(profit#132)#179] -Results [5]: [null AS channel#180, null AS id#181, sum(sales#130)#177 AS sales#182, sum(returns#131)#178 AS returns#183, sum(profit#132)#179 AS profit#184] +Aggregate Attributes [3]: [sum(sales#130)#176, sum(returns#131)#177, sum(profit#132)#178] +Results [5]: [null AS channel#179, null AS id#180, sum(sales#130)#176 AS sales#181, sum(returns#131)#177 AS returns#182, sum(profit#132)#178 AS profit#183] -(104) Union +(100) Union -(105) HashAggregate [codegen id : 80] +(101) HashAggregate [codegen id : 78] Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Functions: [] Aggregate Attributes: [] Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -(106) Exchange +(102) Exchange Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Arguments: hashpartitioning(channel#34, id#35, sales#117, returns#118, profit#119, 5), true, [id=#185] +Arguments: hashpartitioning(channel#34, id#35, 
sales#117, returns#118, profit#119, 5), ENSURE_REQUIREMENTS, [id=#184] -(107) HashAggregate [codegen id : 81] +(103) HashAggregate [codegen id : 79] Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Functions: [] Aggregate Attributes: [] Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -(108) TakeOrderedAndProject +(104) TakeOrderedAndProject Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Arguments: 100, [channel#34 ASC NULLS FIRST, id#35 ASC NULLS FIRST], [channel#34, id#35, sales#117, returns#118, profit#119] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/simplified.txt index 92c25891f940e..3a5d78047c24b 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a.sf100/simplified.txt @@ -1,172 +1,164 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] - WholeStageCodegen (81) + WholeStageCodegen (79) HashAggregate [channel,id,sales,returns,profit] InputAdapter Exchange [channel,id,sales,returns,profit] #1 - WholeStageCodegen (80) + WholeStageCodegen (78) HashAggregate [channel,id,sales,returns,profit] InputAdapter Union - WholeStageCodegen (53) - HashAggregate [channel,id,sales,returns,profit] + WholeStageCodegen (25) + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel,id,sales,returns,profit] #2 - WholeStageCodegen (52) - HashAggregate [channel,id,sales,returns,profit] + Exchange [channel,id] #2 + WholeStageCodegen (24) + HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter Union - WholeStageCodegen (25) - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [channel,id] #3 - WholeStageCodegen (24) - HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (8) - Project [s_store_sk,sales,returns,profit,profit_loss] - BroadcastHashJoin [s_store_sk,s_store_sk] - HashAggregate [s_store_sk,sum,sum] [sum(UnscaledValue(ss_ext_sales_price)),sum(UnscaledValue(ss_net_profit)),sales,profit,sum,sum] + WholeStageCodegen (8) + Project [s_store_sk,sales,returns,profit,profit_loss] + BroadcastHashJoin [s_store_sk,s_store_sk] + HashAggregate [s_store_sk,sum,sum] [sum(UnscaledValue(ss_ext_sales_price)),sum(UnscaledValue(ss_net_profit)),sales,profit,sum,sum] + InputAdapter + Exchange [s_store_sk] #3 + WholeStageCodegen (3) + HashAggregate [s_store_sk,ss_ext_sales_price,ss_net_profit] [sum,sum,sum,sum] + Project [ss_ext_sales_price,ss_net_profit,s_store_sk] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_ext_sales_price,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] InputAdapter - Exchange [s_store_sk] #4 - WholeStageCodegen (3) - HashAggregate 
[s_store_sk,ss_ext_sales_price,ss_net_profit] [sum,sum,sum,sum] - Project [ss_ext_sales_price,ss_net_profit,s_store_sk] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_store_sk,ss_ext_sales_price,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_date,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (7) - HashAggregate [s_store_sk,sum,sum] [sum(UnscaledValue(sr_return_amt)),sum(UnscaledValue(sr_net_loss)),returns,profit_loss,sum,sum] - InputAdapter - Exchange [s_store_sk] #8 - WholeStageCodegen (6) - HashAggregate [s_store_sk,sr_return_amt,sr_net_loss] [sum,sum,sum,sum] - Project [sr_return_amt,sr_net_loss,s_store_sk] - BroadcastHashJoin [sr_returned_date_sk,d_date_sk] - Project [sr_returned_date_sk,sr_return_amt,sr_net_loss,s_store_sk] - BroadcastHashJoin [sr_store_sk,s_store_sk] - Filter [sr_returned_date_sk,sr_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] - InputAdapter - ReusedExchange [s_store_sk] #6 - InputAdapter - ReusedExchange [d_date_sk] #5 - WholeStageCodegen (15) - Project [cs_call_center_sk,sales,returns,profit,profit_loss] + Scan parquet default.date_dim [d_date_sk,d_date] InputAdapter - BroadcastNestedLoopJoin - WholeStageCodegen (11) - HashAggregate [cs_call_center_sk,sum,sum] [sum(UnscaledValue(cs_ext_sales_price)),sum(UnscaledValue(cs_net_profit)),sales,profit,sum,sum] + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (7) + HashAggregate [s_store_sk,sum,sum] [sum(UnscaledValue(sr_return_amt)),sum(UnscaledValue(sr_net_loss)),returns,profit_loss,sum,sum] + InputAdapter + Exchange [s_store_sk] #7 + WholeStageCodegen (6) + HashAggregate [s_store_sk,sr_return_amt,sr_net_loss] [sum,sum,sum,sum] + Project [sr_return_amt,sr_net_loss,s_store_sk] + BroadcastHashJoin [sr_returned_date_sk,d_date_sk] + Project [sr_returned_date_sk,sr_return_amt,sr_net_loss,s_store_sk] + BroadcastHashJoin [sr_store_sk,s_store_sk] + Filter [sr_returned_date_sk,sr_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] + InputAdapter + ReusedExchange [s_store_sk] #5 + InputAdapter + ReusedExchange [d_date_sk] #4 + WholeStageCodegen (15) + Project [cs_call_center_sk,sales,returns,profit,profit_loss] + InputAdapter + BroadcastNestedLoopJoin + WholeStageCodegen (11) + HashAggregate [cs_call_center_sk,sum,sum] [sum(UnscaledValue(cs_ext_sales_price)),sum(UnscaledValue(cs_net_profit)),sales,profit,sum,sum] + InputAdapter + Exchange [cs_call_center_sk] #8 + WholeStageCodegen (10) + HashAggregate [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] [sum,sum,sum,sum] + Project [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] + BroadcastHashJoin 
[cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk] + ColumnarToRow InputAdapter - Exchange [cs_call_center_sk] #9 - WholeStageCodegen (10) - HashAggregate [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] [sum,sum,sum,sum] - Project [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_call_center_sk,cs_ext_sales_price,cs_net_profit] - InputAdapter - ReusedExchange [d_date_sk] #5 - BroadcastExchange #10 - WholeStageCodegen (14) - HashAggregate [sum,sum] [sum(UnscaledValue(cr_return_amount)),sum(UnscaledValue(cr_net_loss)),returns,profit_loss,sum,sum] + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_call_center_sk,cs_ext_sales_price,cs_net_profit] + InputAdapter + ReusedExchange [d_date_sk] #4 + BroadcastExchange #9 + WholeStageCodegen (14) + HashAggregate [sum,sum] [sum(UnscaledValue(cr_return_amount)),sum(UnscaledValue(cr_net_loss)),returns,profit_loss,sum,sum] + InputAdapter + Exchange #10 + WholeStageCodegen (13) + HashAggregate [cr_return_amount,cr_net_loss] [sum,sum,sum,sum] + Project [cr_return_amount,cr_net_loss] + BroadcastHashJoin [cr_returned_date_sk,d_date_sk] + Filter [cr_returned_date_sk] + ColumnarToRow InputAdapter - Exchange #11 - WholeStageCodegen (13) - HashAggregate [cr_return_amount,cr_net_loss] [sum,sum,sum,sum] - Project [cr_return_amount,cr_net_loss] - BroadcastHashJoin [cr_returned_date_sk,d_date_sk] - Filter [cr_returned_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_returned_date_sk,cr_return_amount,cr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #5 - WholeStageCodegen (23) - Project [wp_web_page_sk,sales,returns,profit,profit_loss] - BroadcastHashJoin [wp_web_page_sk,wp_web_page_sk] - HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(ws_ext_sales_price)),sum(UnscaledValue(ws_net_profit)),sales,profit,sum,sum] + Scan parquet default.catalog_returns [cr_returned_date_sk,cr_return_amount,cr_net_loss] + InputAdapter + ReusedExchange [d_date_sk] #4 + WholeStageCodegen (23) + Project [wp_web_page_sk,sales,returns,profit,profit_loss] + BroadcastHashJoin [wp_web_page_sk,wp_web_page_sk] + HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(ws_ext_sales_price)),sum(UnscaledValue(ws_net_profit)),sales,profit,sum,sum] + InputAdapter + Exchange [wp_web_page_sk] #11 + WholeStageCodegen (18) + HashAggregate [wp_web_page_sk,ws_ext_sales_price,ws_net_profit] [sum,sum,sum,sum] + Project [ws_ext_sales_price,ws_net_profit,wp_web_page_sk] + BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] + Project [ws_web_page_sk,ws_ext_sales_price,ws_net_profit] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_sold_date_sk,ws_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_web_page_sk,ws_ext_sales_price,ws_net_profit] InputAdapter - Exchange [wp_web_page_sk] #12 - WholeStageCodegen (18) - HashAggregate [wp_web_page_sk,ws_ext_sales_price,ws_net_profit] [sum,sum,sum,sum] - Project [ws_ext_sales_price,ws_net_profit,wp_web_page_sk] - BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] - Project [ws_web_page_sk,ws_ext_sales_price,ws_net_profit] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk,ws_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_web_page_sk,ws_ext_sales_price,ws_net_profit] - InputAdapter - ReusedExchange [d_date_sk] #5 - 
InputAdapter - BroadcastExchange #13 - WholeStageCodegen (17) - Filter [wp_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_page [wp_web_page_sk] - InputAdapter - BroadcastExchange #14 - WholeStageCodegen (22) - HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(wr_return_amt)),sum(UnscaledValue(wr_net_loss)),returns,profit_loss,sum,sum] + ReusedExchange [d_date_sk] #4 + InputAdapter + BroadcastExchange #12 + WholeStageCodegen (17) + Filter [wp_web_page_sk] + ColumnarToRow InputAdapter - Exchange [wp_web_page_sk] #15 - WholeStageCodegen (21) - HashAggregate [wp_web_page_sk,wr_return_amt,wr_net_loss] [sum,sum,sum,sum] - Project [wr_return_amt,wr_net_loss,wp_web_page_sk] - BroadcastHashJoin [wr_returned_date_sk,d_date_sk] - Project [wr_returned_date_sk,wr_return_amt,wr_net_loss,wp_web_page_sk] - BroadcastHashJoin [wr_web_page_sk,wp_web_page_sk] - Filter [wr_returned_date_sk,wr_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_returns [wr_returned_date_sk,wr_web_page_sk,wr_return_amt,wr_net_loss] - InputAdapter - ReusedExchange [wp_web_page_sk] #13 - InputAdapter - ReusedExchange [d_date_sk] #5 - WholeStageCodegen (51) - HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [channel] #16 - WholeStageCodegen (50) - HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + Scan parquet default.web_page [wp_web_page_sk] + InputAdapter + BroadcastExchange #13 + WholeStageCodegen (22) + HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(wr_return_amt)),sum(UnscaledValue(wr_net_loss)),returns,profit_loss,sum,sum] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 - WholeStageCodegen (79) + Exchange [wp_web_page_sk] #14 + WholeStageCodegen (21) + HashAggregate [wp_web_page_sk,wr_return_amt,wr_net_loss] [sum,sum,sum,sum] + Project [wr_return_amt,wr_net_loss,wp_web_page_sk] + BroadcastHashJoin [wr_returned_date_sk,d_date_sk] + Project [wr_returned_date_sk,wr_return_amt,wr_net_loss,wp_web_page_sk] + BroadcastHashJoin [wr_web_page_sk,wp_web_page_sk] + Filter [wr_returned_date_sk,wr_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_returns [wr_returned_date_sk,wr_web_page_sk,wr_return_amt,wr_net_loss] + InputAdapter + ReusedExchange [wp_web_page_sk] #12 + InputAdapter + ReusedExchange [d_date_sk] #4 + WholeStageCodegen (51) + HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [channel] #15 + WholeStageCodegen (50) + HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 + WholeStageCodegen (77) HashAggregate [sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),channel,id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #17 - WholeStageCodegen (78) + Exchange #16 + 
WholeStageCodegen (76) HashAggregate [sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt index c18698ebc5b45..2d3ca673c2b08 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/explain.txt @@ -1,112 +1,108 @@ == Physical Plan == -TakeOrderedAndProject (108) -+- * HashAggregate (107) - +- Exchange (106) - +- * HashAggregate (105) - +- Union (104) - :- * HashAggregate (98) - : +- Exchange (97) - : +- * HashAggregate (96) - : +- Union (95) - : :- * HashAggregate (89) - : : +- Exchange (88) - : : +- * HashAggregate (87) - : : +- Union (86) - : : :- * Project (34) - : : : +- * BroadcastHashJoin LeftOuter BuildRight (33) - : : : :- * HashAggregate (19) - : : : : +- Exchange (18) - : : : : +- * HashAggregate (17) - : : : : +- * Project (16) - : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : :- * Project (10) - : : : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : : : :- * Filter (3) - : : : : : : +- * ColumnarToRow (2) - : : : : : : +- Scan parquet default.store_sales (1) - : : : : : +- BroadcastExchange (8) - : : : : : +- * Project (7) - : : : : : +- * Filter (6) - : : : : : +- * ColumnarToRow (5) - : : : : : +- Scan parquet default.date_dim (4) - : : : : +- BroadcastExchange (14) - : : : : +- * Filter (13) - : : : : +- * ColumnarToRow (12) - : : : : +- Scan parquet default.store (11) - : : : +- BroadcastExchange (32) - : : : +- * HashAggregate (31) - : : : +- Exchange (30) - : : : +- * HashAggregate (29) - : : : +- * Project (28) - : : : +- * BroadcastHashJoin Inner BuildRight (27) - : : : :- * Project (25) - : : : : +- * BroadcastHashJoin Inner BuildRight (24) - : : : : :- * Filter (22) - : : : : : +- * ColumnarToRow (21) - : : : : : +- Scan parquet default.store_returns (20) - : : : : +- ReusedExchange (23) - : : : +- ReusedExchange (26) - : : :- * Project (55) - : : : +- BroadcastNestedLoopJoin Inner BuildLeft (54) - : : : :- BroadcastExchange (44) - : : : : +- * HashAggregate (43) - : : : : +- Exchange (42) - : : : : +- * HashAggregate (41) - : : : : +- * Project (40) - : : : : +- * BroadcastHashJoin Inner BuildRight (39) - : : : : :- * Filter (37) - : : : : : +- * ColumnarToRow (36) - : : : : : +- Scan parquet default.catalog_sales (35) - : : : : +- ReusedExchange (38) - : : : +- * HashAggregate (53) - : : : +- Exchange (52) - : : : +- * HashAggregate (51) - : : : +- * Project (50) - : : : +- * BroadcastHashJoin Inner BuildRight (49) - : : : :- * Filter (47) - : : : : +- * ColumnarToRow (46) - : : : : +- Scan parquet default.catalog_returns (45) - : : : +- ReusedExchange (48) - : : +- * Project (85) - : : +- * BroadcastHashJoin LeftOuter BuildRight (84) - : : :- * HashAggregate (70) - : : : +- Exchange (69) - : : : +- * HashAggregate (68) - : : : +- * Project (67) - : : : +- * BroadcastHashJoin Inner BuildRight (66) - : : : :- * Project (61) - : : : : +- * BroadcastHashJoin Inner BuildRight (60) - : : : : :- * 
Filter (58) - : : : : : +- * ColumnarToRow (57) - : : : : : +- Scan parquet default.web_sales (56) - : : : : +- ReusedExchange (59) - : : : +- BroadcastExchange (65) - : : : +- * Filter (64) - : : : +- * ColumnarToRow (63) - : : : +- Scan parquet default.web_page (62) - : : +- BroadcastExchange (83) - : : +- * HashAggregate (82) - : : +- Exchange (81) - : : +- * HashAggregate (80) - : : +- * Project (79) - : : +- * BroadcastHashJoin Inner BuildRight (78) - : : :- * Project (76) - : : : +- * BroadcastHashJoin Inner BuildRight (75) - : : : :- * Filter (73) - : : : : +- * ColumnarToRow (72) - : : : : +- Scan parquet default.web_returns (71) - : : : +- ReusedExchange (74) - : : +- ReusedExchange (77) - : +- * HashAggregate (94) - : +- Exchange (93) - : +- * HashAggregate (92) - : +- * HashAggregate (91) - : +- ReusedExchange (90) - +- * HashAggregate (103) - +- Exchange (102) - +- * HashAggregate (101) - +- * HashAggregate (100) - +- ReusedExchange (99) +TakeOrderedAndProject (104) ++- * HashAggregate (103) + +- Exchange (102) + +- * HashAggregate (101) + +- Union (100) + :- * HashAggregate (89) + : +- Exchange (88) + : +- * HashAggregate (87) + : +- Union (86) + : :- * Project (34) + : : +- * BroadcastHashJoin LeftOuter BuildRight (33) + : : :- * HashAggregate (19) + : : : +- Exchange (18) + : : : +- * HashAggregate (17) + : : : +- * Project (16) + : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : :- * Project (10) + : : : : +- * BroadcastHashJoin Inner BuildRight (9) + : : : : :- * Filter (3) + : : : : : +- * ColumnarToRow (2) + : : : : : +- Scan parquet default.store_sales (1) + : : : : +- BroadcastExchange (8) + : : : : +- * Project (7) + : : : : +- * Filter (6) + : : : : +- * ColumnarToRow (5) + : : : : +- Scan parquet default.date_dim (4) + : : : +- BroadcastExchange (14) + : : : +- * Filter (13) + : : : +- * ColumnarToRow (12) + : : : +- Scan parquet default.store (11) + : : +- BroadcastExchange (32) + : : +- * HashAggregate (31) + : : +- Exchange (30) + : : +- * HashAggregate (29) + : : +- * Project (28) + : : +- * BroadcastHashJoin Inner BuildRight (27) + : : :- * Project (25) + : : : +- * BroadcastHashJoin Inner BuildRight (24) + : : : :- * Filter (22) + : : : : +- * ColumnarToRow (21) + : : : : +- Scan parquet default.store_returns (20) + : : : +- ReusedExchange (23) + : : +- ReusedExchange (26) + : :- * Project (55) + : : +- BroadcastNestedLoopJoin Inner BuildLeft (54) + : : :- BroadcastExchange (44) + : : : +- * HashAggregate (43) + : : : +- Exchange (42) + : : : +- * HashAggregate (41) + : : : +- * Project (40) + : : : +- * BroadcastHashJoin Inner BuildRight (39) + : : : :- * Filter (37) + : : : : +- * ColumnarToRow (36) + : : : : +- Scan parquet default.catalog_sales (35) + : : : +- ReusedExchange (38) + : : +- * HashAggregate (53) + : : +- Exchange (52) + : : +- * HashAggregate (51) + : : +- * Project (50) + : : +- * BroadcastHashJoin Inner BuildRight (49) + : : :- * Filter (47) + : : : +- * ColumnarToRow (46) + : : : +- Scan parquet default.catalog_returns (45) + : : +- ReusedExchange (48) + : +- * Project (85) + : +- * BroadcastHashJoin LeftOuter BuildRight (84) + : :- * HashAggregate (70) + : : +- Exchange (69) + : : +- * HashAggregate (68) + : : +- * Project (67) + : : +- * BroadcastHashJoin Inner BuildRight (66) + : : :- * Project (61) + : : : +- * BroadcastHashJoin Inner BuildRight (60) + : : : :- * Filter (58) + : : : : +- * ColumnarToRow (57) + : : : : +- Scan parquet default.web_sales (56) + : : : +- ReusedExchange (59) + : : +- BroadcastExchange (65) + : : +- 
* Filter (64) + : : +- * ColumnarToRow (63) + : : +- Scan parquet default.web_page (62) + : +- BroadcastExchange (83) + : +- * HashAggregate (82) + : +- Exchange (81) + : +- * HashAggregate (80) + : +- * Project (79) + : +- * BroadcastHashJoin Inner BuildRight (78) + : :- * Project (76) + : : +- * BroadcastHashJoin Inner BuildRight (75) + : : :- * Filter (73) + : : : +- * ColumnarToRow (72) + : : : +- Scan parquet default.web_returns (71) + : : +- ReusedExchange (74) + : +- ReusedExchange (77) + :- * HashAggregate (94) + : +- Exchange (93) + : +- * HashAggregate (92) + : +- * HashAggregate (91) + : +- ReusedExchange (90) + +- * HashAggregate (99) + +- Exchange (98) + +- * HashAggregate (97) + +- * HashAggregate (96) + +- ReusedExchange (95) (1) Scan parquet default.store_sales @@ -190,7 +186,7 @@ Results [3]: [s_store_sk#8, sum#12, sum#13] (18) Exchange Input [3]: [s_store_sk#8, sum#12, sum#13] -Arguments: hashpartitioning(s_store_sk#8, 5), true, [id=#14] +Arguments: hashpartitioning(s_store_sk#8, 5), ENSURE_REQUIREMENTS, [id=#14] (19) HashAggregate [codegen id : 8] Input [3]: [s_store_sk#8, sum#12, sum#13] @@ -246,7 +242,7 @@ Results [3]: [s_store_sk#23, sum#26, sum#27] (30) Exchange Input [3]: [s_store_sk#23, sum#26, sum#27] -Arguments: hashpartitioning(s_store_sk#23, 5), true, [id=#28] +Arguments: hashpartitioning(s_store_sk#23, 5), ENSURE_REQUIREMENTS, [id=#28] (31) HashAggregate [codegen id : 7] Input [3]: [s_store_sk#23, sum#26, sum#27] @@ -303,7 +299,7 @@ Results [3]: [cs_call_center_sk#39, sum#44, sum#45] (42) Exchange Input [3]: [cs_call_center_sk#39, sum#44, sum#45] -Arguments: hashpartitioning(cs_call_center_sk#39, 5), true, [id=#46] +Arguments: hashpartitioning(cs_call_center_sk#39, 5), ENSURE_REQUIREMENTS, [id=#46] (43) HashAggregate [codegen id : 11] Input [3]: [cs_call_center_sk#39, sum#44, sum#45] @@ -351,7 +347,7 @@ Results [2]: [sum#57, sum#58] (52) Exchange Input [2]: [sum#57, sum#58] -Arguments: SinglePartition, true, [id=#59] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#59] (53) HashAggregate [codegen id : 14] Input [2]: [sum#57, sum#58] @@ -429,7 +425,7 @@ Results [3]: [wp_web_page_sk#71, sum#75, sum#76] (69) Exchange Input [3]: [wp_web_page_sk#71, sum#75, sum#76] -Arguments: hashpartitioning(wp_web_page_sk#71, 5), true, [id=#77] +Arguments: hashpartitioning(wp_web_page_sk#71, 5), ENSURE_REQUIREMENTS, [id=#77] (70) HashAggregate [codegen id : 23] Input [3]: [wp_web_page_sk#71, sum#75, sum#76] @@ -485,7 +481,7 @@ Results [3]: [wp_web_page_sk#86, sum#89, sum#90] (81) Exchange Input [3]: [wp_web_page_sk#86, sum#89, sum#90] -Arguments: hashpartitioning(wp_web_page_sk#86, 5), true, [id=#91] +Arguments: hashpartitioning(wp_web_page_sk#86, 5), ENSURE_REQUIREMENTS, [id=#91] (82) HashAggregate [codegen id : 22] Input [3]: [wp_web_page_sk#86, sum#89, sum#90] @@ -518,7 +514,7 @@ Results [8]: [channel#34, id#35, sum#107, isEmpty#108, sum#109, isEmpty#110, sum (88) Exchange Input [8]: [channel#34, id#35, sum#107, isEmpty#108, sum#109, isEmpty#110, sum#111, isEmpty#112] -Arguments: hashpartitioning(channel#34, id#35, 5), true, [id=#113] +Arguments: hashpartitioning(channel#34, id#35, 5), ENSURE_REQUIREMENTS, [id=#113] (89) HashAggregate [codegen id : 25] Input [8]: [channel#34, id#35, sum#107, isEmpty#108, sum#109, isEmpty#110, sum#111, isEmpty#112] @@ -546,7 +542,7 @@ Results [7]: [channel#34, sum#139, isEmpty#140, sum#141, isEmpty#142, sum#143, i (93) Exchange Input [7]: [channel#34, sum#139, isEmpty#140, sum#141, isEmpty#142, sum#143, isEmpty#144] -Arguments: 
hashpartitioning(channel#34, 5), true, [id=#145] +Arguments: hashpartitioning(channel#34, 5), ENSURE_REQUIREMENTS, [id=#145] (94) HashAggregate [codegen id : 51] Input [7]: [channel#34, sum#139, isEmpty#140, sum#141, isEmpty#142, sum#143, isEmpty#144] @@ -555,75 +551,55 @@ Functions [3]: [sum(sales#130), sum(returns#131), sum(profit#132)] Aggregate Attributes [3]: [sum(sales#130)#146, sum(returns#131)#147, sum(profit#132)#148] Results [5]: [channel#34, null AS id#149, sum(sales#130)#146 AS sales#150, sum(returns#131)#147 AS returns#151, sum(profit#132)#148 AS profit#152] -(95) Union +(95) ReusedExchange [Reuses operator id: 88] +Output [8]: [channel#34, id#35, sum#153, isEmpty#154, sum#155, isEmpty#156, sum#157, isEmpty#158] -(96) HashAggregate [codegen id : 52] -Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] - -(97) Exchange -Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Arguments: hashpartitioning(channel#34, id#35, sales#117, returns#118, profit#119, 5), true, [id=#153] - -(98) HashAggregate [codegen id : 53] -Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] - -(99) ReusedExchange [Reuses operator id: 88] -Output [8]: [channel#34, id#35, sum#154, isEmpty#155, sum#156, isEmpty#157, sum#158, isEmpty#159] - -(100) HashAggregate [codegen id : 78] -Input [8]: [channel#34, id#35, sum#154, isEmpty#155, sum#156, isEmpty#157, sum#158, isEmpty#159] +(96) HashAggregate [codegen id : 76] +Input [8]: [channel#34, id#35, sum#153, isEmpty#154, sum#155, isEmpty#156, sum#157, isEmpty#158] Keys [2]: [channel#34, id#35] -Functions [3]: [sum(sales#17), sum(returns#36), sum(profit#160)] -Aggregate Attributes [3]: [sum(sales#17)#161, sum(returns#36)#162, sum(profit#160)#163] -Results [3]: [sum(sales#17)#161 AS sales#130, sum(returns#36)#162 AS returns#131, sum(profit#160)#163 AS profit#132] +Functions [3]: [sum(sales#17), sum(returns#36), sum(profit#159)] +Aggregate Attributes [3]: [sum(sales#17)#160, sum(returns#36)#161, sum(profit#159)#162] +Results [3]: [sum(sales#17)#160 AS sales#130, sum(returns#36)#161 AS returns#131, sum(profit#159)#162 AS profit#132] -(101) HashAggregate [codegen id : 78] +(97) HashAggregate [codegen id : 76] Input [3]: [sales#130, returns#131, profit#132] Keys: [] Functions [3]: [partial_sum(sales#130), partial_sum(returns#131), partial_sum(profit#132)] -Aggregate Attributes [6]: [sum#164, isEmpty#165, sum#166, isEmpty#167, sum#168, isEmpty#169] -Results [6]: [sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] +Aggregate Attributes [6]: [sum#163, isEmpty#164, sum#165, isEmpty#166, sum#167, isEmpty#168] +Results [6]: [sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, isEmpty#174] -(102) Exchange -Input [6]: [sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] -Arguments: SinglePartition, true, [id=#176] +(98) Exchange +Input [6]: [sum#169, isEmpty#170, sum#171, isEmpty#172, sum#173, isEmpty#174] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#175] -(103) HashAggregate [codegen id : 79] -Input [6]: [sum#170, isEmpty#171, sum#172, isEmpty#173, sum#174, isEmpty#175] +(99) HashAggregate [codegen id : 77] +Input [6]: [sum#169, isEmpty#170, 
sum#171, isEmpty#172, sum#173, isEmpty#174] Keys: [] Functions [3]: [sum(sales#130), sum(returns#131), sum(profit#132)] -Aggregate Attributes [3]: [sum(sales#130)#177, sum(returns#131)#178, sum(profit#132)#179] -Results [5]: [null AS channel#180, null AS id#181, sum(sales#130)#177 AS sales#182, sum(returns#131)#178 AS returns#183, sum(profit#132)#179 AS profit#184] +Aggregate Attributes [3]: [sum(sales#130)#176, sum(returns#131)#177, sum(profit#132)#178] +Results [5]: [null AS channel#179, null AS id#180, sum(sales#130)#176 AS sales#181, sum(returns#131)#177 AS returns#182, sum(profit#132)#178 AS profit#183] -(104) Union +(100) Union -(105) HashAggregate [codegen id : 80] +(101) HashAggregate [codegen id : 78] Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Functions: [] Aggregate Attributes: [] Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -(106) Exchange +(102) Exchange Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -Arguments: hashpartitioning(channel#34, id#35, sales#117, returns#118, profit#119, 5), true, [id=#185] +Arguments: hashpartitioning(channel#34, id#35, sales#117, returns#118, profit#119, 5), ENSURE_REQUIREMENTS, [id=#184] -(107) HashAggregate [codegen id : 81] +(103) HashAggregate [codegen id : 79] Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Keys [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Functions: [] Aggregate Attributes: [] Results [5]: [channel#34, id#35, sales#117, returns#118, profit#119] -(108) TakeOrderedAndProject +(104) TakeOrderedAndProject Input [5]: [channel#34, id#35, sales#117, returns#118, profit#119] Arguments: 100, [channel#34 ASC NULLS FIRST, id#35 ASC NULLS FIRST], [channel#34, id#35, sales#117, returns#118, profit#119] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/simplified.txt index 864039e512231..47b743fee91dd 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q77a/simplified.txt @@ -1,172 +1,164 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] - WholeStageCodegen (81) + WholeStageCodegen (79) HashAggregate [channel,id,sales,returns,profit] InputAdapter Exchange [channel,id,sales,returns,profit] #1 - WholeStageCodegen (80) + WholeStageCodegen (78) HashAggregate [channel,id,sales,returns,profit] InputAdapter Union - WholeStageCodegen (53) - HashAggregate [channel,id,sales,returns,profit] + WholeStageCodegen (25) + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel,id,sales,returns,profit] #2 - WholeStageCodegen (52) - HashAggregate [channel,id,sales,returns,profit] + Exchange [channel,id] #2 + WholeStageCodegen (24) + HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter Union - WholeStageCodegen (25) - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [channel,id] #3 - WholeStageCodegen (24) - HashAggregate [channel,id,sales,returns,profit] 
[sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (8) - Project [s_store_sk,sales,returns,profit,profit_loss] - BroadcastHashJoin [s_store_sk,s_store_sk] - HashAggregate [s_store_sk,sum,sum] [sum(UnscaledValue(ss_ext_sales_price)),sum(UnscaledValue(ss_net_profit)),sales,profit,sum,sum] + WholeStageCodegen (8) + Project [s_store_sk,sales,returns,profit,profit_loss] + BroadcastHashJoin [s_store_sk,s_store_sk] + HashAggregate [s_store_sk,sum,sum] [sum(UnscaledValue(ss_ext_sales_price)),sum(UnscaledValue(ss_net_profit)),sales,profit,sum,sum] + InputAdapter + Exchange [s_store_sk] #3 + WholeStageCodegen (3) + HashAggregate [s_store_sk,ss_ext_sales_price,ss_net_profit] [sum,sum,sum,sum] + Project [ss_ext_sales_price,ss_net_profit,s_store_sk] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_ext_sales_price,ss_net_profit] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Filter [ss_sold_date_sk,ss_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] InputAdapter - Exchange [s_store_sk] #4 - WholeStageCodegen (3) - HashAggregate [s_store_sk,ss_ext_sales_price,ss_net_profit] [sum,sum,sum,sum] - Project [ss_ext_sales_price,ss_net_profit,s_store_sk] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_store_sk,ss_ext_sales_price,ss_net_profit] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Filter [ss_sold_date_sk,ss_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_date,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (7) - HashAggregate [s_store_sk,sum,sum] [sum(UnscaledValue(sr_return_amt)),sum(UnscaledValue(sr_net_loss)),returns,profit_loss,sum,sum] - InputAdapter - Exchange [s_store_sk] #8 - WholeStageCodegen (6) - HashAggregate [s_store_sk,sr_return_amt,sr_net_loss] [sum,sum,sum,sum] - Project [sr_return_amt,sr_net_loss,s_store_sk] - BroadcastHashJoin [sr_store_sk,s_store_sk] - Project [sr_store_sk,sr_return_amt,sr_net_loss] - BroadcastHashJoin [sr_returned_date_sk,d_date_sk] - Filter [sr_returned_date_sk,sr_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #5 - InputAdapter - ReusedExchange [s_store_sk] #6 - WholeStageCodegen (15) - Project [cs_call_center_sk,sales,returns,profit,profit_loss] + Scan parquet default.date_dim [d_date_sk,d_date] InputAdapter - BroadcastNestedLoopJoin - BroadcastExchange #9 - WholeStageCodegen (11) - HashAggregate [cs_call_center_sk,sum,sum] [sum(UnscaledValue(cs_ext_sales_price)),sum(UnscaledValue(cs_net_profit)),sales,profit,sum,sum] + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (7) + HashAggregate [s_store_sk,sum,sum] 
[sum(UnscaledValue(sr_return_amt)),sum(UnscaledValue(sr_net_loss)),returns,profit_loss,sum,sum] + InputAdapter + Exchange [s_store_sk] #7 + WholeStageCodegen (6) + HashAggregate [s_store_sk,sr_return_amt,sr_net_loss] [sum,sum,sum,sum] + Project [sr_return_amt,sr_net_loss,s_store_sk] + BroadcastHashJoin [sr_store_sk,s_store_sk] + Project [sr_store_sk,sr_return_amt,sr_net_loss] + BroadcastHashJoin [sr_returned_date_sk,d_date_sk] + Filter [sr_returned_date_sk,sr_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_returned_date_sk,sr_store_sk,sr_return_amt,sr_net_loss] InputAdapter - Exchange [cs_call_center_sk] #10 - WholeStageCodegen (10) - HashAggregate [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] [sum,sum,sum,sum] - Project [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Filter [cs_sold_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_call_center_sk,cs_ext_sales_price,cs_net_profit] - InputAdapter - ReusedExchange [d_date_sk] #5 - WholeStageCodegen (14) - HashAggregate [sum,sum] [sum(UnscaledValue(cr_return_amount)),sum(UnscaledValue(cr_net_loss)),returns,profit_loss,sum,sum] + ReusedExchange [d_date_sk] #4 + InputAdapter + ReusedExchange [s_store_sk] #5 + WholeStageCodegen (15) + Project [cs_call_center_sk,sales,returns,profit,profit_loss] + InputAdapter + BroadcastNestedLoopJoin + BroadcastExchange #8 + WholeStageCodegen (11) + HashAggregate [cs_call_center_sk,sum,sum] [sum(UnscaledValue(cs_ext_sales_price)),sum(UnscaledValue(cs_net_profit)),sales,profit,sum,sum] + InputAdapter + Exchange [cs_call_center_sk] #9 + WholeStageCodegen (10) + HashAggregate [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] [sum,sum,sum,sum] + Project [cs_call_center_sk,cs_ext_sales_price,cs_net_profit] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Filter [cs_sold_date_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_call_center_sk,cs_ext_sales_price,cs_net_profit] + InputAdapter + ReusedExchange [d_date_sk] #4 + WholeStageCodegen (14) + HashAggregate [sum,sum] [sum(UnscaledValue(cr_return_amount)),sum(UnscaledValue(cr_net_loss)),returns,profit_loss,sum,sum] + InputAdapter + Exchange #10 + WholeStageCodegen (13) + HashAggregate [cr_return_amount,cr_net_loss] [sum,sum,sum,sum] + Project [cr_return_amount,cr_net_loss] + BroadcastHashJoin [cr_returned_date_sk,d_date_sk] + Filter [cr_returned_date_sk] + ColumnarToRow InputAdapter - Exchange #11 - WholeStageCodegen (13) - HashAggregate [cr_return_amount,cr_net_loss] [sum,sum,sum,sum] - Project [cr_return_amount,cr_net_loss] - BroadcastHashJoin [cr_returned_date_sk,d_date_sk] - Filter [cr_returned_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_returned_date_sk,cr_return_amount,cr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #5 - WholeStageCodegen (23) - Project [wp_web_page_sk,sales,returns,profit,profit_loss] - BroadcastHashJoin [wp_web_page_sk,wp_web_page_sk] - HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(ws_ext_sales_price)),sum(UnscaledValue(ws_net_profit)),sales,profit,sum,sum] + Scan parquet default.catalog_returns [cr_returned_date_sk,cr_return_amount,cr_net_loss] InputAdapter - Exchange [wp_web_page_sk] #12 - WholeStageCodegen (18) - HashAggregate [wp_web_page_sk,ws_ext_sales_price,ws_net_profit] [sum,sum,sum,sum] - Project [ws_ext_sales_price,ws_net_profit,wp_web_page_sk] - BroadcastHashJoin 
[ws_web_page_sk,wp_web_page_sk] - Project [ws_web_page_sk,ws_ext_sales_price,ws_net_profit] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk,ws_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_web_page_sk,ws_ext_sales_price,ws_net_profit] - InputAdapter - ReusedExchange [d_date_sk] #5 - InputAdapter - BroadcastExchange #13 - WholeStageCodegen (17) - Filter [wp_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_page [wp_web_page_sk] - InputAdapter - BroadcastExchange #14 - WholeStageCodegen (22) - HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(wr_return_amt)),sum(UnscaledValue(wr_net_loss)),returns,profit_loss,sum,sum] + ReusedExchange [d_date_sk] #4 + WholeStageCodegen (23) + Project [wp_web_page_sk,sales,returns,profit,profit_loss] + BroadcastHashJoin [wp_web_page_sk,wp_web_page_sk] + HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(ws_ext_sales_price)),sum(UnscaledValue(ws_net_profit)),sales,profit,sum,sum] + InputAdapter + Exchange [wp_web_page_sk] #11 + WholeStageCodegen (18) + HashAggregate [wp_web_page_sk,ws_ext_sales_price,ws_net_profit] [sum,sum,sum,sum] + Project [ws_ext_sales_price,ws_net_profit,wp_web_page_sk] + BroadcastHashJoin [ws_web_page_sk,wp_web_page_sk] + Project [ws_web_page_sk,ws_ext_sales_price,ws_net_profit] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_sold_date_sk,ws_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_web_page_sk,ws_ext_sales_price,ws_net_profit] + InputAdapter + ReusedExchange [d_date_sk] #4 + InputAdapter + BroadcastExchange #12 + WholeStageCodegen (17) + Filter [wp_web_page_sk] + ColumnarToRow InputAdapter - Exchange [wp_web_page_sk] #15 - WholeStageCodegen (21) - HashAggregate [wp_web_page_sk,wr_return_amt,wr_net_loss] [sum,sum,sum,sum] - Project [wr_return_amt,wr_net_loss,wp_web_page_sk] - BroadcastHashJoin [wr_web_page_sk,wp_web_page_sk] - Project [wr_web_page_sk,wr_return_amt,wr_net_loss] - BroadcastHashJoin [wr_returned_date_sk,d_date_sk] - Filter [wr_returned_date_sk,wr_web_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_returns [wr_returned_date_sk,wr_web_page_sk,wr_return_amt,wr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #5 - InputAdapter - ReusedExchange [wp_web_page_sk] #13 - WholeStageCodegen (51) - HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [channel] #16 - WholeStageCodegen (50) - HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + Scan parquet default.web_page [wp_web_page_sk] + InputAdapter + BroadcastExchange #13 + WholeStageCodegen (22) + HashAggregate [wp_web_page_sk,sum,sum] [sum(UnscaledValue(wr_return_amt)),sum(UnscaledValue(wr_net_loss)),returns,profit_loss,sum,sum] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 - WholeStageCodegen (79) + Exchange [wp_web_page_sk] #14 + WholeStageCodegen (21) + HashAggregate [wp_web_page_sk,wr_return_amt,wr_net_loss] [sum,sum,sum,sum] + Project [wr_return_amt,wr_net_loss,wp_web_page_sk] + BroadcastHashJoin [wr_web_page_sk,wp_web_page_sk] + Project [wr_web_page_sk,wr_return_amt,wr_net_loss] + 
BroadcastHashJoin [wr_returned_date_sk,d_date_sk] + Filter [wr_returned_date_sk,wr_web_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_returns [wr_returned_date_sk,wr_web_page_sk,wr_return_amt,wr_net_loss] + InputAdapter + ReusedExchange [d_date_sk] #4 + InputAdapter + ReusedExchange [wp_web_page_sk] #12 + WholeStageCodegen (51) + HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [channel] #15 + WholeStageCodegen (50) + HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 + WholeStageCodegen (77) HashAggregate [sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),channel,id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #17 - WholeStageCodegen (78) + Exchange #16 + WholeStageCodegen (76) HashAggregate [sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt index e6210f4a26281..4aa23cbe8b905 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/explain.txt @@ -1,129 +1,125 @@ == Physical Plan == -TakeOrderedAndProject (125) -+- * HashAggregate (124) - +- Exchange (123) - +- * HashAggregate (122) - +- Union (121) - :- * HashAggregate (115) - : +- Exchange (114) - : +- * HashAggregate (113) - : +- Union (112) - : :- * HashAggregate (106) - : : +- Exchange (105) - : : +- * HashAggregate (104) - : : +- Union (103) - : : :- * HashAggregate (42) - : : : +- Exchange (41) - : : : +- * HashAggregate (40) - : : : +- * Project (39) - : : : +- * BroadcastHashJoin Inner BuildRight (38) - : : : :- * Project (33) - : : : : +- * BroadcastHashJoin Inner BuildRight (32) - : : : : :- * Project (26) - : : : : : +- * BroadcastHashJoin Inner BuildRight (25) - : : : : : :- * Project (19) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (18) - : : : : : : :- * Project (12) - : : : : : : : +- SortMergeJoin LeftOuter (11) - : : : : : : : :- * Sort (5) - : : : : : : : : +- Exchange (4) - : : : : : : : : +- * Filter (3) - : : : : : : : : +- * ColumnarToRow (2) - : : : : : : : : +- Scan parquet default.store_sales (1) - : : : : : : : +- * Sort (10) - : : : : : : : +- Exchange (9) - : : : : : : : +- * Filter (8) - : : : : : : : +- * ColumnarToRow (7) - : : : : : : : +- Scan parquet default.store_returns (6) - : : : : : : +- BroadcastExchange (17) - : : : : : : +- * Project (16) - : : : : : : +- * Filter (15) - : : : : : : +- * ColumnarToRow (14) - : : : : : : +- Scan parquet default.item (13) - : : : : : +- BroadcastExchange (24) - : 
: : : : +- * Project (23) - : : : : : +- * Filter (22) - : : : : : +- * ColumnarToRow (21) - : : : : : +- Scan parquet default.date_dim (20) - : : : : +- BroadcastExchange (31) - : : : : +- * Project (30) - : : : : +- * Filter (29) - : : : : +- * ColumnarToRow (28) - : : : : +- Scan parquet default.promotion (27) - : : : +- BroadcastExchange (37) - : : : +- * Filter (36) - : : : +- * ColumnarToRow (35) - : : : +- Scan parquet default.store (34) - : : :- * HashAggregate (72) - : : : +- Exchange (71) - : : : +- * HashAggregate (70) - : : : +- * Project (69) - : : : +- * BroadcastHashJoin Inner BuildRight (68) - : : : :- * Project (63) - : : : : +- * BroadcastHashJoin Inner BuildRight (62) - : : : : :- * Project (60) - : : : : : +- * BroadcastHashJoin Inner BuildRight (59) - : : : : : :- * Project (57) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (56) - : : : : : : :- * Project (54) - : : : : : : : +- SortMergeJoin LeftOuter (53) - : : : : : : : :- * Sort (47) - : : : : : : : : +- Exchange (46) - : : : : : : : : +- * Filter (45) - : : : : : : : : +- * ColumnarToRow (44) - : : : : : : : : +- Scan parquet default.catalog_sales (43) - : : : : : : : +- * Sort (52) - : : : : : : : +- Exchange (51) - : : : : : : : +- * Filter (50) - : : : : : : : +- * ColumnarToRow (49) - : : : : : : : +- Scan parquet default.catalog_returns (48) - : : : : : : +- ReusedExchange (55) - : : : : : +- ReusedExchange (58) - : : : : +- ReusedExchange (61) - : : : +- BroadcastExchange (67) - : : : +- * Filter (66) - : : : +- * ColumnarToRow (65) - : : : +- Scan parquet default.catalog_page (64) - : : +- * HashAggregate (102) - : : +- Exchange (101) - : : +- * HashAggregate (100) - : : +- * Project (99) - : : +- * BroadcastHashJoin Inner BuildRight (98) - : : :- * Project (93) - : : : +- * BroadcastHashJoin Inner BuildRight (92) - : : : :- * Project (90) - : : : : +- * BroadcastHashJoin Inner BuildRight (89) - : : : : :- * Project (87) - : : : : : +- * BroadcastHashJoin Inner BuildRight (86) - : : : : : :- * Project (84) - : : : : : : +- SortMergeJoin LeftOuter (83) - : : : : : : :- * Sort (77) - : : : : : : : +- Exchange (76) - : : : : : : : +- * Filter (75) - : : : : : : : +- * ColumnarToRow (74) - : : : : : : : +- Scan parquet default.web_sales (73) - : : : : : : +- * Sort (82) - : : : : : : +- Exchange (81) - : : : : : : +- * Filter (80) - : : : : : : +- * ColumnarToRow (79) - : : : : : : +- Scan parquet default.web_returns (78) - : : : : : +- ReusedExchange (85) - : : : : +- ReusedExchange (88) - : : : +- ReusedExchange (91) - : : +- BroadcastExchange (97) - : : +- * Filter (96) - : : +- * ColumnarToRow (95) - : : +- Scan parquet default.web_site (94) - : +- * HashAggregate (111) - : +- Exchange (110) - : +- * HashAggregate (109) - : +- * HashAggregate (108) - : +- ReusedExchange (107) - +- * HashAggregate (120) - +- Exchange (119) - +- * HashAggregate (118) - +- * HashAggregate (117) - +- ReusedExchange (116) +TakeOrderedAndProject (121) ++- * HashAggregate (120) + +- Exchange (119) + +- * HashAggregate (118) + +- Union (117) + :- * HashAggregate (106) + : +- Exchange (105) + : +- * HashAggregate (104) + : +- Union (103) + : :- * HashAggregate (42) + : : +- Exchange (41) + : : +- * HashAggregate (40) + : : +- * Project (39) + : : +- * BroadcastHashJoin Inner BuildRight (38) + : : :- * Project (33) + : : : +- * BroadcastHashJoin Inner BuildRight (32) + : : : :- * Project (26) + : : : : +- * BroadcastHashJoin Inner BuildRight (25) + : : : : :- * Project (19) + : : : : : +- * BroadcastHashJoin Inner BuildRight 
(18) + : : : : : :- * Project (12) + : : : : : : +- SortMergeJoin LeftOuter (11) + : : : : : : :- * Sort (5) + : : : : : : : +- Exchange (4) + : : : : : : : +- * Filter (3) + : : : : : : : +- * ColumnarToRow (2) + : : : : : : : +- Scan parquet default.store_sales (1) + : : : : : : +- * Sort (10) + : : : : : : +- Exchange (9) + : : : : : : +- * Filter (8) + : : : : : : +- * ColumnarToRow (7) + : : : : : : +- Scan parquet default.store_returns (6) + : : : : : +- BroadcastExchange (17) + : : : : : +- * Project (16) + : : : : : +- * Filter (15) + : : : : : +- * ColumnarToRow (14) + : : : : : +- Scan parquet default.item (13) + : : : : +- BroadcastExchange (24) + : : : : +- * Project (23) + : : : : +- * Filter (22) + : : : : +- * ColumnarToRow (21) + : : : : +- Scan parquet default.promotion (20) + : : : +- BroadcastExchange (31) + : : : +- * Project (30) + : : : +- * Filter (29) + : : : +- * ColumnarToRow (28) + : : : +- Scan parquet default.date_dim (27) + : : +- BroadcastExchange (37) + : : +- * Filter (36) + : : +- * ColumnarToRow (35) + : : +- Scan parquet default.store (34) + : :- * HashAggregate (72) + : : +- Exchange (71) + : : +- * HashAggregate (70) + : : +- * Project (69) + : : +- * BroadcastHashJoin Inner BuildRight (68) + : : :- * Project (63) + : : : +- * BroadcastHashJoin Inner BuildRight (62) + : : : :- * Project (60) + : : : : +- * BroadcastHashJoin Inner BuildRight (59) + : : : : :- * Project (57) + : : : : : +- * BroadcastHashJoin Inner BuildRight (56) + : : : : : :- * Project (54) + : : : : : : +- SortMergeJoin LeftOuter (53) + : : : : : : :- * Sort (47) + : : : : : : : +- Exchange (46) + : : : : : : : +- * Filter (45) + : : : : : : : +- * ColumnarToRow (44) + : : : : : : : +- Scan parquet default.catalog_sales (43) + : : : : : : +- * Sort (52) + : : : : : : +- Exchange (51) + : : : : : : +- * Filter (50) + : : : : : : +- * ColumnarToRow (49) + : : : : : : +- Scan parquet default.catalog_returns (48) + : : : : : +- ReusedExchange (55) + : : : : +- ReusedExchange (58) + : : : +- ReusedExchange (61) + : : +- BroadcastExchange (67) + : : +- * Filter (66) + : : +- * ColumnarToRow (65) + : : +- Scan parquet default.catalog_page (64) + : +- * HashAggregate (102) + : +- Exchange (101) + : +- * HashAggregate (100) + : +- * Project (99) + : +- * BroadcastHashJoin Inner BuildRight (98) + : :- * Project (93) + : : +- * BroadcastHashJoin Inner BuildRight (92) + : : :- * Project (90) + : : : +- * BroadcastHashJoin Inner BuildRight (89) + : : : :- * Project (87) + : : : : +- * BroadcastHashJoin Inner BuildRight (86) + : : : : :- * Project (84) + : : : : : +- SortMergeJoin LeftOuter (83) + : : : : : :- * Sort (77) + : : : : : : +- Exchange (76) + : : : : : : +- * Filter (75) + : : : : : : +- * ColumnarToRow (74) + : : : : : : +- Scan parquet default.web_sales (73) + : : : : : +- * Sort (82) + : : : : : +- Exchange (81) + : : : : : +- * Filter (80) + : : : : : +- * ColumnarToRow (79) + : : : : : +- Scan parquet default.web_returns (78) + : : : : +- ReusedExchange (85) + : : : +- ReusedExchange (88) + : : +- ReusedExchange (91) + : +- BroadcastExchange (97) + : +- * Filter (96) + : +- * ColumnarToRow (95) + : +- Scan parquet default.web_site (94) + :- * HashAggregate (111) + : +- Exchange (110) + : +- * HashAggregate (109) + : +- * HashAggregate (108) + : +- ReusedExchange (107) + +- * HashAggregate (116) + +- Exchange (115) + +- * HashAggregate (114) + +- * HashAggregate (113) + +- ReusedExchange (112) (1) Scan parquet default.store_sales @@ -142,7 +138,7 @@ Condition : 
(((isnotnull(ss_sold_date_sk#1) AND isnotnull(ss_store_sk#3)) AND is (4) Exchange Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ticket_number#5, ss_ext_sales_price#6, ss_net_profit#7] -Arguments: hashpartitioning(cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint), 5), true, [id=#8] +Arguments: hashpartitioning(cast(ss_item_sk#2 as bigint), cast(ss_ticket_number#5 as bigint), 5), ENSURE_REQUIREMENTS, [id=#8] (5) Sort [codegen id : 2] Input [7]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ticket_number#5, ss_ext_sales_price#6, ss_net_profit#7] @@ -164,7 +160,7 @@ Condition : (isnotnull(sr_item_sk#9) AND isnotnull(sr_ticket_number#10)) (9) Exchange Input [4]: [sr_item_sk#9, sr_ticket_number#10, sr_return_amt#11, sr_net_loss#12] -Arguments: hashpartitioning(sr_item_sk#9, sr_ticket_number#10, 5), true, [id=#13] +Arguments: hashpartitioning(sr_item_sk#9, sr_ticket_number#10, 5), ENSURE_REQUIREMENTS, [id=#13] (10) Sort [codegen id : 4] Input [4]: [sr_item_sk#9, sr_ticket_number#10, sr_return_amt#11, sr_net_loss#12] @@ -210,67 +206,67 @@ Join condition: None Output [7]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] Input [9]: [ss_sold_date_sk#1, ss_item_sk#2, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, i_item_sk#14] -(20) Scan parquet default.date_dim -Output [2]: [d_date_sk#17, d_date#18] +(20) Scan parquet default.promotion +Output [2]: [p_promo_sk#17, p_channel_tv#18] Batched: true -Location [not included in comparison]/{warehouse_dir}/date_dim] -PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-09-03), IsNotNull(d_date_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/promotion] +PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)] +ReadSchema: struct (21) ColumnarToRow [codegen id : 6] -Input [2]: [d_date_sk#17, d_date#18] +Input [2]: [p_promo_sk#17, p_channel_tv#18] (22) Filter [codegen id : 6] -Input [2]: [d_date_sk#17, d_date#18] -Condition : (((isnotnull(d_date#18) AND (d_date#18 >= 10442)) AND (d_date#18 <= 10472)) AND isnotnull(d_date_sk#17)) +Input [2]: [p_promo_sk#17, p_channel_tv#18] +Condition : ((isnotnull(p_channel_tv#18) AND (p_channel_tv#18 = N)) AND isnotnull(p_promo_sk#17)) (23) Project [codegen id : 6] -Output [1]: [d_date_sk#17] -Input [2]: [d_date_sk#17, d_date#18] +Output [1]: [p_promo_sk#17] +Input [2]: [p_promo_sk#17, p_channel_tv#18] (24) BroadcastExchange -Input [1]: [d_date_sk#17] +Input [1]: [p_promo_sk#17] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#19] (25) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [ss_sold_date_sk#1] -Right keys [1]: [d_date_sk#17] +Left keys [1]: [ss_promo_sk#4] +Right keys [1]: [p_promo_sk#17] Join condition: None (26) Project [codegen id : 9] -Output [6]: [ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] -Input [8]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, d_date_sk#17] +Output [6]: [ss_sold_date_sk#1, ss_store_sk#3, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] +Input [8]: [ss_sold_date_sk#1, ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, p_promo_sk#17] 
-(27) Scan parquet default.promotion -Output [2]: [p_promo_sk#20, p_channel_tv#21] +(27) Scan parquet default.date_dim +Output [2]: [d_date_sk#20, d_date#21] Batched: true -Location [not included in comparison]/{warehouse_dir}/promotion] -PushedFilters: [IsNotNull(p_channel_tv), EqualTo(p_channel_tv,N), IsNotNull(p_promo_sk)] -ReadSchema: struct +Location [not included in comparison]/{warehouse_dir}/date_dim] +PushedFilters: [IsNotNull(d_date), GreaterThanOrEqual(d_date,1998-08-04), LessThanOrEqual(d_date,1998-09-03), IsNotNull(d_date_sk)] +ReadSchema: struct (28) ColumnarToRow [codegen id : 7] -Input [2]: [p_promo_sk#20, p_channel_tv#21] +Input [2]: [d_date_sk#20, d_date#21] (29) Filter [codegen id : 7] -Input [2]: [p_promo_sk#20, p_channel_tv#21] -Condition : ((isnotnull(p_channel_tv#21) AND (p_channel_tv#21 = N)) AND isnotnull(p_promo_sk#20)) +Input [2]: [d_date_sk#20, d_date#21] +Condition : (((isnotnull(d_date#21) AND (d_date#21 >= 10442)) AND (d_date#21 <= 10472)) AND isnotnull(d_date_sk#20)) (30) Project [codegen id : 7] -Output [1]: [p_promo_sk#20] -Input [2]: [p_promo_sk#20, p_channel_tv#21] +Output [1]: [d_date_sk#20] +Input [2]: [d_date_sk#20, d_date#21] (31) BroadcastExchange -Input [1]: [p_promo_sk#20] +Input [1]: [d_date_sk#20] Arguments: HashedRelationBroadcastMode(List(cast(input[0, int, true] as bigint)),false), [id=#22] (32) BroadcastHashJoin [codegen id : 9] -Left keys [1]: [ss_promo_sk#4] -Right keys [1]: [p_promo_sk#20] +Left keys [1]: [ss_sold_date_sk#1] +Right keys [1]: [d_date_sk#20] Join condition: None (33) Project [codegen id : 9] Output [5]: [ss_store_sk#3, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12] -Input [7]: [ss_store_sk#3, ss_promo_sk#4, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, p_promo_sk#20] +Input [7]: [ss_sold_date_sk#1, ss_store_sk#3, ss_ext_sales_price#6, ss_net_profit#7, sr_return_amt#11, sr_net_loss#12, d_date_sk#20] (34) Scan parquet default.store Output [2]: [s_store_sk#23, s_store_id#24] @@ -308,7 +304,7 @@ Results [6]: [s_store_id#24, sum#31, sum#32, isEmpty#33, sum#34, isEmpty#35] (41) Exchange Input [6]: [s_store_id#24, sum#31, sum#32, isEmpty#33, sum#34, isEmpty#35] -Arguments: hashpartitioning(s_store_id#24, 5), true, [id=#36] +Arguments: hashpartitioning(s_store_id#24, 5), ENSURE_REQUIREMENTS, [id=#36] (42) HashAggregate [codegen id : 10] Input [6]: [s_store_id#24, sum#31, sum#32, isEmpty#33, sum#34, isEmpty#35] @@ -333,7 +329,7 @@ Condition : (((isnotnull(cs_sold_date_sk#45) AND isnotnull(cs_catalog_page_sk#46 (46) Exchange Input [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_item_sk#47, cs_promo_sk#48, cs_order_number#49, cs_ext_sales_price#50, cs_net_profit#51] -Arguments: hashpartitioning(cs_item_sk#47, cs_order_number#49, 5), true, [id=#52] +Arguments: hashpartitioning(cs_item_sk#47, cs_order_number#49, 5), ENSURE_REQUIREMENTS, [id=#52] (47) Sort [codegen id : 12] Input [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_item_sk#47, cs_promo_sk#48, cs_order_number#49, cs_ext_sales_price#50, cs_net_profit#51] @@ -355,7 +351,7 @@ Condition : (isnotnull(cr_item_sk#53) AND isnotnull(cr_order_number#54)) (51) Exchange Input [4]: [cr_item_sk#53, cr_order_number#54, cr_return_amount#55, cr_net_loss#56] -Arguments: hashpartitioning(cr_item_sk#53, cr_order_number#54, 5), true, [id=#57] +Arguments: hashpartitioning(cr_item_sk#53, cr_order_number#54, 5), ENSURE_REQUIREMENTS, [id=#57] (52) Sort [codegen id : 14] Input [4]: [cr_item_sk#53, cr_order_number#54, cr_return_amount#55, 
cr_net_loss#56] @@ -383,28 +379,28 @@ Output [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_s Input [9]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_item_sk#47, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, i_item_sk#14] (58) ReusedExchange [Reuses operator id: 24] -Output [1]: [d_date_sk#17] +Output [1]: [p_promo_sk#17] (59) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [cs_sold_date_sk#45] -Right keys [1]: [d_date_sk#17] +Left keys [1]: [cs_promo_sk#48] +Right keys [1]: [p_promo_sk#17] Join condition: None (60) Project [codegen id : 19] -Output [6]: [cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56] -Input [8]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, d_date_sk#17] +Output [6]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56] +Input [8]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, p_promo_sk#17] (61) ReusedExchange [Reuses operator id: 31] -Output [1]: [p_promo_sk#20] +Output [1]: [d_date_sk#20] (62) BroadcastHashJoin [codegen id : 19] -Left keys [1]: [cs_promo_sk#48] -Right keys [1]: [p_promo_sk#20] +Left keys [1]: [cs_sold_date_sk#45] +Right keys [1]: [d_date_sk#20] Join condition: None (63) Project [codegen id : 19] Output [5]: [cs_catalog_page_sk#46, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56] -Input [7]: [cs_catalog_page_sk#46, cs_promo_sk#48, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, p_promo_sk#20] +Input [7]: [cs_sold_date_sk#45, cs_catalog_page_sk#46, cs_ext_sales_price#50, cs_net_profit#51, cr_return_amount#55, cr_net_loss#56, d_date_sk#20] (64) Scan parquet default.catalog_page Output [2]: [cp_catalog_page_sk#58, cp_catalog_page_id#59] @@ -442,7 +438,7 @@ Results [6]: [cp_catalog_page_id#59, sum#66, sum#67, isEmpty#68, sum#69, isEmpty (71) Exchange Input [6]: [cp_catalog_page_id#59, sum#66, sum#67, isEmpty#68, sum#69, isEmpty#70] -Arguments: hashpartitioning(cp_catalog_page_id#59, 5), true, [id=#71] +Arguments: hashpartitioning(cp_catalog_page_id#59, 5), ENSURE_REQUIREMENTS, [id=#71] (72) HashAggregate [codegen id : 20] Input [6]: [cp_catalog_page_id#59, sum#66, sum#67, isEmpty#68, sum#69, isEmpty#70] @@ -467,7 +463,7 @@ Condition : (((isnotnull(ws_sold_date_sk#80) AND isnotnull(ws_web_site_sk#82)) A (76) Exchange Input [7]: [ws_sold_date_sk#80, ws_item_sk#81, ws_web_site_sk#82, ws_promo_sk#83, ws_order_number#84, ws_ext_sales_price#85, ws_net_profit#86] -Arguments: hashpartitioning(cast(ws_item_sk#81 as bigint), cast(ws_order_number#84 as bigint), 5), true, [id=#87] +Arguments: hashpartitioning(cast(ws_item_sk#81 as bigint), cast(ws_order_number#84 as bigint), 5), ENSURE_REQUIREMENTS, [id=#87] (77) Sort [codegen id : 22] Input [7]: [ws_sold_date_sk#80, ws_item_sk#81, ws_web_site_sk#82, ws_promo_sk#83, ws_order_number#84, ws_ext_sales_price#85, ws_net_profit#86] @@ -489,7 +485,7 @@ Condition : (isnotnull(wr_item_sk#88) AND isnotnull(wr_order_number#89)) (81) Exchange Input [4]: [wr_item_sk#88, wr_order_number#89, wr_return_amt#90, wr_net_loss#91] -Arguments: hashpartitioning(wr_item_sk#88, wr_order_number#89, 5), true, [id=#92] +Arguments: hashpartitioning(wr_item_sk#88, wr_order_number#89, 5), 
ENSURE_REQUIREMENTS, [id=#92] (82) Sort [codegen id : 24] Input [4]: [wr_item_sk#88, wr_order_number#89, wr_return_amt#90, wr_net_loss#91] @@ -517,28 +513,28 @@ Output [7]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales Input [9]: [ws_sold_date_sk#80, ws_item_sk#81, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, i_item_sk#14] (88) ReusedExchange [Reuses operator id: 24] -Output [1]: [d_date_sk#17] +Output [1]: [p_promo_sk#17] (89) BroadcastHashJoin [codegen id : 29] -Left keys [1]: [ws_sold_date_sk#80] -Right keys [1]: [d_date_sk#17] +Left keys [1]: [ws_promo_sk#83] +Right keys [1]: [p_promo_sk#17] Join condition: None (90) Project [codegen id : 29] -Output [6]: [ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91] -Input [8]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, d_date_sk#17] +Output [6]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91] +Input [8]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, p_promo_sk#17] (91) ReusedExchange [Reuses operator id: 31] -Output [1]: [p_promo_sk#20] +Output [1]: [d_date_sk#20] (92) BroadcastHashJoin [codegen id : 29] -Left keys [1]: [ws_promo_sk#83] -Right keys [1]: [p_promo_sk#20] +Left keys [1]: [ws_sold_date_sk#80] +Right keys [1]: [d_date_sk#20] Join condition: None (93) Project [codegen id : 29] Output [5]: [ws_web_site_sk#82, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91] -Input [7]: [ws_web_site_sk#82, ws_promo_sk#83, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, p_promo_sk#20] +Input [7]: [ws_sold_date_sk#80, ws_web_site_sk#82, ws_ext_sales_price#85, ws_net_profit#86, wr_return_amt#90, wr_net_loss#91, d_date_sk#20] (94) Scan parquet default.web_site Output [2]: [web_site_sk#93, web_site_id#94] @@ -576,7 +572,7 @@ Results [6]: [web_site_id#94, sum#101, sum#102, isEmpty#103, sum#104, isEmpty#10 (101) Exchange Input [6]: [web_site_id#94, sum#101, sum#102, isEmpty#103, sum#104, isEmpty#105] -Arguments: hashpartitioning(web_site_id#94, 5), true, [id=#106] +Arguments: hashpartitioning(web_site_id#94, 5), ENSURE_REQUIREMENTS, [id=#106] (102) HashAggregate [codegen id : 30] Input [6]: [web_site_id#94, sum#101, sum#102, isEmpty#103, sum#104, isEmpty#105] @@ -596,7 +592,7 @@ Results [8]: [channel#40, id#41, sum#121, isEmpty#122, sum#123, isEmpty#124, sum (105) Exchange Input [8]: [channel#40, id#41, sum#121, isEmpty#122, sum#123, isEmpty#124, sum#125, isEmpty#126] -Arguments: hashpartitioning(channel#40, id#41, 5), true, [id=#127] +Arguments: hashpartitioning(channel#40, id#41, 5), ENSURE_REQUIREMENTS, [id=#127] (106) HashAggregate [codegen id : 32] Input [8]: [channel#40, id#41, sum#121, isEmpty#122, sum#123, isEmpty#124, sum#125, isEmpty#126] @@ -624,7 +620,7 @@ Results [7]: [channel#40, sum#152, isEmpty#153, sum#154, isEmpty#155, sum#156, i (110) Exchange Input [7]: [channel#40, sum#152, isEmpty#153, sum#154, isEmpty#155, sum#156, isEmpty#157] -Arguments: hashpartitioning(channel#40, 5), true, [id=#158] +Arguments: hashpartitioning(channel#40, 5), ENSURE_REQUIREMENTS, [id=#158] (111) HashAggregate [codegen id : 65] Input [7]: [channel#40, sum#152, isEmpty#153, sum#154, isEmpty#155, sum#156, isEmpty#157] @@ -633,75 
+629,55 @@ Functions [3]: [sum(sales#143), sum(returns#144), sum(profit#145)] Aggregate Attributes [3]: [sum(sales#143)#159, sum(returns#144)#160, sum(profit#145)#161] Results [5]: [channel#40, null AS id#162, sum(sales#143)#159 AS sales#163, sum(returns#144)#160 AS returns#164, sum(profit#145)#161 AS profit#165] -(112) Union - -(113) HashAggregate [codegen id : 66] -Input [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -Keys [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#40, id#41, sales#131, returns#132, profit#133] - -(114) Exchange -Input [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -Arguments: hashpartitioning(channel#40, id#41, sales#131, returns#132, profit#133, 5), true, [id=#166] - -(115) HashAggregate [codegen id : 67] -Input [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -Keys [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#40, id#41, sales#131, returns#132, profit#133] - -(116) ReusedExchange [Reuses operator id: 105] -Output [8]: [channel#40, id#41, sum#167, isEmpty#168, sum#169, isEmpty#170, sum#171, isEmpty#172] +(112) ReusedExchange [Reuses operator id: 105] +Output [8]: [channel#40, id#41, sum#166, isEmpty#167, sum#168, isEmpty#169, sum#170, isEmpty#171] -(117) HashAggregate [codegen id : 99] -Input [8]: [channel#40, id#41, sum#167, isEmpty#168, sum#169, isEmpty#170, sum#171, isEmpty#172] +(113) HashAggregate [codegen id : 97] +Input [8]: [channel#40, id#41, sum#166, isEmpty#167, sum#168, isEmpty#169, sum#170, isEmpty#171] Keys [2]: [channel#40, id#41] Functions [3]: [sum(sales#42), sum(returns#43), sum(profit#44)] -Aggregate Attributes [3]: [sum(sales#42)#173, sum(returns#43)#174, sum(profit#44)#175] -Results [3]: [sum(sales#42)#173 AS sales#143, sum(returns#43)#174 AS returns#144, sum(profit#44)#175 AS profit#145] +Aggregate Attributes [3]: [sum(sales#42)#172, sum(returns#43)#173, sum(profit#44)#174] +Results [3]: [sum(sales#42)#172 AS sales#143, sum(returns#43)#173 AS returns#144, sum(profit#44)#174 AS profit#145] -(118) HashAggregate [codegen id : 99] +(114) HashAggregate [codegen id : 97] Input [3]: [sales#143, returns#144, profit#145] Keys: [] Functions [3]: [partial_sum(sales#143), partial_sum(returns#144), partial_sum(profit#145)] -Aggregate Attributes [6]: [sum#176, isEmpty#177, sum#178, isEmpty#179, sum#180, isEmpty#181] -Results [6]: [sum#182, isEmpty#183, sum#184, isEmpty#185, sum#186, isEmpty#187] +Aggregate Attributes [6]: [sum#175, isEmpty#176, sum#177, isEmpty#178, sum#179, isEmpty#180] +Results [6]: [sum#181, isEmpty#182, sum#183, isEmpty#184, sum#185, isEmpty#186] -(119) Exchange -Input [6]: [sum#182, isEmpty#183, sum#184, isEmpty#185, sum#186, isEmpty#187] -Arguments: SinglePartition, true, [id=#188] +(115) Exchange +Input [6]: [sum#181, isEmpty#182, sum#183, isEmpty#184, sum#185, isEmpty#186] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#187] -(120) HashAggregate [codegen id : 100] -Input [6]: [sum#182, isEmpty#183, sum#184, isEmpty#185, sum#186, isEmpty#187] +(116) HashAggregate [codegen id : 98] +Input [6]: [sum#181, isEmpty#182, sum#183, isEmpty#184, sum#185, isEmpty#186] Keys: [] Functions [3]: [sum(sales#143), sum(returns#144), sum(profit#145)] -Aggregate Attributes [3]: [sum(sales#143)#189, sum(returns#144)#190, sum(profit#145)#191] -Results [5]: [null AS channel#192, null AS id#193, sum(sales#143)#189 AS sales#194, sum(returns#144)#190 AS 
returns#195, sum(profit#145)#191 AS profit#196] +Aggregate Attributes [3]: [sum(sales#143)#188, sum(returns#144)#189, sum(profit#145)#190] +Results [5]: [null AS channel#191, null AS id#192, sum(sales#143)#188 AS sales#193, sum(returns#144)#189 AS returns#194, sum(profit#145)#190 AS profit#195] -(121) Union +(117) Union -(122) HashAggregate [codegen id : 101] +(118) HashAggregate [codegen id : 99] Input [5]: [channel#40, id#41, sales#131, returns#132, profit#133] Keys [5]: [channel#40, id#41, sales#131, returns#132, profit#133] Functions: [] Aggregate Attributes: [] Results [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -(123) Exchange +(119) Exchange Input [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -Arguments: hashpartitioning(channel#40, id#41, sales#131, returns#132, profit#133, 5), true, [id=#197] +Arguments: hashpartitioning(channel#40, id#41, sales#131, returns#132, profit#133, 5), ENSURE_REQUIREMENTS, [id=#196] -(124) HashAggregate [codegen id : 102] +(120) HashAggregate [codegen id : 100] Input [5]: [channel#40, id#41, sales#131, returns#132, profit#133] Keys [5]: [channel#40, id#41, sales#131, returns#132, profit#133] Functions: [] Aggregate Attributes: [] Results [5]: [channel#40, id#41, sales#131, returns#132, profit#133] -(125) TakeOrderedAndProject +(121) TakeOrderedAndProject Input [5]: [channel#40, id#41, sales#131, returns#132, profit#133] Arguments: 100, [channel#40 ASC NULLS FIRST, id#41 ASC NULLS FIRST], [channel#40, id#41, sales#131, returns#132, profit#133] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/simplified.txt index 13781c8bd5993..c26c5b81259e6 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a.sf100/simplified.txt @@ -1,205 +1,197 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] - WholeStageCodegen (102) + WholeStageCodegen (100) HashAggregate [channel,id,sales,returns,profit] InputAdapter Exchange [channel,id,sales,returns,profit] #1 - WholeStageCodegen (101) + WholeStageCodegen (99) HashAggregate [channel,id,sales,returns,profit] InputAdapter Union - WholeStageCodegen (67) - HashAggregate [channel,id,sales,returns,profit] + WholeStageCodegen (32) + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel,id,sales,returns,profit] #2 - WholeStageCodegen (66) - HashAggregate [channel,id,sales,returns,profit] + Exchange [channel,id] #2 + WholeStageCodegen (31) + HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter Union - WholeStageCodegen (32) - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + WholeStageCodegen (10) + HashAggregate [s_store_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ss_ext_sales_price)),sum(coalesce(cast(sr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ss_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(sr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] 
InputAdapter - Exchange [channel,id] #3 - WholeStageCodegen (31) - HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (10) - HashAggregate [s_store_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ss_ext_sales_price)),sum(coalesce(cast(sr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ss_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(sr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [s_store_id] #4 - WholeStageCodegen (9) - HashAggregate [s_store_id,ss_ext_sales_price,sr_return_amt,ss_net_profit,sr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] - Project [ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_store_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - BroadcastHashJoin [ss_promo_sk,p_promo_sk] - Project [ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - InputAdapter - SortMergeJoin [ss_item_sk,ss_ticket_number,sr_item_sk,sr_ticket_number] - WholeStageCodegen (2) - Sort [ss_item_sk,ss_ticket_number] - InputAdapter - Exchange [ss_item_sk,ss_ticket_number] #5 - WholeStageCodegen (1) - Filter [ss_sold_date_sk,ss_store_sk,ss_item_sk,ss_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_ext_sales_price,ss_net_profit] - WholeStageCodegen (4) - Sort [sr_item_sk,sr_ticket_number] - InputAdapter - Exchange [sr_item_sk,sr_ticket_number] #6 - WholeStageCodegen (3) - Filter [sr_item_sk,sr_ticket_number] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_amt,sr_net_loss] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (5) - Project [i_item_sk] - Filter [i_current_price,i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_current_price] - InputAdapter - BroadcastExchange #8 - WholeStageCodegen (6) - Project [d_date_sk] - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] - InputAdapter - BroadcastExchange #9 - WholeStageCodegen (7) - Project [p_promo_sk] - Filter [p_channel_tv,p_promo_sk] + Exchange [s_store_id] #3 + WholeStageCodegen (9) + HashAggregate [s_store_id,ss_ext_sales_price,sr_return_amt,ss_net_profit,sr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] + Project [ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_store_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_store_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] + BroadcastHashJoin [ss_promo_sk,p_promo_sk] + Project [ss_sold_date_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] + 
BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] + InputAdapter + SortMergeJoin [ss_item_sk,ss_ticket_number,sr_item_sk,sr_ticket_number] + WholeStageCodegen (2) + Sort [ss_item_sk,ss_ticket_number] + InputAdapter + Exchange [ss_item_sk,ss_ticket_number] #4 + WholeStageCodegen (1) + Filter [ss_sold_date_sk,ss_store_sk,ss_item_sk,ss_promo_sk] ColumnarToRow InputAdapter - Scan parquet default.promotion [p_promo_sk,p_channel_tv] - InputAdapter - BroadcastExchange #10 - WholeStageCodegen (8) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_store_id] - WholeStageCodegen (20) - HashAggregate [cp_catalog_page_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(cs_ext_sales_price)),sum(coalesce(cast(cr_return_amount as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(cs_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(cr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [cp_catalog_page_id] #11 - WholeStageCodegen (19) - HashAggregate [cp_catalog_page_id,cs_ext_sales_price,cr_return_amount,cs_net_profit,cr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] - Project [cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] - BroadcastHashJoin [cs_catalog_page_sk,cp_catalog_page_sk] - Project [cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - BroadcastHashJoin [cs_promo_sk,p_promo_sk] - Project [cs_catalog_page_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_catalog_page_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Project [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - InputAdapter - SortMergeJoin [cs_item_sk,cs_order_number,cr_item_sk,cr_order_number] - WholeStageCodegen (12) - Sort [cs_item_sk,cs_order_number] - InputAdapter - Exchange [cs_item_sk,cs_order_number] #12 - WholeStageCodegen (11) - Filter [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_ext_sales_price,cs_net_profit] - WholeStageCodegen (14) - Sort [cr_item_sk,cr_order_number] - InputAdapter - Exchange [cr_item_sk,cr_order_number] #13 - WholeStageCodegen (13) - Filter [cr_item_sk,cr_order_number] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_amount,cr_net_loss] - InputAdapter - ReusedExchange [i_item_sk] #7 + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_ext_sales_price,ss_net_profit] + WholeStageCodegen (4) + Sort [sr_item_sk,sr_ticket_number] + InputAdapter + Exchange [sr_item_sk,sr_ticket_number] #5 + WholeStageCodegen (3) + Filter [sr_item_sk,sr_ticket_number] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_amt,sr_net_loss] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (5) + Project [i_item_sk] + Filter [i_current_price,i_item_sk] + ColumnarToRow 
InputAdapter - ReusedExchange [d_date_sk] #8 + Scan parquet default.item [i_item_sk,i_current_price] + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (6) + Project [p_promo_sk] + Filter [p_channel_tv,p_promo_sk] + ColumnarToRow InputAdapter - ReusedExchange [p_promo_sk] #9 - InputAdapter - BroadcastExchange #14 - WholeStageCodegen (18) - Filter [cp_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] - WholeStageCodegen (30) - HashAggregate [web_site_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ws_ext_sales_price)),sum(coalesce(cast(wr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ws_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(wr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] + Scan parquet default.promotion [p_promo_sk,p_channel_tv] InputAdapter - Exchange [web_site_id] #15 - WholeStageCodegen (29) - HashAggregate [web_site_id,ws_ext_sales_price,wr_return_amt,ws_net_profit,wr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] - Project [ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] - BroadcastHashJoin [ws_web_site_sk,web_site_sk] - Project [ws_web_site_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - BroadcastHashJoin [ws_promo_sk,p_promo_sk] - Project [ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Project [ws_sold_date_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - BroadcastHashJoin [ws_item_sk,i_item_sk] - Project [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - InputAdapter - SortMergeJoin [ws_item_sk,ws_order_number,wr_item_sk,wr_order_number] - WholeStageCodegen (22) - Sort [ws_item_sk,ws_order_number] - InputAdapter - Exchange [ws_item_sk,ws_order_number] #16 - WholeStageCodegen (21) - Filter [ws_sold_date_sk,ws_web_site_sk,ws_item_sk,ws_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_order_number,ws_ext_sales_price,ws_net_profit] - WholeStageCodegen (24) - Sort [wr_item_sk,wr_order_number] - InputAdapter - Exchange [wr_item_sk,wr_order_number] #17 - WholeStageCodegen (23) - Filter [wr_item_sk,wr_order_number] - ColumnarToRow - InputAdapter - Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] - InputAdapter - ReusedExchange [i_item_sk] #7 - InputAdapter - ReusedExchange [d_date_sk] #8 - InputAdapter - ReusedExchange [p_promo_sk] #9 + BroadcastExchange #8 + WholeStageCodegen (7) + Project [d_date_sk] + Filter [d_date,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #18 - WholeStageCodegen (28) - Filter [web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_site [web_site_sk,web_site_id] - WholeStageCodegen (65) - HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + Scan parquet default.date_dim [d_date_sk,d_date] + InputAdapter + BroadcastExchange #9 + WholeStageCodegen (8) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_store_id] + WholeStageCodegen (20) + HashAggregate 
[cp_catalog_page_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(cs_ext_sales_price)),sum(coalesce(cast(cr_return_amount as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(cs_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(cr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [cp_catalog_page_id] #10 + WholeStageCodegen (19) + HashAggregate [cp_catalog_page_id,cs_ext_sales_price,cr_return_amount,cs_net_profit,cr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] + Project [cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] + BroadcastHashJoin [cs_catalog_page_sk,cp_catalog_page_sk] + Project [cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_catalog_page_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] + BroadcastHashJoin [cs_promo_sk,p_promo_sk] + Project [cs_sold_date_sk,cs_catalog_page_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Project [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] + InputAdapter + SortMergeJoin [cs_item_sk,cs_order_number,cr_item_sk,cr_order_number] + WholeStageCodegen (12) + Sort [cs_item_sk,cs_order_number] + InputAdapter + Exchange [cs_item_sk,cs_order_number] #11 + WholeStageCodegen (11) + Filter [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_ext_sales_price,cs_net_profit] + WholeStageCodegen (14) + Sort [cr_item_sk,cr_order_number] + InputAdapter + Exchange [cr_item_sk,cr_order_number] #12 + WholeStageCodegen (13) + Filter [cr_item_sk,cr_order_number] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_amount,cr_net_loss] + InputAdapter + ReusedExchange [i_item_sk] #6 + InputAdapter + ReusedExchange [p_promo_sk] #7 + InputAdapter + ReusedExchange [d_date_sk] #8 + InputAdapter + BroadcastExchange #13 + WholeStageCodegen (18) + Filter [cp_catalog_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] + WholeStageCodegen (30) + HashAggregate [web_site_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ws_ext_sales_price)),sum(coalesce(cast(wr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ws_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(wr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel] #19 - WholeStageCodegen (64) - HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 - WholeStageCodegen (100) + Exchange [web_site_id] #14 + WholeStageCodegen (29) + HashAggregate [web_site_id,ws_ext_sales_price,wr_return_amt,ws_net_profit,wr_net_loss] 
[sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] + Project [ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] + BroadcastHashJoin [ws_web_site_sk,web_site_sk] + Project [ws_web_site_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Project [ws_sold_date_sk,ws_web_site_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] + BroadcastHashJoin [ws_promo_sk,p_promo_sk] + Project [ws_sold_date_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] + BroadcastHashJoin [ws_item_sk,i_item_sk] + Project [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] + InputAdapter + SortMergeJoin [ws_item_sk,ws_order_number,wr_item_sk,wr_order_number] + WholeStageCodegen (22) + Sort [ws_item_sk,ws_order_number] + InputAdapter + Exchange [ws_item_sk,ws_order_number] #15 + WholeStageCodegen (21) + Filter [ws_sold_date_sk,ws_web_site_sk,ws_item_sk,ws_promo_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_order_number,ws_ext_sales_price,ws_net_profit] + WholeStageCodegen (24) + Sort [wr_item_sk,wr_order_number] + InputAdapter + Exchange [wr_item_sk,wr_order_number] #16 + WholeStageCodegen (23) + Filter [wr_item_sk,wr_order_number] + ColumnarToRow + InputAdapter + Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] + InputAdapter + ReusedExchange [i_item_sk] #6 + InputAdapter + ReusedExchange [p_promo_sk] #7 + InputAdapter + ReusedExchange [d_date_sk] #8 + InputAdapter + BroadcastExchange #17 + WholeStageCodegen (28) + Filter [web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_site [web_site_sk,web_site_id] + WholeStageCodegen (65) + HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [channel] #18 + WholeStageCodegen (64) + HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 + WholeStageCodegen (98) HashAggregate [sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),channel,id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #20 - WholeStageCodegen (99) + Exchange #19 + WholeStageCodegen (97) HashAggregate [sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt index ddfdeadcf8eb3..9e687a07c2ca0 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt +++ 
b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/explain.txt @@ -1,120 +1,116 @@ == Physical Plan == -TakeOrderedAndProject (116) -+- * HashAggregate (115) - +- Exchange (114) - +- * HashAggregate (113) - +- Union (112) - :- * HashAggregate (106) - : +- Exchange (105) - : +- * HashAggregate (104) - : +- Union (103) - : :- * HashAggregate (97) - : : +- Exchange (96) - : : +- * HashAggregate (95) - : : +- Union (94) - : : :- * HashAggregate (39) - : : : +- Exchange (38) - : : : +- * HashAggregate (37) - : : : +- * Project (36) - : : : +- * BroadcastHashJoin Inner BuildRight (35) - : : : :- * Project (29) - : : : : +- * BroadcastHashJoin Inner BuildRight (28) - : : : : :- * Project (22) - : : : : : +- * BroadcastHashJoin Inner BuildRight (21) - : : : : : :- * Project (16) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (15) - : : : : : : :- * Project (9) - : : : : : : : +- * BroadcastHashJoin LeftOuter BuildRight (8) - : : : : : : : :- * Filter (3) - : : : : : : : : +- * ColumnarToRow (2) - : : : : : : : : +- Scan parquet default.store_sales (1) - : : : : : : : +- BroadcastExchange (7) - : : : : : : : +- * Filter (6) - : : : : : : : +- * ColumnarToRow (5) - : : : : : : : +- Scan parquet default.store_returns (4) - : : : : : : +- BroadcastExchange (14) - : : : : : : +- * Project (13) - : : : : : : +- * Filter (12) - : : : : : : +- * ColumnarToRow (11) - : : : : : : +- Scan parquet default.date_dim (10) - : : : : : +- BroadcastExchange (20) - : : : : : +- * Filter (19) - : : : : : +- * ColumnarToRow (18) - : : : : : +- Scan parquet default.store (17) - : : : : +- BroadcastExchange (27) - : : : : +- * Project (26) - : : : : +- * Filter (25) - : : : : +- * ColumnarToRow (24) - : : : : +- Scan parquet default.item (23) - : : : +- BroadcastExchange (34) - : : : +- * Project (33) - : : : +- * Filter (32) - : : : +- * ColumnarToRow (31) - : : : +- Scan parquet default.promotion (30) - : : :- * HashAggregate (66) - : : : +- Exchange (65) - : : : +- * HashAggregate (64) - : : : +- * Project (63) - : : : +- * BroadcastHashJoin Inner BuildRight (62) - : : : :- * Project (60) - : : : : +- * BroadcastHashJoin Inner BuildRight (59) - : : : : :- * Project (57) - : : : : : +- * BroadcastHashJoin Inner BuildRight (56) - : : : : : :- * Project (51) - : : : : : : +- * BroadcastHashJoin Inner BuildRight (50) - : : : : : : :- * Project (48) - : : : : : : : +- * BroadcastHashJoin LeftOuter BuildRight (47) - : : : : : : : :- * Filter (42) - : : : : : : : : +- * ColumnarToRow (41) - : : : : : : : : +- Scan parquet default.catalog_sales (40) - : : : : : : : +- BroadcastExchange (46) - : : : : : : : +- * Filter (45) - : : : : : : : +- * ColumnarToRow (44) - : : : : : : : +- Scan parquet default.catalog_returns (43) - : : : : : : +- ReusedExchange (49) - : : : : : +- BroadcastExchange (55) - : : : : : +- * Filter (54) - : : : : : +- * ColumnarToRow (53) - : : : : : +- Scan parquet default.catalog_page (52) - : : : : +- ReusedExchange (58) - : : : +- ReusedExchange (61) - : : +- * HashAggregate (93) - : : +- Exchange (92) - : : +- * HashAggregate (91) - : : +- * Project (90) - : : +- * BroadcastHashJoin Inner BuildRight (89) - : : :- * Project (87) - : : : +- * BroadcastHashJoin Inner BuildRight (86) - : : : :- * Project (84) - : : : : +- * BroadcastHashJoin Inner BuildRight (83) - : : : : :- * Project (78) - : : : : : +- * BroadcastHashJoin Inner BuildRight (77) - : : : : : :- * Project (75) - : : : : : : +- * BroadcastHashJoin LeftOuter BuildRight (74) - : : : : : : :- * Filter (69) - : 
: : : : : : +- * ColumnarToRow (68) - : : : : : : : +- Scan parquet default.web_sales (67) - : : : : : : +- BroadcastExchange (73) - : : : : : : +- * Filter (72) - : : : : : : +- * ColumnarToRow (71) - : : : : : : +- Scan parquet default.web_returns (70) - : : : : : +- ReusedExchange (76) - : : : : +- BroadcastExchange (82) - : : : : +- * Filter (81) - : : : : +- * ColumnarToRow (80) - : : : : +- Scan parquet default.web_site (79) - : : : +- ReusedExchange (85) - : : +- ReusedExchange (88) - : +- * HashAggregate (102) - : +- Exchange (101) - : +- * HashAggregate (100) - : +- * HashAggregate (99) - : +- ReusedExchange (98) - +- * HashAggregate (111) - +- Exchange (110) - +- * HashAggregate (109) - +- * HashAggregate (108) - +- ReusedExchange (107) +TakeOrderedAndProject (112) ++- * HashAggregate (111) + +- Exchange (110) + +- * HashAggregate (109) + +- Union (108) + :- * HashAggregate (97) + : +- Exchange (96) + : +- * HashAggregate (95) + : +- Union (94) + : :- * HashAggregate (39) + : : +- Exchange (38) + : : +- * HashAggregate (37) + : : +- * Project (36) + : : +- * BroadcastHashJoin Inner BuildRight (35) + : : :- * Project (29) + : : : +- * BroadcastHashJoin Inner BuildRight (28) + : : : :- * Project (22) + : : : : +- * BroadcastHashJoin Inner BuildRight (21) + : : : : :- * Project (16) + : : : : : +- * BroadcastHashJoin Inner BuildRight (15) + : : : : : :- * Project (9) + : : : : : : +- * BroadcastHashJoin LeftOuter BuildRight (8) + : : : : : : :- * Filter (3) + : : : : : : : +- * ColumnarToRow (2) + : : : : : : : +- Scan parquet default.store_sales (1) + : : : : : : +- BroadcastExchange (7) + : : : : : : +- * Filter (6) + : : : : : : +- * ColumnarToRow (5) + : : : : : : +- Scan parquet default.store_returns (4) + : : : : : +- BroadcastExchange (14) + : : : : : +- * Project (13) + : : : : : +- * Filter (12) + : : : : : +- * ColumnarToRow (11) + : : : : : +- Scan parquet default.date_dim (10) + : : : : +- BroadcastExchange (20) + : : : : +- * Filter (19) + : : : : +- * ColumnarToRow (18) + : : : : +- Scan parquet default.store (17) + : : : +- BroadcastExchange (27) + : : : +- * Project (26) + : : : +- * Filter (25) + : : : +- * ColumnarToRow (24) + : : : +- Scan parquet default.item (23) + : : +- BroadcastExchange (34) + : : +- * Project (33) + : : +- * Filter (32) + : : +- * ColumnarToRow (31) + : : +- Scan parquet default.promotion (30) + : :- * HashAggregate (66) + : : +- Exchange (65) + : : +- * HashAggregate (64) + : : +- * Project (63) + : : +- * BroadcastHashJoin Inner BuildRight (62) + : : :- * Project (60) + : : : +- * BroadcastHashJoin Inner BuildRight (59) + : : : :- * Project (57) + : : : : +- * BroadcastHashJoin Inner BuildRight (56) + : : : : :- * Project (51) + : : : : : +- * BroadcastHashJoin Inner BuildRight (50) + : : : : : :- * Project (48) + : : : : : : +- * BroadcastHashJoin LeftOuter BuildRight (47) + : : : : : : :- * Filter (42) + : : : : : : : +- * ColumnarToRow (41) + : : : : : : : +- Scan parquet default.catalog_sales (40) + : : : : : : +- BroadcastExchange (46) + : : : : : : +- * Filter (45) + : : : : : : +- * ColumnarToRow (44) + : : : : : : +- Scan parquet default.catalog_returns (43) + : : : : : +- ReusedExchange (49) + : : : : +- BroadcastExchange (55) + : : : : +- * Filter (54) + : : : : +- * ColumnarToRow (53) + : : : : +- Scan parquet default.catalog_page (52) + : : : +- ReusedExchange (58) + : : +- ReusedExchange (61) + : +- * HashAggregate (93) + : +- Exchange (92) + : +- * HashAggregate (91) + : +- * Project (90) + : +- * BroadcastHashJoin Inner 
BuildRight (89) + : :- * Project (87) + : : +- * BroadcastHashJoin Inner BuildRight (86) + : : :- * Project (84) + : : : +- * BroadcastHashJoin Inner BuildRight (83) + : : : :- * Project (78) + : : : : +- * BroadcastHashJoin Inner BuildRight (77) + : : : : :- * Project (75) + : : : : : +- * BroadcastHashJoin LeftOuter BuildRight (74) + : : : : : :- * Filter (69) + : : : : : : +- * ColumnarToRow (68) + : : : : : : +- Scan parquet default.web_sales (67) + : : : : : +- BroadcastExchange (73) + : : : : : +- * Filter (72) + : : : : : +- * ColumnarToRow (71) + : : : : : +- Scan parquet default.web_returns (70) + : : : : +- ReusedExchange (76) + : : : +- BroadcastExchange (82) + : : : +- * Filter (81) + : : : +- * ColumnarToRow (80) + : : : +- Scan parquet default.web_site (79) + : : +- ReusedExchange (85) + : +- ReusedExchange (88) + :- * HashAggregate (102) + : +- Exchange (101) + : +- * HashAggregate (100) + : +- * HashAggregate (99) + : +- ReusedExchange (98) + +- * HashAggregate (107) + +- Exchange (106) + +- * HashAggregate (105) + +- * HashAggregate (104) + +- ReusedExchange (103) (1) Scan parquet default.store_sales @@ -287,7 +283,7 @@ Results [6]: [s_store_id#17, sum#30, sum#31, isEmpty#32, sum#33, isEmpty#34] (38) Exchange Input [6]: [s_store_id#17, sum#30, sum#31, isEmpty#32, sum#33, isEmpty#34] -Arguments: hashpartitioning(s_store_id#17, 5), true, [id=#35] +Arguments: hashpartitioning(s_store_id#17, 5), ENSURE_REQUIREMENTS, [id=#35] (39) HashAggregate [codegen id : 7] Input [6]: [s_store_id#17, sum#30, sum#31, isEmpty#32, sum#33, isEmpty#34] @@ -409,7 +405,7 @@ Results [6]: [cp_catalog_page_id#57, sum#64, sum#65, isEmpty#66, sum#67, isEmpty (65) Exchange Input [6]: [cp_catalog_page_id#57, sum#64, sum#65, isEmpty#66, sum#67, isEmpty#68] -Arguments: hashpartitioning(cp_catalog_page_id#57, 5), true, [id=#69] +Arguments: hashpartitioning(cp_catalog_page_id#57, 5), ENSURE_REQUIREMENTS, [id=#69] (66) HashAggregate [codegen id : 14] Input [6]: [cp_catalog_page_id#57, sum#64, sum#65, isEmpty#66, sum#67, isEmpty#68] @@ -531,7 +527,7 @@ Results [6]: [web_site_id#91, sum#98, sum#99, isEmpty#100, sum#101, isEmpty#102] (92) Exchange Input [6]: [web_site_id#91, sum#98, sum#99, isEmpty#100, sum#101, isEmpty#102] -Arguments: hashpartitioning(web_site_id#91, 5), true, [id=#103] +Arguments: hashpartitioning(web_site_id#91, 5), ENSURE_REQUIREMENTS, [id=#103] (93) HashAggregate [codegen id : 21] Input [6]: [web_site_id#91, sum#98, sum#99, isEmpty#100, sum#101, isEmpty#102] @@ -551,7 +547,7 @@ Results [8]: [channel#39, id#40, sum#118, isEmpty#119, sum#120, isEmpty#121, sum (96) Exchange Input [8]: [channel#39, id#40, sum#118, isEmpty#119, sum#120, isEmpty#121, sum#122, isEmpty#123] -Arguments: hashpartitioning(channel#39, id#40, 5), true, [id=#124] +Arguments: hashpartitioning(channel#39, id#40, 5), ENSURE_REQUIREMENTS, [id=#124] (97) HashAggregate [codegen id : 23] Input [8]: [channel#39, id#40, sum#118, isEmpty#119, sum#120, isEmpty#121, sum#122, isEmpty#123] @@ -579,7 +575,7 @@ Results [7]: [channel#39, sum#149, isEmpty#150, sum#151, isEmpty#152, sum#153, i (101) Exchange Input [7]: [channel#39, sum#149, isEmpty#150, sum#151, isEmpty#152, sum#153, isEmpty#154] -Arguments: hashpartitioning(channel#39, 5), true, [id=#155] +Arguments: hashpartitioning(channel#39, 5), ENSURE_REQUIREMENTS, [id=#155] (102) HashAggregate [codegen id : 47] Input [7]: [channel#39, sum#149, isEmpty#150, sum#151, isEmpty#152, sum#153, isEmpty#154] @@ -588,75 +584,55 @@ Functions [3]: [sum(sales#140), sum(returns#141), 
sum(profit#142)] Aggregate Attributes [3]: [sum(sales#140)#156, sum(returns#141)#157, sum(profit#142)#158] Results [5]: [channel#39, null AS id#159, sum(sales#140)#156 AS sales#160, sum(returns#141)#157 AS returns#161, sum(profit#142)#158 AS profit#162] -(103) Union +(103) ReusedExchange [Reuses operator id: 96] +Output [8]: [channel#39, id#40, sum#163, isEmpty#164, sum#165, isEmpty#166, sum#167, isEmpty#168] -(104) HashAggregate [codegen id : 48] -Input [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -Keys [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#39, id#40, sales#128, returns#129, profit#130] - -(105) Exchange -Input [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -Arguments: hashpartitioning(channel#39, id#40, sales#128, returns#129, profit#130, 5), true, [id=#163] - -(106) HashAggregate [codegen id : 49] -Input [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -Keys [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -Functions: [] -Aggregate Attributes: [] -Results [5]: [channel#39, id#40, sales#128, returns#129, profit#130] - -(107) ReusedExchange [Reuses operator id: 96] -Output [8]: [channel#39, id#40, sum#164, isEmpty#165, sum#166, isEmpty#167, sum#168, isEmpty#169] - -(108) HashAggregate [codegen id : 72] -Input [8]: [channel#39, id#40, sum#164, isEmpty#165, sum#166, isEmpty#167, sum#168, isEmpty#169] +(104) HashAggregate [codegen id : 70] +Input [8]: [channel#39, id#40, sum#163, isEmpty#164, sum#165, isEmpty#166, sum#167, isEmpty#168] Keys [2]: [channel#39, id#40] Functions [3]: [sum(sales#41), sum(returns#42), sum(profit#43)] -Aggregate Attributes [3]: [sum(sales#41)#170, sum(returns#42)#171, sum(profit#43)#172] -Results [3]: [sum(sales#41)#170 AS sales#140, sum(returns#42)#171 AS returns#141, sum(profit#43)#172 AS profit#142] +Aggregate Attributes [3]: [sum(sales#41)#169, sum(returns#42)#170, sum(profit#43)#171] +Results [3]: [sum(sales#41)#169 AS sales#140, sum(returns#42)#170 AS returns#141, sum(profit#43)#171 AS profit#142] -(109) HashAggregate [codegen id : 72] +(105) HashAggregate [codegen id : 70] Input [3]: [sales#140, returns#141, profit#142] Keys: [] Functions [3]: [partial_sum(sales#140), partial_sum(returns#141), partial_sum(profit#142)] -Aggregate Attributes [6]: [sum#173, isEmpty#174, sum#175, isEmpty#176, sum#177, isEmpty#178] -Results [6]: [sum#179, isEmpty#180, sum#181, isEmpty#182, sum#183, isEmpty#184] +Aggregate Attributes [6]: [sum#172, isEmpty#173, sum#174, isEmpty#175, sum#176, isEmpty#177] +Results [6]: [sum#178, isEmpty#179, sum#180, isEmpty#181, sum#182, isEmpty#183] -(110) Exchange -Input [6]: [sum#179, isEmpty#180, sum#181, isEmpty#182, sum#183, isEmpty#184] -Arguments: SinglePartition, true, [id=#185] +(106) Exchange +Input [6]: [sum#178, isEmpty#179, sum#180, isEmpty#181, sum#182, isEmpty#183] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#184] -(111) HashAggregate [codegen id : 73] -Input [6]: [sum#179, isEmpty#180, sum#181, isEmpty#182, sum#183, isEmpty#184] +(107) HashAggregate [codegen id : 71] +Input [6]: [sum#178, isEmpty#179, sum#180, isEmpty#181, sum#182, isEmpty#183] Keys: [] Functions [3]: [sum(sales#140), sum(returns#141), sum(profit#142)] -Aggregate Attributes [3]: [sum(sales#140)#186, sum(returns#141)#187, sum(profit#142)#188] -Results [5]: [null AS channel#189, null AS id#190, sum(sales#140)#186 AS sales#191, sum(returns#141)#187 AS returns#192, sum(profit#142)#188 AS profit#193] +Aggregate 
Attributes [3]: [sum(sales#140)#185, sum(returns#141)#186, sum(profit#142)#187] +Results [5]: [null AS channel#188, null AS id#189, sum(sales#140)#185 AS sales#190, sum(returns#141)#186 AS returns#191, sum(profit#142)#187 AS profit#192] -(112) Union +(108) Union -(113) HashAggregate [codegen id : 74] +(109) HashAggregate [codegen id : 72] Input [5]: [channel#39, id#40, sales#128, returns#129, profit#130] Keys [5]: [channel#39, id#40, sales#128, returns#129, profit#130] Functions: [] Aggregate Attributes: [] Results [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -(114) Exchange +(110) Exchange Input [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -Arguments: hashpartitioning(channel#39, id#40, sales#128, returns#129, profit#130, 5), true, [id=#194] +Arguments: hashpartitioning(channel#39, id#40, sales#128, returns#129, profit#130, 5), ENSURE_REQUIREMENTS, [id=#193] -(115) HashAggregate [codegen id : 75] +(111) HashAggregate [codegen id : 73] Input [5]: [channel#39, id#40, sales#128, returns#129, profit#130] Keys [5]: [channel#39, id#40, sales#128, returns#129, profit#130] Functions: [] Aggregate Attributes: [] Results [5]: [channel#39, id#40, sales#128, returns#129, profit#130] -(116) TakeOrderedAndProject +(112) TakeOrderedAndProject Input [5]: [channel#39, id#40, sales#128, returns#129, profit#130] Arguments: 100, [channel#39 ASC NULLS FIRST, id#40 ASC NULLS FIRST], [channel#39, id#40, sales#128, returns#129, profit#130] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/simplified.txt index 602a670a49116..142af3f0755f3 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q80a/simplified.txt @@ -1,181 +1,173 @@ TakeOrderedAndProject [channel,id,sales,returns,profit] - WholeStageCodegen (75) + WholeStageCodegen (73) HashAggregate [channel,id,sales,returns,profit] InputAdapter Exchange [channel,id,sales,returns,profit] #1 - WholeStageCodegen (74) + WholeStageCodegen (72) HashAggregate [channel,id,sales,returns,profit] InputAdapter Union - WholeStageCodegen (49) - HashAggregate [channel,id,sales,returns,profit] + WholeStageCodegen (23) + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel,id,sales,returns,profit] #2 - WholeStageCodegen (48) - HashAggregate [channel,id,sales,returns,profit] + Exchange [channel,id] #2 + WholeStageCodegen (22) + HashAggregate [channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter Union - WholeStageCodegen (23) - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + WholeStageCodegen (7) + HashAggregate [s_store_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ss_ext_sales_price)),sum(coalesce(cast(sr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ss_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(sr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange [channel,id] #3 - WholeStageCodegen (22) - HashAggregate 
[channel,id,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Union - WholeStageCodegen (7) - HashAggregate [s_store_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ss_ext_sales_price)),sum(coalesce(cast(sr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ss_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(sr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [s_store_id] #4 - WholeStageCodegen (6) - HashAggregate [s_store_id,ss_ext_sales_price,sr_return_amt,ss_net_profit,sr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] - Project [ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] - BroadcastHashJoin [ss_promo_sk,p_promo_sk] - Project [ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] - BroadcastHashJoin [ss_item_sk,i_item_sk] - Project [ss_item_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] - BroadcastHashJoin [ss_store_sk,s_store_sk] - Project [ss_item_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - BroadcastHashJoin [ss_sold_date_sk,d_date_sk] - Project [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] - BroadcastHashJoin [ss_item_sk,ss_ticket_number,sr_item_sk,sr_ticket_number] - Filter [ss_sold_date_sk,ss_store_sk,ss_item_sk,ss_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_ext_sales_price,ss_net_profit] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Filter [sr_item_sk,sr_ticket_number] - ColumnarToRow - InputAdapter - Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_amt,sr_net_loss] - InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Project [d_date_sk] - Filter [d_date,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_date] - InputAdapter - BroadcastExchange #7 - WholeStageCodegen (3) - Filter [s_store_sk] - ColumnarToRow - InputAdapter - Scan parquet default.store [s_store_sk,s_store_id] + Exchange [s_store_id] #3 + WholeStageCodegen (6) + HashAggregate [s_store_id,ss_ext_sales_price,sr_return_amt,ss_net_profit,sr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] + Project [ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] + BroadcastHashJoin [ss_promo_sk,p_promo_sk] + Project [ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] + BroadcastHashJoin [ss_item_sk,i_item_sk] + Project [ss_item_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss,s_store_id] + BroadcastHashJoin [ss_store_sk,s_store_sk] + Project [ss_item_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] + BroadcastHashJoin [ss_sold_date_sk,d_date_sk] + Project [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ext_sales_price,ss_net_profit,sr_return_amt,sr_net_loss] + BroadcastHashJoin [ss_item_sk,ss_ticket_number,sr_item_sk,sr_ticket_number] + Filter [ss_sold_date_sk,ss_store_sk,ss_item_sk,ss_promo_sk] + ColumnarToRow InputAdapter - BroadcastExchange #8 - WholeStageCodegen (4) - Project [i_item_sk] - Filter [i_current_price,i_item_sk] - ColumnarToRow - 
InputAdapter - Scan parquet default.item [i_item_sk,i_current_price] + Scan parquet default.store_sales [ss_sold_date_sk,ss_item_sk,ss_store_sk,ss_promo_sk,ss_ticket_number,ss_ext_sales_price,ss_net_profit] InputAdapter - BroadcastExchange #9 - WholeStageCodegen (5) - Project [p_promo_sk] - Filter [p_channel_tv,p_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.promotion [p_promo_sk,p_channel_tv] - WholeStageCodegen (14) - HashAggregate [cp_catalog_page_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(cs_ext_sales_price)),sum(coalesce(cast(cr_return_amount as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(cs_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(cr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [cp_catalog_page_id] #10 - WholeStageCodegen (13) - HashAggregate [cp_catalog_page_id,cs_ext_sales_price,cr_return_amount,cs_net_profit,cr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] - Project [cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] - BroadcastHashJoin [cs_promo_sk,p_promo_sk] - Project [cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] - BroadcastHashJoin [cs_item_sk,i_item_sk] - Project [cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] - BroadcastHashJoin [cs_catalog_page_sk,cp_catalog_page_sk] - Project [cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - BroadcastHashJoin [cs_sold_date_sk,d_date_sk] - Project [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] - BroadcastHashJoin [cs_item_sk,cs_order_number,cr_item_sk,cr_order_number] - Filter [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_ext_sales_price,cs_net_profit] - InputAdapter - BroadcastExchange #11 - WholeStageCodegen (8) - Filter [cr_item_sk,cr_order_number] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_amount,cr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #6 + BroadcastExchange #4 + WholeStageCodegen (1) + Filter [sr_item_sk,sr_ticket_number] + ColumnarToRow + InputAdapter + Scan parquet default.store_returns [sr_item_sk,sr_ticket_number,sr_return_amt,sr_net_loss] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (2) + Project [d_date_sk] + Filter [d_date,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #12 - WholeStageCodegen (10) - Filter [cp_catalog_page_sk] - ColumnarToRow - InputAdapter - Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] + Scan parquet default.date_dim [d_date_sk,d_date] + InputAdapter + BroadcastExchange #6 + WholeStageCodegen (3) + Filter [s_store_sk] + ColumnarToRow + InputAdapter + Scan parquet default.store [s_store_sk,s_store_id] + InputAdapter + BroadcastExchange #7 + WholeStageCodegen (4) + Project [i_item_sk] + Filter [i_current_price,i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_current_price] + InputAdapter + BroadcastExchange #8 + WholeStageCodegen (5) + Project [p_promo_sk] + Filter [p_channel_tv,p_promo_sk] + ColumnarToRow 
+ InputAdapter + Scan parquet default.promotion [p_promo_sk,p_channel_tv] + WholeStageCodegen (14) + HashAggregate [cp_catalog_page_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(cs_ext_sales_price)),sum(coalesce(cast(cr_return_amount as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(cs_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(cr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [cp_catalog_page_id] #9 + WholeStageCodegen (13) + HashAggregate [cp_catalog_page_id,cs_ext_sales_price,cr_return_amount,cs_net_profit,cr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] + Project [cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] + BroadcastHashJoin [cs_promo_sk,p_promo_sk] + Project [cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] + BroadcastHashJoin [cs_item_sk,i_item_sk] + Project [cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss,cp_catalog_page_id] + BroadcastHashJoin [cs_catalog_page_sk,cp_catalog_page_sk] + Project [cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] + BroadcastHashJoin [cs_sold_date_sk,d_date_sk] + Project [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_ext_sales_price,cs_net_profit,cr_return_amount,cr_net_loss] + BroadcastHashJoin [cs_item_sk,cs_order_number,cr_item_sk,cr_order_number] + Filter [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk] + ColumnarToRow InputAdapter - ReusedExchange [i_item_sk] #8 + Scan parquet default.catalog_sales [cs_sold_date_sk,cs_catalog_page_sk,cs_item_sk,cs_promo_sk,cs_order_number,cs_ext_sales_price,cs_net_profit] InputAdapter - ReusedExchange [p_promo_sk] #9 - WholeStageCodegen (21) - HashAggregate [web_site_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ws_ext_sales_price)),sum(coalesce(cast(wr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ws_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(wr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] + BroadcastExchange #10 + WholeStageCodegen (8) + Filter [cr_item_sk,cr_order_number] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_returns [cr_item_sk,cr_order_number,cr_return_amount,cr_net_loss] + InputAdapter + ReusedExchange [d_date_sk] #5 + InputAdapter + BroadcastExchange #11 + WholeStageCodegen (10) + Filter [cp_catalog_page_sk] + ColumnarToRow + InputAdapter + Scan parquet default.catalog_page [cp_catalog_page_sk,cp_catalog_page_id] InputAdapter - Exchange [web_site_id] #13 - WholeStageCodegen (20) - HashAggregate [web_site_id,ws_ext_sales_price,wr_return_amt,ws_net_profit,wr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] - Project [ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] - BroadcastHashJoin [ws_promo_sk,p_promo_sk] - Project [ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] - BroadcastHashJoin [ws_item_sk,i_item_sk] - Project [ws_item_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] - BroadcastHashJoin [ws_web_site_sk,web_site_sk] - Project 
[ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Project [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] - BroadcastHashJoin [ws_item_sk,ws_order_number,wr_item_sk,wr_order_number] - Filter [ws_sold_date_sk,ws_web_site_sk,ws_item_sk,ws_promo_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_order_number,ws_ext_sales_price,ws_net_profit] - InputAdapter - BroadcastExchange #14 - WholeStageCodegen (15) - Filter [wr_item_sk,wr_order_number] - ColumnarToRow - InputAdapter - Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] - InputAdapter - ReusedExchange [d_date_sk] #6 - InputAdapter - BroadcastExchange #15 - WholeStageCodegen (17) - Filter [web_site_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_site [web_site_sk,web_site_id] + ReusedExchange [i_item_sk] #7 + InputAdapter + ReusedExchange [p_promo_sk] #8 + WholeStageCodegen (21) + HashAggregate [web_site_id,sum,sum,isEmpty,sum,isEmpty] [sum(UnscaledValue(ws_ext_sales_price)),sum(coalesce(cast(wr_return_amt as decimal(12,2)), 0.00)),sum(CheckOverflow((promote_precision(cast(ws_net_profit as decimal(13,2))) - promote_precision(cast(coalesce(cast(wr_net_loss as decimal(12,2)), 0.00) as decimal(13,2)))), DecimalType(13,2), true)),channel,id,sales,returns,profit,sum,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [web_site_id] #12 + WholeStageCodegen (20) + HashAggregate [web_site_id,ws_ext_sales_price,wr_return_amt,ws_net_profit,wr_net_loss] [sum,sum,isEmpty,sum,isEmpty,sum,sum,isEmpty,sum,isEmpty] + Project [ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] + BroadcastHashJoin [ws_promo_sk,p_promo_sk] + Project [ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] + BroadcastHashJoin [ws_item_sk,i_item_sk] + Project [ws_item_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss,web_site_id] + BroadcastHashJoin [ws_web_site_sk,web_site_sk] + Project [ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Project [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_ext_sales_price,ws_net_profit,wr_return_amt,wr_net_loss] + BroadcastHashJoin [ws_item_sk,ws_order_number,wr_item_sk,wr_order_number] + Filter [ws_sold_date_sk,ws_web_site_sk,ws_item_sk,ws_promo_sk] + ColumnarToRow InputAdapter - ReusedExchange [i_item_sk] #8 + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_web_site_sk,ws_promo_sk,ws_order_number,ws_ext_sales_price,ws_net_profit] InputAdapter - ReusedExchange [p_promo_sk] #9 - WholeStageCodegen (47) - HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - Exchange [channel] #16 - WholeStageCodegen (46) - HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] - HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] - InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 - WholeStageCodegen (73) + BroadcastExchange #13 + WholeStageCodegen (15) + Filter 
[wr_item_sk,wr_order_number] + ColumnarToRow + InputAdapter + Scan parquet default.web_returns [wr_item_sk,wr_order_number,wr_return_amt,wr_net_loss] + InputAdapter + ReusedExchange [d_date_sk] #5 + InputAdapter + BroadcastExchange #14 + WholeStageCodegen (17) + Filter [web_site_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_site [web_site_sk,web_site_id] + InputAdapter + ReusedExchange [i_item_sk] #7 + InputAdapter + ReusedExchange [p_promo_sk] #8 + WholeStageCodegen (47) + HashAggregate [channel,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + Exchange [channel] #15 + WholeStageCodegen (46) + HashAggregate [channel,sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] + HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] + InputAdapter + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 + WholeStageCodegen (71) HashAggregate [sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),channel,id,sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - Exchange #17 - WholeStageCodegen (72) + Exchange #16 + WholeStageCodegen (70) HashAggregate [sales,returns,profit] [sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty,sum,isEmpty] HashAggregate [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] [sum(sales),sum(returns),sum(profit),sales,returns,profit,sum,isEmpty,sum,isEmpty,sum,isEmpty] InputAdapter - ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #3 + ReusedExchange [channel,id,sum,isEmpty,sum,isEmpty,sum,isEmpty] #2 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt index f61c214640e33..96f13872a2ba2 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/explain.txt @@ -1,46 +1,42 @@ == Physical Plan == -TakeOrderedAndProject (42) -+- * Project (41) - +- Window (40) - +- * Sort (39) - +- Exchange (38) - +- * HashAggregate (37) - +- Exchange (36) - +- * HashAggregate (35) - +- Union (34) - :- * HashAggregate (28) - : +- Exchange (27) - : +- * HashAggregate (26) - : +- Union (25) - : :- * HashAggregate (19) - : : +- Exchange (18) - : : +- * HashAggregate (17) - : : +- * Project (16) - : : +- * BroadcastHashJoin Inner BuildRight (15) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.web_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.date_dim (4) - : : +- BroadcastExchange (14) - : : +- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.item (11) - : +- * HashAggregate (24) - : +- Exchange (23) - : +- * HashAggregate (22) - : +- * HashAggregate (21) - : +- ReusedExchange (20) - +- * HashAggregate (33) - +- Exchange (32) - +- * HashAggregate (31) - +- * HashAggregate (30) - +- ReusedExchange (29) +TakeOrderedAndProject (38) ++- * Project (37) + +- Window (36) + +- * Sort (35) + +- Exchange (34) + +- * HashAggregate (33) + +- Exchange (32) + 
+- * HashAggregate (31) + +- Union (30) + :- * HashAggregate (19) + : +- Exchange (18) + : +- * HashAggregate (17) + : +- * Project (16) + : +- * BroadcastHashJoin Inner BuildRight (15) + : :- * Project (10) + : : +- * BroadcastHashJoin Inner BuildRight (9) + : : :- * Filter (3) + : : : +- * ColumnarToRow (2) + : : : +- Scan parquet default.web_sales (1) + : : +- BroadcastExchange (8) + : : +- * Project (7) + : : +- * Filter (6) + : : +- * ColumnarToRow (5) + : : +- Scan parquet default.date_dim (4) + : +- BroadcastExchange (14) + : +- * Filter (13) + : +- * ColumnarToRow (12) + : +- Scan parquet default.item (11) + :- * HashAggregate (24) + : +- Exchange (23) + : +- * HashAggregate (22) + : +- * HashAggregate (21) + : +- ReusedExchange (20) + +- * HashAggregate (29) + +- Exchange (28) + +- * HashAggregate (27) + +- * HashAggregate (26) + +- ReusedExchange (25) (1) Scan parquet default.web_sales @@ -124,7 +120,7 @@ Results [3]: [i_category#9, i_class#8, sum#12] (18) Exchange Input [3]: [i_category#9, i_class#8, sum#12] -Arguments: hashpartitioning(i_category#9, i_class#8, 5), true, [id=#13] +Arguments: hashpartitioning(i_category#9, i_class#8, 5), ENSURE_REQUIREMENTS, [id=#13] (19) HashAggregate [codegen id : 4] Input [3]: [i_category#9, i_class#8, sum#12] @@ -152,7 +148,7 @@ Results [3]: [i_category#9, sum#24, isEmpty#25] (23) Exchange Input [3]: [i_category#9, sum#24, isEmpty#25] -Arguments: hashpartitioning(i_category#9, 5), true, [id=#26] +Arguments: hashpartitioning(i_category#9, 5), ENSURE_REQUIREMENTS, [id=#26] (24) HashAggregate [codegen id : 9] Input [3]: [i_category#9, sum#24, isEmpty#25] @@ -161,91 +157,71 @@ Functions [1]: [sum(total_sum#21)] Aggregate Attributes [1]: [sum(total_sum#21)#27] Results [6]: [sum(total_sum#21)#27 AS total_sum#28, i_category#9, null AS i_class#29, 0 AS g_category#30, 1 AS g_class#31, 1 AS lochierarchy#32] -(25) Union +(25) ReusedExchange [Reuses operator id: 18] +Output [3]: [i_category#9, i_class#8, sum#33] -(26) HashAggregate [codegen id : 10] -Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Functions: [] -Aggregate Attributes: [] -Results [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] - -(27) Exchange -Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Arguments: hashpartitioning(total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18, 5), true, [id=#33] - -(28) HashAggregate [codegen id : 11] -Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Functions: [] -Aggregate Attributes: [] -Results [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] - -(29) ReusedExchange [Reuses operator id: 18] -Output [3]: [i_category#9, i_class#8, sum#34] - -(30) HashAggregate [codegen id : 15] -Input [3]: [i_category#9, i_class#8, sum#34] +(26) HashAggregate [codegen id : 13] +Input [3]: [i_category#9, i_class#8, sum#33] Keys [2]: [i_category#9, i_class#8] Functions [1]: [sum(UnscaledValue(ws_net_paid#3))] -Aggregate Attributes [1]: [sum(UnscaledValue(ws_net_paid#3))#35] -Results [1]: [MakeDecimal(sum(UnscaledValue(ws_net_paid#3))#35,17,2) AS total_sum#21] +Aggregate Attributes [1]: [sum(UnscaledValue(ws_net_paid#3))#34] +Results [1]: 
[MakeDecimal(sum(UnscaledValue(ws_net_paid#3))#34,17,2) AS total_sum#21] -(31) HashAggregate [codegen id : 15] +(27) HashAggregate [codegen id : 13] Input [1]: [total_sum#21] Keys: [] Functions [1]: [partial_sum(total_sum#21)] -Aggregate Attributes [2]: [sum#36, isEmpty#37] -Results [2]: [sum#38, isEmpty#39] +Aggregate Attributes [2]: [sum#35, isEmpty#36] +Results [2]: [sum#37, isEmpty#38] -(32) Exchange -Input [2]: [sum#38, isEmpty#39] -Arguments: SinglePartition, true, [id=#40] +(28) Exchange +Input [2]: [sum#37, isEmpty#38] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#39] -(33) HashAggregate [codegen id : 16] -Input [2]: [sum#38, isEmpty#39] +(29) HashAggregate [codegen id : 14] +Input [2]: [sum#37, isEmpty#38] Keys: [] Functions [1]: [sum(total_sum#21)] -Aggregate Attributes [1]: [sum(total_sum#21)#41] -Results [6]: [sum(total_sum#21)#41 AS total_sum#42, null AS i_category#43, null AS i_class#44, 1 AS g_category#45, 1 AS g_class#46, 2 AS lochierarchy#47] +Aggregate Attributes [1]: [sum(total_sum#21)#40] +Results [6]: [sum(total_sum#21)#40 AS total_sum#41, null AS i_category#42, null AS i_class#43, 1 AS g_category#44, 1 AS g_class#45, 2 AS lochierarchy#46] -(34) Union +(30) Union -(35) HashAggregate [codegen id : 17] +(31) HashAggregate [codegen id : 15] Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Functions: [] Aggregate Attributes: [] Results [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -(36) Exchange +(32) Exchange Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Arguments: hashpartitioning(total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18, 5), true, [id=#48] +Arguments: hashpartitioning(total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18, 5), ENSURE_REQUIREMENTS, [id=#47] -(37) HashAggregate [codegen id : 18] +(33) HashAggregate [codegen id : 16] Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Functions: [] Aggregate Attributes: [] -Results [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, CASE WHEN (g_class#17 = 0) THEN i_category#9 END AS _w0#49] +Results [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, CASE WHEN (g_class#17 = 0) THEN i_category#9 END AS _w0#48] -(38) Exchange -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49] -Arguments: hashpartitioning(lochierarchy#18, _w0#49, 5), true, [id=#50] +(34) Exchange +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48] +Arguments: hashpartitioning(lochierarchy#18, _w0#48, 5), ENSURE_REQUIREMENTS, [id=#49] -(39) Sort [codegen id : 19] -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49] -Arguments: [lochierarchy#18 ASC NULLS FIRST, _w0#49 ASC NULLS FIRST, total_sum#15 DESC NULLS LAST], false, 0 +(35) Sort [codegen id : 17] +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48] +Arguments: [lochierarchy#18 ASC NULLS FIRST, _w0#48 ASC NULLS FIRST, total_sum#15 DESC NULLS LAST], false, 0 -(40) Window -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49] -Arguments: [rank(total_sum#15) windowspecdefinition(lochierarchy#18, _w0#49, total_sum#15 DESC NULLS 
LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#51], [lochierarchy#18, _w0#49], [total_sum#15 DESC NULLS LAST] +(36) Window +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48] +Arguments: [rank(total_sum#15) windowspecdefinition(lochierarchy#18, _w0#48, total_sum#15 DESC NULLS LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#50], [lochierarchy#18, _w0#48], [total_sum#15 DESC NULLS LAST] -(41) Project [codegen id : 20] -Output [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#51] -Input [6]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49, rank_within_parent#51] +(37) Project [codegen id : 18] +Output [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#50] +Input [6]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48, rank_within_parent#50] -(42) TakeOrderedAndProject -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#51] -Arguments: 100, [lochierarchy#18 DESC NULLS LAST, CASE WHEN (lochierarchy#18 = 0) THEN i_category#9 END ASC NULLS FIRST, rank_within_parent#51 ASC NULLS FIRST], [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#51] +(38) TakeOrderedAndProject +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#50] +Arguments: 100, [lochierarchy#18 DESC NULLS LAST, CASE WHEN (lochierarchy#18 = 0) THEN i_category#9 END ASC NULLS FIRST, rank_within_parent#50 ASC NULLS FIRST], [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#50] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/simplified.txt index 2bd128100f527..d2d6b37e90f71 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a.sf100/simplified.txt @@ -1,72 +1,64 @@ TakeOrderedAndProject [lochierarchy,i_category,rank_within_parent,total_sum,i_class] - WholeStageCodegen (20) + WholeStageCodegen (18) Project [total_sum,i_category,i_class,lochierarchy,rank_within_parent] InputAdapter Window [total_sum,lochierarchy,_w0] - WholeStageCodegen (19) + WholeStageCodegen (17) Sort [lochierarchy,_w0,total_sum] InputAdapter Exchange [lochierarchy,_w0] #1 - WholeStageCodegen (18) + WholeStageCodegen (16) HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] [_w0] InputAdapter Exchange [total_sum,i_category,i_class,g_category,g_class,lochierarchy] #2 - WholeStageCodegen (17) + WholeStageCodegen (15) HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] InputAdapter Union - WholeStageCodegen (11) - HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] + WholeStageCodegen (4) + HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,g_category,g_class,lochierarchy,sum] InputAdapter - Exchange [total_sum,i_category,i_class,g_category,g_class,lochierarchy] #3 - WholeStageCodegen (10) - HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] - InputAdapter - Union - WholeStageCodegen (4) - HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,g_category,g_class,lochierarchy,sum] + Exchange [i_category,i_class] #3 + 
WholeStageCodegen (3) + HashAggregate [i_category,i_class,ws_net_paid] [sum,sum] + Project [ws_net_paid,i_class,i_category] + BroadcastHashJoin [ws_item_sk,i_item_sk] + Project [ws_item_sk,ws_net_paid] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_sold_date_sk,ws_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_net_paid] InputAdapter - Exchange [i_category,i_class] #4 - WholeStageCodegen (3) - HashAggregate [i_category,i_class,ws_net_paid] [sum,sum] - Project [ws_net_paid,i_class,i_category] - BroadcastHashJoin [ws_item_sk,i_item_sk] - Project [ws_item_sk,ws_net_paid] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk,ws_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_net_paid] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_class,i_category] - WholeStageCodegen (9) - HashAggregate [i_category,sum,isEmpty] [sum(total_sum),total_sum,i_class,g_category,g_class,lochierarchy,sum,isEmpty] - InputAdapter - Exchange [i_category] #7 - WholeStageCodegen (8) - HashAggregate [i_category,total_sum] [sum,isEmpty,sum,isEmpty] - HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,sum] - InputAdapter - ReusedExchange [i_category,i_class,sum] #4 - WholeStageCodegen (16) + Scan parquet default.date_dim [d_date_sk,d_month_seq] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_class,i_category] + WholeStageCodegen (9) + HashAggregate [i_category,sum,isEmpty] [sum(total_sum),total_sum,i_class,g_category,g_class,lochierarchy,sum,isEmpty] + InputAdapter + Exchange [i_category] #6 + WholeStageCodegen (8) + HashAggregate [i_category,total_sum] [sum,isEmpty,sum,isEmpty] + HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,sum] + InputAdapter + ReusedExchange [i_category,i_class,sum] #3 + WholeStageCodegen (14) HashAggregate [sum,isEmpty] [sum(total_sum),total_sum,i_category,i_class,g_category,g_class,lochierarchy,sum,isEmpty] InputAdapter - Exchange #8 - WholeStageCodegen (15) + Exchange #7 + WholeStageCodegen (13) HashAggregate [total_sum] [sum,isEmpty,sum,isEmpty] HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,sum] InputAdapter - ReusedExchange [i_category,i_class,sum] #4 + ReusedExchange [i_category,i_class,sum] #3 diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt index f61c214640e33..96f13872a2ba2 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/explain.txt @@ -1,46 +1,42 @@ == Physical Plan == -TakeOrderedAndProject (42) -+- * Project (41) - +- Window (40) - +- * Sort (39) - +- Exchange (38) - +- * HashAggregate (37) - +- Exchange (36) - +- * HashAggregate (35) - +- Union (34) - :- * 
HashAggregate (28) - : +- Exchange (27) - : +- * HashAggregate (26) - : +- Union (25) - : :- * HashAggregate (19) - : : +- Exchange (18) - : : +- * HashAggregate (17) - : : +- * Project (16) - : : +- * BroadcastHashJoin Inner BuildRight (15) - : : :- * Project (10) - : : : +- * BroadcastHashJoin Inner BuildRight (9) - : : : :- * Filter (3) - : : : : +- * ColumnarToRow (2) - : : : : +- Scan parquet default.web_sales (1) - : : : +- BroadcastExchange (8) - : : : +- * Project (7) - : : : +- * Filter (6) - : : : +- * ColumnarToRow (5) - : : : +- Scan parquet default.date_dim (4) - : : +- BroadcastExchange (14) - : : +- * Filter (13) - : : +- * ColumnarToRow (12) - : : +- Scan parquet default.item (11) - : +- * HashAggregate (24) - : +- Exchange (23) - : +- * HashAggregate (22) - : +- * HashAggregate (21) - : +- ReusedExchange (20) - +- * HashAggregate (33) - +- Exchange (32) - +- * HashAggregate (31) - +- * HashAggregate (30) - +- ReusedExchange (29) +TakeOrderedAndProject (38) ++- * Project (37) + +- Window (36) + +- * Sort (35) + +- Exchange (34) + +- * HashAggregate (33) + +- Exchange (32) + +- * HashAggregate (31) + +- Union (30) + :- * HashAggregate (19) + : +- Exchange (18) + : +- * HashAggregate (17) + : +- * Project (16) + : +- * BroadcastHashJoin Inner BuildRight (15) + : :- * Project (10) + : : +- * BroadcastHashJoin Inner BuildRight (9) + : : :- * Filter (3) + : : : +- * ColumnarToRow (2) + : : : +- Scan parquet default.web_sales (1) + : : +- BroadcastExchange (8) + : : +- * Project (7) + : : +- * Filter (6) + : : +- * ColumnarToRow (5) + : : +- Scan parquet default.date_dim (4) + : +- BroadcastExchange (14) + : +- * Filter (13) + : +- * ColumnarToRow (12) + : +- Scan parquet default.item (11) + :- * HashAggregate (24) + : +- Exchange (23) + : +- * HashAggregate (22) + : +- * HashAggregate (21) + : +- ReusedExchange (20) + +- * HashAggregate (29) + +- Exchange (28) + +- * HashAggregate (27) + +- * HashAggregate (26) + +- ReusedExchange (25) (1) Scan parquet default.web_sales @@ -124,7 +120,7 @@ Results [3]: [i_category#9, i_class#8, sum#12] (18) Exchange Input [3]: [i_category#9, i_class#8, sum#12] -Arguments: hashpartitioning(i_category#9, i_class#8, 5), true, [id=#13] +Arguments: hashpartitioning(i_category#9, i_class#8, 5), ENSURE_REQUIREMENTS, [id=#13] (19) HashAggregate [codegen id : 4] Input [3]: [i_category#9, i_class#8, sum#12] @@ -152,7 +148,7 @@ Results [3]: [i_category#9, sum#24, isEmpty#25] (23) Exchange Input [3]: [i_category#9, sum#24, isEmpty#25] -Arguments: hashpartitioning(i_category#9, 5), true, [id=#26] +Arguments: hashpartitioning(i_category#9, 5), ENSURE_REQUIREMENTS, [id=#26] (24) HashAggregate [codegen id : 9] Input [3]: [i_category#9, sum#24, isEmpty#25] @@ -161,91 +157,71 @@ Functions [1]: [sum(total_sum#21)] Aggregate Attributes [1]: [sum(total_sum#21)#27] Results [6]: [sum(total_sum#21)#27 AS total_sum#28, i_category#9, null AS i_class#29, 0 AS g_category#30, 1 AS g_class#31, 1 AS lochierarchy#32] -(25) Union +(25) ReusedExchange [Reuses operator id: 18] +Output [3]: [i_category#9, i_class#8, sum#33] -(26) HashAggregate [codegen id : 10] -Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Functions: [] -Aggregate Attributes: [] -Results [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] - -(27) Exchange -Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, 
lochierarchy#18] -Arguments: hashpartitioning(total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18, 5), true, [id=#33] - -(28) HashAggregate [codegen id : 11] -Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Functions: [] -Aggregate Attributes: [] -Results [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] - -(29) ReusedExchange [Reuses operator id: 18] -Output [3]: [i_category#9, i_class#8, sum#34] - -(30) HashAggregate [codegen id : 15] -Input [3]: [i_category#9, i_class#8, sum#34] +(26) HashAggregate [codegen id : 13] +Input [3]: [i_category#9, i_class#8, sum#33] Keys [2]: [i_category#9, i_class#8] Functions [1]: [sum(UnscaledValue(ws_net_paid#3))] -Aggregate Attributes [1]: [sum(UnscaledValue(ws_net_paid#3))#35] -Results [1]: [MakeDecimal(sum(UnscaledValue(ws_net_paid#3))#35,17,2) AS total_sum#21] +Aggregate Attributes [1]: [sum(UnscaledValue(ws_net_paid#3))#34] +Results [1]: [MakeDecimal(sum(UnscaledValue(ws_net_paid#3))#34,17,2) AS total_sum#21] -(31) HashAggregate [codegen id : 15] +(27) HashAggregate [codegen id : 13] Input [1]: [total_sum#21] Keys: [] Functions [1]: [partial_sum(total_sum#21)] -Aggregate Attributes [2]: [sum#36, isEmpty#37] -Results [2]: [sum#38, isEmpty#39] +Aggregate Attributes [2]: [sum#35, isEmpty#36] +Results [2]: [sum#37, isEmpty#38] -(32) Exchange -Input [2]: [sum#38, isEmpty#39] -Arguments: SinglePartition, true, [id=#40] +(28) Exchange +Input [2]: [sum#37, isEmpty#38] +Arguments: SinglePartition, ENSURE_REQUIREMENTS, [id=#39] -(33) HashAggregate [codegen id : 16] -Input [2]: [sum#38, isEmpty#39] +(29) HashAggregate [codegen id : 14] +Input [2]: [sum#37, isEmpty#38] Keys: [] Functions [1]: [sum(total_sum#21)] -Aggregate Attributes [1]: [sum(total_sum#21)#41] -Results [6]: [sum(total_sum#21)#41 AS total_sum#42, null AS i_category#43, null AS i_class#44, 1 AS g_category#45, 1 AS g_class#46, 2 AS lochierarchy#47] +Aggregate Attributes [1]: [sum(total_sum#21)#40] +Results [6]: [sum(total_sum#21)#40 AS total_sum#41, null AS i_category#42, null AS i_class#43, 1 AS g_category#44, 1 AS g_class#45, 2 AS lochierarchy#46] -(34) Union +(30) Union -(35) HashAggregate [codegen id : 17] +(31) HashAggregate [codegen id : 15] Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Functions: [] Aggregate Attributes: [] Results [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -(36) Exchange +(32) Exchange Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] -Arguments: hashpartitioning(total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18, 5), true, [id=#48] +Arguments: hashpartitioning(total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18, 5), ENSURE_REQUIREMENTS, [id=#47] -(37) HashAggregate [codegen id : 18] +(33) HashAggregate [codegen id : 16] Input [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Keys [6]: [total_sum#15, i_category#9, i_class#8, g_category#16, g_class#17, lochierarchy#18] Functions: [] Aggregate Attributes: [] -Results [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, CASE WHEN (g_class#17 = 0) THEN i_category#9 END AS _w0#49] 
+Results [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, CASE WHEN (g_class#17 = 0) THEN i_category#9 END AS _w0#48] -(38) Exchange -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49] -Arguments: hashpartitioning(lochierarchy#18, _w0#49, 5), true, [id=#50] +(34) Exchange +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48] +Arguments: hashpartitioning(lochierarchy#18, _w0#48, 5), ENSURE_REQUIREMENTS, [id=#49] -(39) Sort [codegen id : 19] -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49] -Arguments: [lochierarchy#18 ASC NULLS FIRST, _w0#49 ASC NULLS FIRST, total_sum#15 DESC NULLS LAST], false, 0 +(35) Sort [codegen id : 17] +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48] +Arguments: [lochierarchy#18 ASC NULLS FIRST, _w0#48 ASC NULLS FIRST, total_sum#15 DESC NULLS LAST], false, 0 -(40) Window -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49] -Arguments: [rank(total_sum#15) windowspecdefinition(lochierarchy#18, _w0#49, total_sum#15 DESC NULLS LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#51], [lochierarchy#18, _w0#49], [total_sum#15 DESC NULLS LAST] +(36) Window +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48] +Arguments: [rank(total_sum#15) windowspecdefinition(lochierarchy#18, _w0#48, total_sum#15 DESC NULLS LAST, specifiedwindowframe(RowFrame, unboundedpreceding$(), currentrow$())) AS rank_within_parent#50], [lochierarchy#18, _w0#48], [total_sum#15 DESC NULLS LAST] -(41) Project [codegen id : 20] -Output [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#51] -Input [6]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#49, rank_within_parent#51] +(37) Project [codegen id : 18] +Output [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#50] +Input [6]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, _w0#48, rank_within_parent#50] -(42) TakeOrderedAndProject -Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#51] -Arguments: 100, [lochierarchy#18 DESC NULLS LAST, CASE WHEN (lochierarchy#18 = 0) THEN i_category#9 END ASC NULLS FIRST, rank_within_parent#51 ASC NULLS FIRST], [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#51] +(38) TakeOrderedAndProject +Input [5]: [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#50] +Arguments: 100, [lochierarchy#18 DESC NULLS LAST, CASE WHEN (lochierarchy#18 = 0) THEN i_category#9 END ASC NULLS FIRST, rank_within_parent#50 ASC NULLS FIRST], [total_sum#15, i_category#9, i_class#8, lochierarchy#18, rank_within_parent#50] diff --git a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/simplified.txt b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/simplified.txt index 2bd128100f527..d2d6b37e90f71 100644 --- a/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/simplified.txt +++ b/sql/core/src/test/resources/tpcds-plan-stability/approved-plans-v2_7/q86a/simplified.txt @@ -1,72 +1,64 @@ TakeOrderedAndProject [lochierarchy,i_category,rank_within_parent,total_sum,i_class] - WholeStageCodegen (20) + WholeStageCodegen (18) Project [total_sum,i_category,i_class,lochierarchy,rank_within_parent] InputAdapter Window [total_sum,lochierarchy,_w0] - WholeStageCodegen (19) + WholeStageCodegen (17) Sort 
[lochierarchy,_w0,total_sum] InputAdapter Exchange [lochierarchy,_w0] #1 - WholeStageCodegen (18) + WholeStageCodegen (16) HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] [_w0] InputAdapter Exchange [total_sum,i_category,i_class,g_category,g_class,lochierarchy] #2 - WholeStageCodegen (17) + WholeStageCodegen (15) HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] InputAdapter Union - WholeStageCodegen (11) - HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] + WholeStageCodegen (4) + HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,g_category,g_class,lochierarchy,sum] InputAdapter - Exchange [total_sum,i_category,i_class,g_category,g_class,lochierarchy] #3 - WholeStageCodegen (10) - HashAggregate [total_sum,i_category,i_class,g_category,g_class,lochierarchy] - InputAdapter - Union - WholeStageCodegen (4) - HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,g_category,g_class,lochierarchy,sum] + Exchange [i_category,i_class] #3 + WholeStageCodegen (3) + HashAggregate [i_category,i_class,ws_net_paid] [sum,sum] + Project [ws_net_paid,i_class,i_category] + BroadcastHashJoin [ws_item_sk,i_item_sk] + Project [ws_item_sk,ws_net_paid] + BroadcastHashJoin [ws_sold_date_sk,d_date_sk] + Filter [ws_sold_date_sk,ws_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_net_paid] InputAdapter - Exchange [i_category,i_class] #4 - WholeStageCodegen (3) - HashAggregate [i_category,i_class,ws_net_paid] [sum,sum] - Project [ws_net_paid,i_class,i_category] - BroadcastHashJoin [ws_item_sk,i_item_sk] - Project [ws_item_sk,ws_net_paid] - BroadcastHashJoin [ws_sold_date_sk,d_date_sk] - Filter [ws_sold_date_sk,ws_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.web_sales [ws_sold_date_sk,ws_item_sk,ws_net_paid] - InputAdapter - BroadcastExchange #5 - WholeStageCodegen (1) - Project [d_date_sk] - Filter [d_month_seq,d_date_sk] - ColumnarToRow - InputAdapter - Scan parquet default.date_dim [d_date_sk,d_month_seq] + BroadcastExchange #4 + WholeStageCodegen (1) + Project [d_date_sk] + Filter [d_month_seq,d_date_sk] + ColumnarToRow InputAdapter - BroadcastExchange #6 - WholeStageCodegen (2) - Filter [i_item_sk] - ColumnarToRow - InputAdapter - Scan parquet default.item [i_item_sk,i_class,i_category] - WholeStageCodegen (9) - HashAggregate [i_category,sum,isEmpty] [sum(total_sum),total_sum,i_class,g_category,g_class,lochierarchy,sum,isEmpty] - InputAdapter - Exchange [i_category] #7 - WholeStageCodegen (8) - HashAggregate [i_category,total_sum] [sum,isEmpty,sum,isEmpty] - HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,sum] - InputAdapter - ReusedExchange [i_category,i_class,sum] #4 - WholeStageCodegen (16) + Scan parquet default.date_dim [d_date_sk,d_month_seq] + InputAdapter + BroadcastExchange #5 + WholeStageCodegen (2) + Filter [i_item_sk] + ColumnarToRow + InputAdapter + Scan parquet default.item [i_item_sk,i_class,i_category] + WholeStageCodegen (9) + HashAggregate [i_category,sum,isEmpty] [sum(total_sum),total_sum,i_class,g_category,g_class,lochierarchy,sum,isEmpty] + InputAdapter + Exchange [i_category] #6 + WholeStageCodegen (8) + HashAggregate [i_category,total_sum] [sum,isEmpty,sum,isEmpty] + HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,sum] + InputAdapter + ReusedExchange [i_category,i_class,sum] #3 + WholeStageCodegen (14) 
HashAggregate [sum,isEmpty] [sum(total_sum),total_sum,i_category,i_class,g_category,g_class,lochierarchy,sum,isEmpty] InputAdapter - Exchange #8 - WholeStageCodegen (15) + Exchange #7 + WholeStageCodegen (13) HashAggregate [total_sum] [sum,isEmpty,sum,isEmpty] HashAggregate [i_category,i_class,sum] [sum(UnscaledValue(ws_net_paid)),total_sum,sum] InputAdapter - ReusedExchange [i_category,i_class,sum] #4 + ReusedExchange [i_category,i_class,sum] #3 diff --git a/sql/core/src/test/scala/org/apache/spark/deploy/history/Utils.scala b/sql/core/src/test/scala/org/apache/spark/deploy/history/Utils.scala new file mode 100644 index 0000000000000..f73305b1b001e --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/deploy/history/Utils.scala @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.deploy.history + +import org.apache.spark.SparkConf +import org.apache.spark.internal.config.History.HISTORY_LOG_DIR +import org.apache.spark.util.ManualClock + +object Utils { + def withFsHistoryProvider(logDir: String)(fn: FsHistoryProvider => Unit): Unit = { + var provider: FsHistoryProvider = null + try { + val clock = new ManualClock() + val conf = new SparkConf().set(HISTORY_LOG_DIR, logDir) + provider = new FsHistoryProvider(conf, clock) + provider.checkForLogs() + fn(provider) + } finally { + if (provider != null) { + provider.stop() + provider = null + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala index 2b4abed645910..4991e397eb11c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/ApproximatePercentileQuerySuite.scala @@ -150,7 +150,7 @@ class ApproximatePercentileQuerySuite extends QueryTest with SharedSparkSession (1 to 1000).toDF("col").createOrReplaceTempView(table) checkAnswer( spark.sql(s"SELECT percentile_approx(col, array(0.25 + 0.25D), 200 + 800) FROM $table"), - Row(Seq(499)) + Row(Seq(500)) ) } } @@ -296,4 +296,23 @@ class ApproximatePercentileQuerySuite extends QueryTest with SharedSparkSession buffer.quantileSummaries assert(buffer.isCompressed) } + + test("SPARK-32908: maximum target error in percentile_approx") { + withTempView(table) { + spark.read + .schema("col int") + .csv(testFile("test-data/percentile_approx-input.csv.bz2")) + .repartition(1) + .createOrReplaceTempView(table) + checkAnswer( + spark.sql( + s"""SELECT + | percentile_approx(col, 0.77, 1000), + | percentile_approx(col, 0.77, 10000), + | percentile_approx(col, 0.77, 100000), + | percentile_approx(col, 0.77, 1000000) + |FROM $table""".stripMargin), + Row(18, 17, 17, 17)) + } + } }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/BenchmarkQueryTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/BenchmarkQueryTest.scala index 2c3b37a1498ec..d58bf2c6260b1 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/BenchmarkQueryTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/BenchmarkQueryTest.scala @@ -63,11 +63,17 @@ abstract class BenchmarkQueryTest extends QueryTest with SharedSparkSession { protected def checkGeneratedCode(plan: SparkPlan, checkMethodCodeSize: Boolean = true): Unit = { val codegenSubtrees = new collection.mutable.HashSet[WholeStageCodegenExec]() - plan foreach { - case s: WholeStageCodegenExec => - codegenSubtrees += s - case _ => + + def findSubtrees(plan: SparkPlan): Unit = { + plan foreach { + case s: WholeStageCodegenExec => + codegenSubtrees += s + case s => + s.subqueries.foreach(findSubtrees) + } } + + findSubtrees(plan) codegenSubtrees.toSeq.foreach { subtree => val code = subtree.doCodeGen()._2 val (_, ByteCodeStats(maxMethodCodeSize, _, _)) = try { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala index 20f2a7f947b81..11eba933284f8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala @@ -17,6 +17,9 @@ package org.apache.spark.sql +import java.io.{File, FilenameFilter} +import java.nio.file.{Files, Paths} + import scala.collection.mutable.HashSet import scala.concurrent.duration._ @@ -25,6 +28,7 @@ import org.apache.spark.executor.DataReadMethod._ import org.apache.spark.executor.DataReadMethod.DataReadMethod import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart} import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.catalyst.analysis.TempTableAlreadyExistsException import org.apache.spark.sql.catalyst.expressions.SubqueryExpression import org.apache.spark.sql.catalyst.plans.logical.{BROADCAST, Join, JoinStrategyHint, SHUFFLE_HASH} import org.apache.spark.sql.catalyst.util.DateTimeConstants @@ -140,6 +144,16 @@ class CachedTableSuite extends QueryTest with SQLTestUtils } } + test("cache table as select - existing temp view") { + withTempView("tempView") { + sql("CREATE TEMPORARY VIEW tempView as SELECT 1") + val e = intercept[TempTableAlreadyExistsException] { + sql("CACHE TABLE tempView AS SELECT 1") + } + assert(e.getMessage.contains("Temporary view 'tempView' already exists")) + } + } + test("uncaching temp table") { withTempView("tempTable1", "tempTable2") { testData.select("key").createOrReplaceTempView("tempTable1") @@ -824,7 +838,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils } } - test("SPARK-19993 nested subquery caching and scalar + predicate subqueris") { + test("SPARK-19993 nested subquery caching and scalar + predicate subqueries") { withTempView("t1", "t2", "t3", "t4") { Seq(1).toDF("c1").createOrReplaceTempView("t1") Seq(2).toDF("c1").createOrReplaceTempView("t2") @@ -875,17 +889,17 @@ class CachedTableSuite extends QueryTest with SQLTestUtils } private def checkIfNoJobTriggered[T](f: => T): T = { - var numJobTrigered = 0 + var numJobTriggered = 0 val jobListener = new SparkListener { override def onJobStart(jobStart: SparkListenerJobStart): Unit = { - numJobTrigered += 1 + numJobTriggered += 1 } } sparkContext.addSparkListener(jobListener) try { val result = f sparkContext.listenerBus.waitUntilEmpty() - assert(numJobTrigered === 0) + 
assert(numJobTriggered === 0) result } finally { sparkContext.removeSparkListener(jobListener) @@ -1184,4 +1198,111 @@ class CachedTableSuite extends QueryTest with SQLTestUtils assert(spark.sharedState.cacheManager.isEmpty) } } + + test("SPARK-33228: Don't uncache data when replacing an existing view having the same plan") { + withTempView("tempView") { + spark.catalog.clearCache() + val df = spark.range(1).selectExpr("id a", "id b") + df.cache() + assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined) + df.createOrReplaceTempView("tempView") + assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined) + df.createOrReplaceTempView("tempView") + assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined) + } + + withTempView("tempGlobalTempView") { + spark.catalog.clearCache() + val df = spark.range(1).selectExpr("id a", "id b") + df.cache() + assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined) + df.createOrReplaceGlobalTempView("tempGlobalTempView") + assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined) + df.createOrReplaceGlobalTempView("tempGlobalTempView") + assert(spark.sharedState.cacheManager.lookupCachedData(df).isDefined) + } + } + + test("SPARK-33290: REFRESH TABLE should invalidate all caches referencing the table") { + withTable("t") { + withTempPath { path => + withTempView("tempView1", "tempView2") { + Seq((1 -> "a")).toDF("i", "j").write.parquet(path.getCanonicalPath) + sql(s"CREATE TABLE t USING parquet LOCATION '${path.toURI}'") + sql("CREATE TEMPORARY VIEW tempView1 AS SELECT * FROM t") + sql("CACHE TABLE tempView2 AS SELECT i FROM tempView1") + checkAnswer(sql("SELECT * FROM tempView1"), Seq(Row(1, "a"))) + checkAnswer(sql("SELECT * FROM tempView2"), Seq(Row(1))) + + Utils.deleteRecursively(path) + sql("REFRESH TABLE tempView1") + checkAnswer(sql("SELECT * FROM tempView1"), Seq.empty) + checkAnswer(sql("SELECT * FROM tempView2"), Seq.empty) + } + } + } + } + + test("SPARK-33729: REFRESH TABLE should not use cached/stale plan") { + def moveParquetFiles(src: File, dst: File): Unit = { + src.listFiles(new FilenameFilter { + override def accept(dir: File, name: String): Boolean = name.endsWith("parquet") + }).foreach { f => + Files.move(f.toPath, Paths.get(dst.getAbsolutePath, f.getName)) + } + // cleanup the rest of the files + src.listFiles().foreach(_.delete()) + src.delete() + } + + withTable("t") { + withTempDir { dir => + val path1 = new File(dir, "path1") + Seq((1 -> "a")).toDF("i", "j").write.parquet(path1.getCanonicalPath) + moveParquetFiles(path1, dir) + sql(s"CREATE TABLE t (i INT, j STRING) USING parquet LOCATION '${dir.toURI}'") + sql("CACHE TABLE t") + checkAnswer(sql("SELECT * FROM t"), Row(1, "a") :: Nil) + + val path2 = new File(dir, "path2") + Seq(2 -> "b").toDF("i", "j").write.parquet(path2.getCanonicalPath) + moveParquetFiles(path2, dir) + sql("REFRESH TABLE t") + checkAnswer(sql("SELECT * FROM t"), Row(1, "a") :: Row(2, "b") :: Nil) + } + } + } + + test("SPARK-33647: cache table support for permanent view") { + withView("v1") { + spark.catalog.clearCache() + sql("create or replace view v1 as select 1") + sql("cache table v1") + assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isDefined) + sql("create or replace view v1 as select 1, 2") + assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1")).isEmpty) + sql("cache table v1") + assert(spark.sharedState.cacheManager.lookupCachedData(sql("select 1, 2")).isDefined) + } + } + + test("SPARK-33786: 
Cache's storage level should be respected when a table name is altered.") { + withTable("old", "new") { + withTempPath { path => + def getStorageLevel(tableName: String): StorageLevel = { + val table = spark.table(tableName) + val cachedData = spark.sharedState.cacheManager.lookupCachedData(table).get + cachedData.cachedRepresentation.cacheBuilder.storageLevel + } + Seq(1 -> "a").toDF("i", "j").write.parquet(path.getCanonicalPath) + sql(s"CREATE TABLE old USING parquet LOCATION '${path.toURI}'") + sql("CACHE TABLE old OPTIONS('storageLevel' 'MEMORY_ONLY')") + val oldStorageLevel = getStorageLevel("old") + + sql("ALTER TABLE old RENAME TO new") + val newStorageLevel = getStorageLevel("new") + assert(oldStorageLevel === newStorageLevel) + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala new file mode 100644 index 0000000000000..fb35d6cf8dacb --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/CharVarcharTestSuite.scala @@ -0,0 +1,683 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.{SparkConf, SparkException} +import org.apache.spark.sql.catalyst.util.CharVarcharUtils +import org.apache.spark.sql.connector.{InMemoryPartitionTableCatalog, SchemaRequiredDataSource} +import org.apache.spark.sql.execution.datasources.LogicalRelation +import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.sources.SimpleInsertSource +import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} +import org.apache.spark.sql.types._ + +// The base trait for char/varchar tests that need to be run with different table implementations. 
+trait CharVarcharTestSuite extends QueryTest with SQLTestUtils { + + def format: String + + def checkColType(f: StructField, dt: DataType): Unit = { + assert(f.dataType == CharVarcharUtils.replaceCharVarcharWithString(dt)) + assert(CharVarcharUtils.getRawType(f.metadata) == Some(dt)) + } + + test("char type values should be padded: top-level columns") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format") + sql("INSERT INTO t VALUES ('1', 'a')") + checkAnswer(spark.table("t"), Row("1", "a" + " " * 4)) + checkColType(spark.table("t").schema(1), CharType(5)) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + } + } + + test("char type values should be padded: partitioned columns") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(5)) USING $format PARTITIONED BY (c)") + sql("INSERT INTO t VALUES ('1', 'a')") + checkAnswer(spark.table("t"), Row("1", "a" + " " * 4)) + checkColType(spark.table("t").schema(1), CharType(5)) + + sql("ALTER TABLE t DROP PARTITION(c='a')") + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + } + } + + test("char type values should be padded: nested in struct") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c STRUCT<c: CHAR(5)>) USING $format") + sql("INSERT INTO t VALUES ('1', struct('a'))") + checkAnswer(spark.table("t"), Row("1", Row("a" + " " * 4))) + checkColType(spark.table("t").schema(1), new StructType().add("c", CharType(5))) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + sql("INSERT OVERWRITE t VALUES ('1', struct(null))") + checkAnswer(spark.table("t"), Row("1", Row(null))) + } + } + + test("char type values should be padded: nested in array") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c ARRAY<CHAR(5)>) USING $format") + sql("INSERT INTO t VALUES ('1', array('a', 'ab'))") + checkAnswer(spark.table("t"), Row("1", Seq("a" + " " * 4, "ab" + " " * 3))) + checkColType(spark.table("t").schema(1), ArrayType(CharType(5))) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + sql("INSERT OVERWRITE t VALUES ('1', array(null))") + checkAnswer(spark.table("t"), Row("1", Seq(null))) + } + } + + test("char type values should be padded: nested in map key") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c MAP<CHAR(5), STRING>) USING $format") + sql("INSERT INTO t VALUES ('1', map('a', 'ab'))") + checkAnswer(spark.table("t"), Row("1", Map(("a" + " " * 4, "ab")))) + checkColType(spark.table("t").schema(1), MapType(CharType(5), StringType)) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + } + } + + test("char type values should be padded: nested in map value") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c MAP<STRING, CHAR(5)>) USING $format") + sql("INSERT INTO t VALUES ('1', map('a', 'ab'))") + checkAnswer(spark.table("t"), Row("1", Map(("a", "ab" + " " * 3)))) + checkColType(spark.table("t").schema(1), MapType(StringType, CharType(5))) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + sql("INSERT OVERWRITE t VALUES ('1', map('a', null))") + checkAnswer(spark.table("t"), Row("1", Map("a" -> null))) + } + } + + test("char type values should be padded: nested in both map key and value") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c MAP<CHAR(5), CHAR(10)>) USING $format") + sql("INSERT INTO t VALUES ('1', map('a', 'ab'))") + checkAnswer(spark.table("t"), Row("1", Map(("a" + " " * 4,
"ab" + " " * 8)))) + checkColType(spark.table("t").schema(1), MapType(CharType(5), CharType(10))) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + } + } + + test("char type values should be padded: nested in struct of array") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c STRUCT<c: ARRAY<CHAR(5)>>) USING $format") + sql("INSERT INTO t VALUES ('1', struct(array('a', 'ab')))") + checkAnswer(spark.table("t"), Row("1", Row(Seq("a" + " " * 4, "ab" + " " * 3)))) + checkColType(spark.table("t").schema(1), + new StructType().add("c", ArrayType(CharType(5)))) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + sql("INSERT OVERWRITE t VALUES ('1', struct(null))") + checkAnswer(spark.table("t"), Row("1", Row(null))) + sql("INSERT OVERWRITE t VALUES ('1', struct(array(null)))") + checkAnswer(spark.table("t"), Row("1", Row(Seq(null)))) + } + } + + test("char type values should be padded: nested in array of struct") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c ARRAY<STRUCT<c: CHAR(5)>>) USING $format") + sql("INSERT INTO t VALUES ('1', array(struct('a'), struct('ab')))") + checkAnswer(spark.table("t"), Row("1", Seq(Row("a" + " " * 4), Row("ab" + " " * 3)))) + checkColType(spark.table("t").schema(1), + ArrayType(new StructType().add("c", CharType(5)))) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + sql("INSERT OVERWRITE t VALUES ('1', array(null))") + checkAnswer(spark.table("t"), Row("1", Seq(null))) + sql("INSERT OVERWRITE t VALUES ('1', array(struct(null)))") + checkAnswer(spark.table("t"), Row("1", Seq(Row(null)))) + } + } + + test("char type values should be padded: nested in array of array") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c ARRAY<ARRAY<CHAR(5)>>) USING $format") + sql("INSERT INTO t VALUES ('1', array(array('a', 'ab')))") + checkAnswer(spark.table("t"), Row("1", Seq(Seq("a" + " " * 4, "ab" + " " * 3)))) + checkColType(spark.table("t").schema(1), ArrayType(ArrayType(CharType(5)))) + + sql("INSERT OVERWRITE t VALUES ('1', null)") + checkAnswer(spark.table("t"), Row("1", null)) + sql("INSERT OVERWRITE t VALUES ('1', array(null))") + checkAnswer(spark.table("t"), Row("1", Seq(null))) + sql("INSERT OVERWRITE t VALUES ('1', array(array(null)))") + checkAnswer(spark.table("t"), Row("1", Seq(Seq(null)))) + } + } + + private def testTableWrite(f: String => Unit): Unit = { + withTable("t") { f("char") } + withTable("t") { f("varchar") } + } + + test("length check for input string values: top-level columns") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c $typeName(5)) USING $format") + sql("INSERT INTO t VALUES (null)") + checkAnswer(spark.table("t"), Row(null)) + val e = intercept[SparkException](sql("INSERT INTO t VALUES ('123456')")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: partitioned columns") { + // DS V2 doesn't support partitioned table.
+ if (!conf.contains(SQLConf.DEFAULT_CATALOG.key)) { + testTableWrite { typeName => + sql(s"CREATE TABLE t(i INT, c $typeName(5)) USING $format PARTITIONED BY (c)") + sql("INSERT INTO t VALUES (1, null)") + checkAnswer(spark.table("t"), Row(1, null)) + val e = intercept[SparkException](sql("INSERT INTO t VALUES (1, '123456')")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + } + + test("length check for input string values: nested in struct") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c STRUCT<c: $typeName(5)>) USING $format") + sql("INSERT INTO t SELECT struct(null)") + checkAnswer(spark.table("t"), Row(Row(null))) + val e = intercept[SparkException](sql("INSERT INTO t SELECT struct('123456')")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: nested in array") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c ARRAY<$typeName(5)>) USING $format") + sql("INSERT INTO t VALUES (array(null))") + checkAnswer(spark.table("t"), Row(Seq(null))) + val e = intercept[SparkException](sql("INSERT INTO t VALUES (array('a', '123456'))")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: nested in map key") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c MAP<$typeName(5), STRING>) USING $format") + val e = intercept[SparkException](sql("INSERT INTO t VALUES (map('123456', 'a'))")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: nested in map value") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c MAP<STRING, $typeName(5)>) USING $format") + sql("INSERT INTO t VALUES (map('a', null))") + checkAnswer(spark.table("t"), Row(Map("a" -> null))) + val e = intercept[SparkException](sql("INSERT INTO t VALUES (map('a', '123456'))")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: nested in both map key and value") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c MAP<$typeName(5), $typeName(5)>) USING $format") + val e1 = intercept[SparkException](sql("INSERT INTO t VALUES (map('123456', 'a'))")) + assert(e1.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + val e2 = intercept[SparkException](sql("INSERT INTO t VALUES (map('a', '123456'))")) + assert(e2.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: nested in struct of array") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c STRUCT<c: ARRAY<$typeName(5)>>) USING $format") + sql("INSERT INTO t SELECT struct(array(null))") + checkAnswer(spark.table("t"), Row(Row(Seq(null)))) + val e = intercept[SparkException](sql("INSERT INTO t SELECT struct(array('123456'))")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: nested in array of struct") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c ARRAY<STRUCT<c: $typeName(5)>>) USING $format") + sql("INSERT INTO t VALUES (array(struct(null)))") + checkAnswer(spark.table("t"),
Row(Seq(Row(null)))) + val e = intercept[SparkException](sql("INSERT INTO t VALUES (array(struct('123456')))")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: nested in array of array") { + testTableWrite { typeName => + sql(s"CREATE TABLE t(c ARRAY<ARRAY<$typeName(5)>>) USING $format") + sql("INSERT INTO t VALUES (array(array(null)))") + checkAnswer(spark.table("t"), Row(Seq(Seq(null)))) + val e = intercept[SparkException](sql("INSERT INTO t VALUES (array(array('123456')))")) + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typeName type length limitation: 5")) + } + } + + test("length check for input string values: with trailing spaces") { + withTable("t") { + sql(s"CREATE TABLE t(c1 CHAR(5), c2 VARCHAR(5)) USING $format") + sql("INSERT INTO t VALUES ('12 ', '12 ')") + sql("INSERT INTO t VALUES ('1234 ', '1234 ')") + checkAnswer(spark.table("t"), Seq( + Row("12" + " " * 3, "12 "), + Row("1234 ", "1234 "))) + } + } + + test("length check for input string values: with implicit cast") { + withTable("t") { + sql(s"CREATE TABLE t(c1 CHAR(5), c2 VARCHAR(5)) USING $format") + sql("INSERT INTO t VALUES (1234, 1234)") + checkAnswer(spark.table("t"), Row("1234 ", "1234")) + val e1 = intercept[SparkException](sql("INSERT INTO t VALUES (123456, 1)")) + assert(e1.getCause.getMessage.contains( + "input string of length 6 exceeds char type length limitation: 5")) + val e2 = intercept[SparkException](sql("INSERT INTO t VALUES (1, 123456)")) + assert(e2.getCause.getMessage.contains( + "input string of length 6 exceeds varchar type length limitation: 5")) + } + } + + private def testConditions(df: DataFrame, conditions: Seq[(String, Boolean)]): Unit = { + checkAnswer(df.selectExpr(conditions.map(_._1): _*), Row.fromSeq(conditions.map(_._2))) + } + + test("char type comparison: top-level columns") { + withTable("t") { + sql(s"CREATE TABLE t(c1 CHAR(2), c2 CHAR(5)) USING $format") + sql("INSERT INTO t VALUES ('a', 'a')") + testConditions(spark.table("t"), Seq( + ("c1 = 'a'", true), + ("'a' = c1", true), + ("c1 = 'a '", true), + ("c1 > 'a'", false), + ("c1 IN ('a', 'b')", true), + ("c1 = c2", true), + ("c1 < c2", false), + ("c1 IN (c2)", true))) + } + } + + test("char type comparison: partitioned columns") { + withTable("t") { + sql(s"CREATE TABLE t(i INT, c1 CHAR(2), c2 CHAR(5)) USING $format PARTITIONED BY (c1, c2)") + sql("INSERT INTO t VALUES (1, 'a', 'a')") + testConditions(spark.table("t"), Seq( + ("c1 = 'a'", true), + ("'a' = c1", true), + ("c1 = 'a '", true), + ("c1 > 'a'", false), + ("c1 IN ('a', 'b')", true), + ("c1 = c2", true), + ("c1 < c2", false), + ("c1 IN (c2)", true))) + } + } + + test("char type comparison: partition pruning") { + withTable("t") { + sql(s"CREATE TABLE t(i INT, c1 CHAR(2), c2 VARCHAR(5)) USING $format PARTITIONED BY (c1, c2)") + sql("INSERT INTO t VALUES (1, 'a', 'a')") + Seq(("c1 = 'a'", true), + ("'a' = c1", true), + ("c1 = 'a '", true), + ("c1 > 'a'", false), + ("c1 IN ('a', 'b')", true), + ("c2 = 'a '", false), + ("c2 = 'a'", true), + ("c2 IN ('a', 'b')", true)).foreach { case (con, res) => + val df = spark.table("t") + withClue(con) { + checkAnswer(df.where(con), df.where(res.toString)) + } + } + } + } + + test("char type comparison: join") { + withTable("t1", "t2") { + sql(s"CREATE TABLE t1(c CHAR(2)) USING $format") + sql(s"CREATE TABLE t2(c CHAR(5)) USING $format") + sql("INSERT INTO t1 VALUES ('a')") + sql("INSERT INTO t2 VALUES
('a')") + checkAnswer(sql("SELECT t1.c FROM t1 JOIN t2 ON t1.c = t2.c"), Row("a ")) + } + } + + test("char type comparison: nested in struct") { + withTable("t") { + sql(s"CREATE TABLE t(c1 STRUCT, c2 STRUCT) USING $format") + sql("INSERT INTO t VALUES (struct('a'), struct('a'))") + testConditions(spark.table("t"), Seq( + ("c1 = c2", true), + ("c1 < c2", false), + ("c1 IN (c2)", true))) + } + } + + test("char type comparison: nested in array") { + withTable("t") { + sql(s"CREATE TABLE t(c1 ARRAY, c2 ARRAY) USING $format") + sql("INSERT INTO t VALUES (array('a', 'b'), array('a', 'b'))") + testConditions(spark.table("t"), Seq( + ("c1 = c2", true), + ("c1 < c2", false), + ("c1 IN (c2)", true))) + } + } + + test("char type comparison: nested in struct of array") { + withTable("t") { + sql("CREATE TABLE t(c1 STRUCT>, c2 STRUCT>) " + + s"USING $format") + sql("INSERT INTO t VALUES (struct(array('a', 'b')), struct(array('a', 'b')))") + testConditions(spark.table("t"), Seq( + ("c1 = c2", true), + ("c1 < c2", false), + ("c1 IN (c2)", true))) + } + } + + test("char type comparison: nested in array of struct") { + withTable("t") { + sql("CREATE TABLE t(c1 ARRAY>, c2 ARRAY>) " + + s"USING $format") + sql("INSERT INTO t VALUES (array(struct('a')), array(struct('a')))") + testConditions(spark.table("t"), Seq( + ("c1 = c2", true), + ("c1 < c2", false), + ("c1 IN (c2)", true))) + } + } + + test("char type comparison: nested in array of array") { + withTable("t") { + sql("CREATE TABLE t(c1 ARRAY>, c2 ARRAY>) " + + s"USING $format") + sql("INSERT INTO t VALUES (array(array('a')), array(array('a')))") + testConditions(spark.table("t"), Seq( + ("c1 = c2", true), + ("c1 < c2", false), + ("c1 IN (c2)", true))) + } + } + + test("SPARK-33892: DESCRIBE TABLE w/ char/varchar") { + withTable("t") { + sql(s"CREATE TABLE t(v VARCHAR(3), c CHAR(5)) USING $format") + checkAnswer(sql("desc t").selectExpr("data_type").where("data_type like '%char%'"), + Seq(Row("char(5)"), Row("varchar(3)"))) + } + } + + test("SPARK-33992: char/varchar resolution in correlated sub query") { + withTable("t1", "t2") { + sql(s"CREATE TABLE t1(v VARCHAR(3), c CHAR(5)) USING $format") + sql(s"CREATE TABLE t2(v VARCHAR(3), c CHAR(5)) USING $format") + sql("INSERT INTO t1 VALUES ('c', 'b')") + sql("INSERT INTO t2 VALUES ('a', 'b')") + + checkAnswer(sql( + """ + |SELECT v FROM t1 + |WHERE 'a' IN (SELECT v FROM t2 WHERE t1.c = t2.c )""".stripMargin), + Row("c")) + } + } + + test("SPARK-34003: fix char/varchar fails w/ both group by and order by ") { + withTable("t") { + sql(s"CREATE TABLE t(v VARCHAR(3), i INT) USING $format") + sql("INSERT INTO t VALUES ('c', 1)") + checkAnswer(sql("SELECT v, sum(i) FROM t GROUP BY v ORDER BY v"), Row("c", 1)) + } + } +} + +// Some basic char/varchar tests which doesn't rely on table implementation. +class BasicCharVarcharTestSuite extends QueryTest with SharedSparkSession { + import testImplicits._ + + test("user-specified schema in cast") { + def assertNoCharType(df: DataFrame): Unit = { + checkAnswer(df, Row("0")) + assert(df.schema.map(_.dataType) == Seq(StringType)) + } + + val logAppender = new LogAppender("The Spark cast operator does not support char/varchar" + + " type and simply treats them as string type. 
Please use string type directly to avoid" + + " confusion.") + withLogAppender(logAppender) { + assertNoCharType(spark.range(1).select($"id".cast("char(5)"))) + assertNoCharType(spark.range(1).select($"id".cast(CharType(5)))) + assertNoCharType(spark.range(1).selectExpr("CAST(id AS CHAR(5))")) + assertNoCharType(sql("SELECT CAST(id AS CHAR(5)) FROM range(1)")) + } + } + + def failWithInvalidCharUsage[T](fn: => T): Unit = { + val e = intercept[AnalysisException](fn) + assert(e.getMessage contains "char/varchar type can only be used in the table schema") + } + + test("invalidate char/varchar in functions") { + failWithInvalidCharUsage(sql("""SELECT from_json('{"a": "str"}', 'a CHAR(5)')""")) + withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) { + val df = sql("""SELECT from_json('{"a": "str"}', 'a CHAR(5)')""") + checkAnswer(df, Row(Row("str"))) + val schema = df.schema.head.dataType.asInstanceOf[StructType] + assert(schema.map(_.dataType) == Seq(StringType)) + } + } + + test("invalidate char/varchar in SparkSession createDataframe") { + val df = spark.range(10).map(_.toString).toDF() + val schema = new StructType().add("id", CharType(5)) + failWithInvalidCharUsage(spark.createDataFrame(df.collectAsList(), schema)) + failWithInvalidCharUsage(spark.createDataFrame(df.rdd, schema)) + failWithInvalidCharUsage(spark.createDataFrame(df.toJavaRDD, schema)) + withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) { + val df1 = spark.createDataFrame(df.collectAsList(), schema) + checkAnswer(df1, df) + assert(df1.schema.head.dataType === StringType) + } + } + + test("invalidate char/varchar in spark.read.schema") { + failWithInvalidCharUsage(spark.read.schema(new StructType().add("id", CharType(5)))) + failWithInvalidCharUsage(spark.read.schema("id char(5)")) + withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) { + val ds = spark.range(10).map(_.toString) + val df1 = spark.read.schema(new StructType().add("id", CharType(5))).csv(ds) + assert(df1.schema.map(_.dataType) == Seq(StringType)) + val df2 = spark.read.schema("id char(5)").csv(ds) + assert(df2.schema.map(_.dataType) == Seq(StringType)) + + def checkSchema(df: DataFrame): Unit = { + val schemas = df.queryExecution.analyzed.collect { + case l: LogicalRelation => l.relation.schema + case d: DataSourceV2Relation => d.table.schema() + } + assert(schemas.length == 1) + assert(schemas.head.map(_.dataType) == Seq(StringType)) + } + + // user-specified schema in DataFrameReader: DSV1 + checkSchema(spark.read.schema(new StructType().add("id", CharType(5))) + .format(classOf[SimpleInsertSource].getName).load()) + checkSchema(spark.read.schema("id char(5)") + .format(classOf[SimpleInsertSource].getName).load()) + + // user-specified schema in DataFrameReader: DSV2 + checkSchema(spark.read.schema(new StructType().add("id", CharType(5))) + .format(classOf[SchemaRequiredDataSource].getName).load()) + checkSchema(spark.read.schema("id char(5)") + .format(classOf[SchemaRequiredDataSource].getName).load()) + } + } + + test("invalidate char/varchar in udf's result type") { + failWithInvalidCharUsage(spark.udf.register("testchar", () => "B", VarcharType(1))) + failWithInvalidCharUsage(spark.udf.register("testchar2", (x: String) => x, VarcharType(1))) + withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) { + spark.udf.register("testchar", () => "B", VarcharType(1)) + spark.udf.register("testchar2", (x: String) => x, VarcharType(1)) + val df1 = spark.sql("select testchar()") + checkAnswer(df1, Row("B")) + 
assert(df1.schema.head.dataType === StringType) + val df2 = spark.sql("select testchar2('abc')") + checkAnswer(df2, Row("abc")) + assert(df2.schema.head.dataType === StringType) + } + } + + test("invalidate char/varchar in spark.readStream.schema") { + failWithInvalidCharUsage(spark.readStream.schema(new StructType().add("id", CharType(5)))) + failWithInvalidCharUsage(spark.readStream.schema("id char(5)")) + withSQLConf((SQLConf.LEGACY_CHAR_VARCHAR_AS_STRING.key, "true")) { + withTempPath { dir => + spark.range(2).write.save(dir.toString) + val df1 = spark.readStream.schema(new StructType().add("id", CharType(5))) + .load(dir.toString) + assert(df1.schema.map(_.dataType) == Seq(StringType)) + val df2 = spark.readStream.schema("id char(5)").load(dir.toString) + assert(df2.schema.map(_.dataType) == Seq(StringType)) + } + } + } +} + +class FileSourceCharVarcharTestSuite extends CharVarcharTestSuite with SharedSparkSession { + override def format: String = "parquet" + override protected def sparkConf: SparkConf = { + super.sparkConf.set(SQLConf.USE_V1_SOURCE_LIST, "parquet") + } + + test("create table w/ location and fit length values") { + Seq("char", "varchar").foreach { typ => + withTempPath { dir => + withTable("t") { + sql("SELECT '12' as col").write.format(format).save(dir.toString) + sql(s"CREATE TABLE t (col $typ(2)) using $format LOCATION '$dir'") + val df = sql("select * from t") + checkAnswer(sql("select * from t"), Row("12")) + } + } + } + } + + test("create table w/ location and over length values") { + Seq("char", "varchar").foreach { typ => + withTempPath { dir => + withTable("t") { + sql("SELECT '123456' as col").write.format(format).save(dir.toString) + sql(s"CREATE TABLE t (col $typ(2)) using $format LOCATION '$dir'") + val e = intercept[SparkException] { sql("select * from t").collect() } + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typ type length limitation: 2")) + } + } + } + } + + test("alter table set location w/ fit length values") { + Seq("char", "varchar").foreach { typ => + withTempPath { dir => + withTable("t") { + sql("SELECT '12' as col").write.format(format).save(dir.toString) + sql(s"CREATE TABLE t (col $typ(2)) using $format") + sql(s"ALTER TABLE t SET LOCATION '$dir'") + checkAnswer(spark.table("t"), Row("12")) + } + } + } + } + + test("alter table set location w/ over length values") { + Seq("char", "varchar").foreach { typ => + withTempPath { dir => + withTable("t") { + sql("SELECT '123456' as col").write.format(format).save(dir.toString) + sql(s"CREATE TABLE t (col $typ(2)) using $format") + sql(s"ALTER TABLE t SET LOCATION '$dir'") + val e = intercept[SparkException] { spark.table("t").collect() } + assert(e.getCause.getMessage.contains( + s"input string of length 6 exceeds $typ type length limitation: 2")) + } + } + } + } + + // TODO(SPARK-33875): Move these tests to super after DESCRIBE COLUMN v2 implemented + test("SPARK-33892: DESCRIBE COLUMN w/ char/varchar") { + withTable("t") { + sql(s"CREATE TABLE t(v VARCHAR(3), c CHAR(5)) USING $format") + checkAnswer(sql("desc t v").selectExpr("info_value").where("info_value like '%char%'"), + Row("varchar(3)")) + checkAnswer(sql("desc t c").selectExpr("info_value").where("info_value like '%char%'"), + Row("char(5)")) + } + } + + // TODO(SPARK-33898): Move these tests to super after SHOW CREATE TABLE for v2 implemented + test("SPARK-33892: SHOW CREATE TABLE w/ char/varchar") { + withTable("t") { + sql(s"CREATE TABLE t(v VARCHAR(3), c CHAR(5)) USING $format") + val rest = 
sql("SHOW CREATE TABLE t").head().getString(0) + assert(rest.contains("VARCHAR(3)")) + assert(rest.contains("CHAR(5)")) + } + } +} + +class DSV2CharVarcharTestSuite extends CharVarcharTestSuite + with SharedSparkSession { + override def format: String = "foo" + protected override def sparkConf = { + super.sparkConf + .set("spark.sql.catalog.testcat", classOf[InMemoryPartitionTableCatalog].getName) + .set(SQLConf.DEFAULT_CATALOG.key, "testcat") + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala index 24419968c0472..01b1508d034c3 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/ColumnExpressionSuite.scala @@ -24,6 +24,8 @@ import org.apache.hadoop.io.{LongWritable, Text} import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat} import org.scalatest.matchers.should.Matchers._ +import org.apache.spark.SparkException +import org.apache.spark.sql.UpdateFieldsBenchmark._ import org.apache.spark.sql.catalyst.expressions.{InSet, Literal, NamedExpression} import org.apache.spark.sql.execution.ProjectExec import org.apache.spark.sql.functions._ @@ -922,11 +924,10 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { assert(inSet.sql === "('a' IN ('a', 'b'))") } - def checkAnswerAndSchema( + def checkAnswer( df: => DataFrame, expectedAnswer: Seq[Row], expectedSchema: StructType): Unit = { - checkAnswer(df, expectedAnswer) assert(df.schema == expectedSchema) } @@ -940,8 +941,8 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { sparkContext.parallelize(Row(Row(1, null, 3)) :: Nil), StructType(Seq(StructField("a", structType, nullable = false)))) - private lazy val nullStructLevel1: DataFrame = spark.createDataFrame( - sparkContext.parallelize(Row(null) :: Nil), + private lazy val nullableStructLevel1: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(null) :: Row(Row(1, null, 3)) :: Nil), StructType(Seq(StructField("a", structType, nullable = true)))) private lazy val structLevel2: DataFrame = spark.createDataFrame( @@ -951,12 +952,12 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { StructField("a", structType, nullable = false))), nullable = false)))) - private lazy val nullStructLevel2: DataFrame = spark.createDataFrame( - sparkContext.parallelize(Row(Row(null)) :: Nil), + private lazy val nullableStructLevel2: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(null) :: Row(Row(null)) :: Row(Row(Row(1, null, 3))) :: Nil), StructType(Seq( StructField("a", StructType(Seq( StructField("a", structType, nullable = true))), - nullable = false)))) + nullable = true)))) private lazy val structLevel3: DataFrame = spark.createDataFrame( sparkContext.parallelize(Row(Row(Row(Row(1, null, 3)))) :: Nil), @@ -1018,7 +1019,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } test("withField should add field with no name") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", $"a".withField("", lit(4))), Row(Row(1, null, 3, 4)) :: Nil, StructType(Seq( @@ -1031,7 +1032,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } test("withField should add field to struct") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("d", lit(4))), Row(Row(1, null, 3, 4)) :: Nil, StructType(Seq( @@ -1043,10 
+1044,10 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { nullable = false)))) } - test("withField should add field to null struct") { - checkAnswerAndSchema( - nullStructLevel1.withColumn("a", $"a".withField("d", lit(4))), - Row(null) :: Nil, + test("withField should add field to nullable struct") { + checkAnswer( + nullableStructLevel1.withColumn("a", $"a".withField("d", lit(4))), + Row(null) :: Row(Row(1, null, 3, 4)) :: Nil, StructType(Seq( StructField("a", StructType(Seq( StructField("a", IntegerType, nullable = false), @@ -1056,10 +1057,10 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { nullable = true)))) } - test("withField should add field to nested null struct") { - checkAnswerAndSchema( - nullStructLevel2.withColumn("a", $"a".withField("a.d", lit(4))), - Row(Row(null)) :: Nil, + test("withField should add field to nested nullable struct") { + checkAnswer( + nullableStructLevel2.withColumn("a", $"a".withField("a.d", lit(4))), + Row(null) :: Row(Row(null)) :: Row(Row(Row(1, null, 3, 4))) :: Nil, StructType( Seq(StructField("a", StructType(Seq( StructField("a", StructType(Seq( @@ -1068,11 +1069,11 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { StructField("c", IntegerType, nullable = false), StructField("d", IntegerType, nullable = false))), nullable = true))), - nullable = false)))) + nullable = true)))) } test("withField should add null field to struct") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("d", lit(null).cast(IntegerType))), Row(Row(1, null, 3, null)) :: Nil, StructType(Seq( @@ -1085,7 +1086,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } test("withField should add multiple fields to struct") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("d", lit(4)).withField("e", lit(5))), Row(Row(1, null, 3, 4, 5)) :: Nil, StructType(Seq( @@ -1098,12 +1099,26 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { nullable = false)))) } + test("withField should add multiple fields to nullable struct") { + checkAnswer( + nullableStructLevel1.withColumn("a", 'a.withField("d", lit(4)).withField("e", lit(5))), + Row(null) :: Row(Row(1, null, 3, 4, 5)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = true), + StructField("c", IntegerType, nullable = false), + StructField("d", IntegerType, nullable = false), + StructField("e", IntegerType, nullable = false))), + nullable = true)))) + } + test("withField should add field to nested struct") { Seq( structLevel2.withColumn("a", 'a.withField("a.d", lit(4))), structLevel2.withColumn("a", 'a.withField("a", $"a.a".withField("d", lit(4)))) ).foreach { df => - checkAnswerAndSchema( + checkAnswer( df, Row(Row(Row(1, null, 3, 4))) :: Nil, StructType( @@ -1118,8 +1133,50 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } } + test("withField should add multiple fields to nested struct") { + Seq( + col("a").withField("a", $"a.a".withField("d", lit(4)).withField("e", lit(5))), + col("a").withField("a.d", lit(4)).withField("a.e", lit(5)) + ).foreach { column => + checkAnswer( + structLevel2.select(column.as("a")), + Row(Row(Row(1, null, 3, 4, 5))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, 
nullable = true), + StructField("c", IntegerType, nullable = false), + StructField("d", IntegerType, nullable = false), + StructField("e", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + } + } + + test("withField should add multiple fields to nested nullable struct") { + Seq( + col("a").withField("a", $"a.a".withField("d", lit(4)).withField("e", lit(5))), + col("a").withField("a.d", lit(4)).withField("a.e", lit(5)) + ).foreach { column => + checkAnswer( + nullableStructLevel2.select(column.as("a")), + Row(null) :: Row(Row(null)) :: Row(Row(Row(1, null, 3, 4, 5))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = true), + StructField("c", IntegerType, nullable = false), + StructField("d", IntegerType, nullable = false), + StructField("e", IntegerType, nullable = false))), + nullable = true))), + nullable = true)))) + } + } + test("withField should add field to deeply nested struct") { - checkAnswerAndSchema( + checkAnswer( structLevel3.withColumn("a", 'a.withField("a.a.d", lit(4))), Row(Row(Row(Row(1, null, 3, 4)))) :: Nil, StructType(Seq( @@ -1136,7 +1193,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } test("withField should replace field in struct") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("b", lit(2))), Row(Row(1, 2, 3)) :: Nil, StructType(Seq( @@ -1147,10 +1204,10 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { nullable = false)))) } - test("withField should replace field in null struct") { - checkAnswerAndSchema( - nullStructLevel1.withColumn("a", 'a.withField("b", lit("foo"))), - Row(null) :: Nil, + test("withField should replace field in nullable struct") { + checkAnswer( + nullableStructLevel1.withColumn("a", 'a.withField("b", lit("foo"))), + Row(null) :: Row(Row(1, "foo", 3)) :: Nil, StructType(Seq( StructField("a", StructType(Seq( StructField("a", IntegerType, nullable = false), @@ -1159,10 +1216,10 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { nullable = true)))) } - test("withField should replace field in nested null struct") { - checkAnswerAndSchema( - nullStructLevel2.withColumn("a", $"a".withField("a.b", lit("foo"))), - Row(Row(null)) :: Nil, + test("withField should replace field in nested nullable struct") { + checkAnswer( + nullableStructLevel2.withColumn("a", $"a".withField("a.b", lit("foo"))), + Row(null) :: Row(Row(null)) :: Row(Row(Row(1, "foo", 3))) :: Nil, StructType( Seq(StructField("a", StructType(Seq( StructField("a", StructType(Seq( @@ -1170,11 +1227,11 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { StructField("b", StringType, nullable = false), StructField("c", IntegerType, nullable = false))), nullable = true))), - nullable = false)))) + nullable = true)))) } test("withField should replace field with null value in struct") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("c", lit(null).cast(IntegerType))), Row(Row(1, null, null)) :: Nil, StructType(Seq( @@ -1186,7 +1243,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } test("withField should replace multiple fields in struct") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("a", lit(10)).withField("b", lit(20))), Row(Row(10, 20, 3)) :: Nil, StructType(Seq( @@ -1197,12 +1254,24 @@ class 
ColumnExpressionSuite extends QueryTest with SharedSparkSession { nullable = false)))) } + test("withField should replace multiple fields in nullable struct") { + checkAnswer( + nullableStructLevel1.withColumn("a", 'a.withField("a", lit(10)).withField("b", lit(20))), + Row(null) :: Row(Row(10, 20, 3)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = true)))) + } + test("withField should replace field in nested struct") { Seq( structLevel2.withColumn("a", $"a".withField("a.b", lit(2))), structLevel2.withColumn("a", 'a.withField("a", $"a.a".withField("b", lit(2)))) ).foreach { df => - checkAnswerAndSchema( + checkAnswer( df, Row(Row(Row(1, 2, 3))) :: Nil, StructType(Seq( @@ -1216,8 +1285,46 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } } + test("withField should replace multiple fields in nested struct") { + Seq( + col("a").withField("a", $"a.a".withField("a", lit(10)).withField("b", lit(20))), + col("a").withField("a.a", lit(10)).withField("a.b", lit(20)) + ).foreach { column => + checkAnswer( + structLevel2.select(column.as("a")), + Row(Row(Row(10, 20, 3))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + } + } + + test("withField should replace multiple fields in nested nullable struct") { + Seq( + col("a").withField("a", $"a.a".withField("a", lit(10)).withField("b", lit(20))), + col("a").withField("a.a", lit(10)).withField("a.b", lit(20)) + ).foreach { column => + checkAnswer( + nullableStructLevel2.select(column.as("a")), + Row(null) :: Row(Row(null)) :: Row(Row(Row(10, 20, 3))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = true))), + nullable = true)))) + } + } + test("withField should replace field in deeply nested struct") { - checkAnswerAndSchema( + checkAnswer( structLevel3.withColumn("a", $"a".withField("a.a.b", lit(2))), Row(Row(Row(Row(1, 2, 3)))) :: Nil, StructType(Seq( @@ -1242,7 +1349,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { StructField("b", IntegerType, nullable = false))), nullable = false)))) - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("b", lit(100))), Row(Row(1, 100, 100)) :: Nil, StructType(Seq( @@ -1254,7 +1361,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } test("withField should replace fields in struct in given order") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("b", lit(2)).withField("b", lit(20))), Row(Row(1, 20, 3)) :: Nil, StructType(Seq( @@ -1266,7 +1373,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { } test("withField should add field and then replace same field in struct") { - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", 'a.withField("d", lit(4)).withField("d", lit(5))), Row(Row(1, null, 3, 5)) :: Nil, StructType(Seq( @@ -1290,7 +1397,7 @@ class ColumnExpressionSuite extends QueryTest with 
SharedSparkSession { nullable = false))), nullable = false)))) - checkAnswerAndSchema( + checkAnswer( df.withColumn("a", 'a.withField("`a.b`.`e.f`", lit(2))), Row(Row(Row(1, 2, 3))) :: Nil, StructType(Seq( @@ -1317,7 +1424,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { test("withField should replace field in struct even if casing is different") { withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { - checkAnswerAndSchema( + checkAnswer( mixedCaseStructLevel1.withColumn("a", 'a.withField("A", lit(2))), Row(Row(2, 1)) :: Nil, StructType(Seq( @@ -1326,7 +1433,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { StructField("B", IntegerType, nullable = false))), nullable = false)))) - checkAnswerAndSchema( + checkAnswer( mixedCaseStructLevel1.withColumn("a", 'a.withField("b", lit(2))), Row(Row(1, 2)) :: Nil, StructType(Seq( @@ -1339,7 +1446,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { test("withField should add field to struct because casing is different") { withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { - checkAnswerAndSchema( + checkAnswer( mixedCaseStructLevel1.withColumn("a", 'a.withField("A", lit(2))), Row(Row(1, 1, 2)) :: Nil, StructType(Seq( @@ -1349,7 +1456,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { StructField("A", IntegerType, nullable = false))), nullable = false)))) - checkAnswerAndSchema( + checkAnswer( mixedCaseStructLevel1.withColumn("a", 'a.withField("b", lit(2))), Row(Row(1, 1, 2)) :: Nil, StructType(Seq( @@ -1377,7 +1484,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { test("withField should replace nested field in struct even if casing is different") { withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { - checkAnswerAndSchema( + checkAnswer( mixedCaseStructLevel2.withColumn("a", 'a.withField("A.a", lit(2))), Row(Row(Row(2, 1), Row(1, 1))) :: Nil, StructType(Seq( @@ -1392,7 +1499,7 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { nullable = false))), nullable = false)))) - checkAnswerAndSchema( + checkAnswer( mixedCaseStructLevel2.withColumn("a", 'a.withField("b.a", lit(2))), Row(Row(Row(1, 1), Row(2, 1))) :: Nil, StructType(Seq( @@ -1451,30 +1558,41 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { sql("SELECT named_struct('a', named_struct('b', 1), 'a', named_struct('c', 2)) struct_col") .select($"struct_col".withField("a.c", lit(3))) }.getMessage should include("Ambiguous reference to fields") + + checkAnswer( + sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col") + .select($"struct_col".withField("a.c", lit(3)).withField("a.d", lit(4))), + Row(Row(Row(1, 2, 3, 4)))) + + checkAnswer( + sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col") + .select($"struct_col".withField("a", + $"struct_col.a".withField("c", lit(3)).withField("d", lit(4)))), + Row(Row(Row(1, 2, 3, 4)))) } test("SPARK-32641: extracting field from non-null struct column after withField should return " + "field value") { // extract newly added field - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", $"a".withField("d", lit(4)).getField("d")), Row(4) :: Nil, StructType(Seq(StructField("a", IntegerType, nullable = false)))) // extract newly replaced field - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", $"a".withField("a", lit(4)).getField("a")), Row(4) :: Nil, StructType(Seq(StructField("a", IntegerType, nullable = false)))) // add 
new field, extract another field from original struct - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", $"a".withField("d", lit(4)).getField("c")), Row(3):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = false)))) // replace field, extract another field from original struct - checkAnswerAndSchema( + checkAnswer( structLevel1.withColumn("a", $"a".withField("a", lit(4)).getField("c")), Row(3):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = false)))) @@ -1482,26 +1600,30 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { test("SPARK-32641: extracting field from null struct column after withField should return " + "null if the original struct was null") { + val nullStructLevel1 = spark.createDataFrame( + sparkContext.parallelize(Row(null) :: Nil), + StructType(Seq(StructField("a", structType, nullable = true)))) + // extract newly added field - checkAnswerAndSchema( + checkAnswer( nullStructLevel1.withColumn("a", $"a".withField("d", lit(4)).getField("d")), Row(null) :: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) // extract newly replaced field - checkAnswerAndSchema( + checkAnswer( nullStructLevel1.withColumn("a", $"a".withField("a", lit(4)).getField("a")), Row(null):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) // add new field, extract another field from original struct - checkAnswerAndSchema( + checkAnswer( nullStructLevel1.withColumn("a", $"a".withField("d", lit(4)).getField("c")), Row(null):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) // replace field, extract another field from original struct - checkAnswerAndSchema( + checkAnswer( nullStructLevel1.withColumn("a", $"a".withField("a", lit(4)).getField("c")), Row(null):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) @@ -1514,27 +1636,721 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession { StructType(Seq(StructField("a", structType, nullable = true)))) // extract newly added field - checkAnswerAndSchema( + checkAnswer( df.withColumn("a", $"a".withField("d", lit(4)).getField("d")), Row(4) :: Row(null) :: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) // extract newly replaced field - checkAnswerAndSchema( + checkAnswer( df.withColumn("a", $"a".withField("a", lit(4)).getField("a")), Row(4) :: Row(null):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) // add new field, extract another field from original struct - checkAnswerAndSchema( + checkAnswer( df.withColumn("a", $"a".withField("d", lit(4)).getField("c")), Row(3) :: Row(null):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) // replace field, extract another field from original struct - checkAnswerAndSchema( + checkAnswer( df.withColumn("a", $"a".withField("a", lit(4)).getField("c")), Row(3) :: Row(null):: Nil, StructType(Seq(StructField("a", IntegerType, nullable = true)))) } + + + test("dropFields should throw an exception if called on a non-StructType column") { + intercept[AnalysisException] { + testData.withColumn("key", $"key".dropFields("a")) + }.getMessage should include("struct argument should be struct type, got: int") + } + + test("dropFields should throw an exception if fieldName argument is null") { + intercept[IllegalArgumentException] { + structLevel1.withColumn("a", $"a".dropFields(null)) + }.getMessage should include("fieldName cannot be null") + } + + test("dropFields should throw an exception if any 
intermediate structs don't exist") { + intercept[AnalysisException] { + structLevel2.withColumn("a", 'a.dropFields("x.b")) + }.getMessage should include("No such struct field x in a") + + intercept[AnalysisException] { + structLevel3.withColumn("a", 'a.dropFields("a.x.b")) + }.getMessage should include("No such struct field x in a") + } + + test("dropFields should throw an exception if intermediate field is not a struct") { + intercept[AnalysisException] { + structLevel1.withColumn("a", 'a.dropFields("b.a")) + }.getMessage should include("struct argument should be struct type, got: int") + } + + test("dropFields should throw an exception if intermediate field reference is ambiguous") { + intercept[AnalysisException] { + val structLevel2: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(Row(Row(1, null, 3), 4)) :: Nil), + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", structType, nullable = false), + StructField("a", structType, nullable = false))), + nullable = false)))) + + structLevel2.withColumn("a", 'a.dropFields("a.b")) + }.getMessage should include("Ambiguous reference to fields") + } + + test("dropFields should drop field in struct") { + checkAnswer( + structLevel1.withColumn("a", 'a.dropFields("b")), + Row(Row(1, 3)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = false)))) + } + + test("dropFields should drop field in nullable struct") { + checkAnswer( + nullableStructLevel1.withColumn("a", $"a".dropFields("b")), + Row(null) :: Row(Row(1, 3)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = true)))) + } + + test("dropFields should drop multiple fields in struct") { + Seq( + structLevel1.withColumn("a", $"a".dropFields("b", "c")), + structLevel1.withColumn("a", 'a.dropFields("b").dropFields("c")) + ).foreach { df => + checkAnswer( + df, + Row(Row(1)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false))), + nullable = false)))) + } + } + + test("dropFields should throw an exception if no fields will be left in struct") { + intercept[AnalysisException] { + structLevel1.withColumn("a", 'a.dropFields("a", "b", "c")) + }.getMessage should include("cannot drop all fields in struct") + } + + test("dropFields should drop field with no name in struct") { + val structType = StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("", IntegerType, nullable = false))) + + val structLevel1: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(Row(1, 2)) :: Nil), + StructType(Seq(StructField("a", structType, nullable = false)))) + + checkAnswer( + structLevel1.withColumn("a", $"a".dropFields("")), + Row(Row(1)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false))), + nullable = false)))) + } + + test("dropFields should drop field in nested struct") { + checkAnswer( + structLevel2.withColumn("a", 'a.dropFields("a.b")), + Row(Row(Row(1, 3))) :: Nil, + StructType( + Seq(StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + } + + test("dropFields should drop multiple fields in nested 
struct") { + checkAnswer( + structLevel2.withColumn("a", 'a.dropFields("a.b", "a.c")), + Row(Row(Row(1))) :: Nil, + StructType( + Seq(StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + } + + test("dropFields should drop field in nested nullable struct") { + checkAnswer( + nullableStructLevel2.withColumn("a", $"a".dropFields("a.b")), + Row(null) :: Row(Row(null)) :: Row(Row(Row(1, 3))) :: Nil, + StructType( + Seq(StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = true))), + nullable = true)))) + } + + test("dropFields should drop multiple fields in nested nullable struct") { + checkAnswer( + nullableStructLevel2.withColumn("a", $"a".dropFields("a.b", "a.c")), + Row(null) :: Row(Row(null)) :: Row(Row(Row(1))) :: Nil, + StructType( + Seq(StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false))), + nullable = true))), + nullable = true)))) + } + + test("dropFields should drop field in deeply nested struct") { + checkAnswer( + structLevel3.withColumn("a", 'a.dropFields("a.a.b")), + Row(Row(Row(Row(1, 3)))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = false))), + nullable = false))), + nullable = false)))) + } + + test("dropFields should drop all fields with given name in struct") { + val structLevel1 = spark.createDataFrame( + sparkContext.parallelize(Row(Row(1, 2, 3)) :: Nil), + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + structLevel1.withColumn("a", 'a.dropFields("b")), + Row(Row(1)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false))), + nullable = false)))) + } + + test("dropFields should drop field in struct even if casing is different") { + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + checkAnswer( + mixedCaseStructLevel1.withColumn("a", 'a.dropFields("A")), + Row(Row(1)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("B", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + mixedCaseStructLevel1.withColumn("a", 'a.dropFields("b")), + Row(Row(1)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false))), + nullable = false)))) + } + } + + test("dropFields should not drop field in struct because casing is different") { + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + checkAnswer( + mixedCaseStructLevel1.withColumn("a", 'a.dropFields("A")), + Row(Row(1, 1)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("B", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + mixedCaseStructLevel1.withColumn("a", 'a.dropFields("b")), + Row(Row(1, 1)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("B", IntegerType, nullable = false))), + 
nullable = false)))) + } + } + + test("dropFields should drop nested field in struct even if casing is different") { + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + checkAnswer( + mixedCaseStructLevel2.withColumn("a", 'a.dropFields("A.a")), + Row(Row(Row(1), Row(1, 1))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("A", StructType(Seq( + StructField("b", IntegerType, nullable = false))), + nullable = false), + StructField("B", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + + checkAnswer( + mixedCaseStructLevel2.withColumn("a", 'a.dropFields("b.a")), + Row(Row(Row(1, 1), Row(1))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false))), + nullable = false), + StructField("b", StructType(Seq( + StructField("b", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + } + } + + test("dropFields should throw an exception because casing is different") { + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + intercept[AnalysisException] { + mixedCaseStructLevel2.withColumn("a", 'a.dropFields("A.a")) + }.getMessage should include("No such struct field A in a, B") + + intercept[AnalysisException] { + mixedCaseStructLevel2.withColumn("a", 'a.dropFields("b.a")) + }.getMessage should include("No such struct field b in a, B") + } + } + + test("dropFields should drop only fields that exist") { + checkAnswer( + structLevel1.withColumn("a", 'a.dropFields("d")), + Row(Row(1, null, 3)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = true), + StructField("c", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + structLevel1.withColumn("a", 'a.dropFields("b", "d")), + Row(Row(1, 3)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + structLevel2.withColumn("a", $"a".dropFields("a.b", "a.d")), + Row(Row(Row(1, 3))) :: Nil, + StructType( + Seq(StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + } + + test("dropFields should drop multiple fields at arbitrary levels of nesting in a single call") { + val df: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(Row(Row(1, null, 3), 4)) :: Nil), + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", structType, nullable = false), + StructField("b", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + df.withColumn("a", $"a".dropFields("a.b", "b")), + Row(Row(Row(1, 3))) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("c", IntegerType, nullable = false))), nullable = false))), + nullable = false)))) + } + + test("dropFields user-facing examples") { + checkAnswer( + sql("SELECT named_struct('a', 1, 'b', 2) struct_col") + .select($"struct_col".dropFields("b")), + Row(Row(1))) + + checkAnswer( + sql("SELECT named_struct('a', 1, 'b', 2) 
struct_col") + .select($"struct_col".dropFields("c")), + Row(Row(1, 2))) + + checkAnswer( + sql("SELECT named_struct('a', 1, 'b', 2, 'c', 3) struct_col") + .select($"struct_col".dropFields("b", "c")), + Row(Row(1))) + + intercept[AnalysisException] { + sql("SELECT named_struct('a', 1, 'b', 2) struct_col") + .select($"struct_col".dropFields("a", "b")) + }.getMessage should include("cannot drop all fields in struct") + + checkAnswer( + sql("SELECT CAST(NULL AS struct) struct_col") + .select($"struct_col".dropFields("b")), + Row(null)) + + checkAnswer( + sql("SELECT named_struct('a', 1, 'b', 2, 'b', 3) struct_col") + .select($"struct_col".dropFields("b")), + Row(Row(1))) + + checkAnswer( + sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2)) struct_col") + .select($"struct_col".dropFields("a.b")), + Row(Row(Row(1)))) + + intercept[AnalysisException] { + sql("SELECT named_struct('a', named_struct('b', 1), 'a', named_struct('c', 2)) struct_col") + .select($"struct_col".dropFields("a.c")) + }.getMessage should include("Ambiguous reference to fields") + + checkAnswer( + sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2, 'c', 3)) struct_col") + .select($"struct_col".dropFields("a.b", "a.c")), + Row(Row(Row(1)))) + + checkAnswer( + sql("SELECT named_struct('a', named_struct('a', 1, 'b', 2, 'c', 3)) struct_col") + .select($"struct_col".withField("a", $"struct_col.a".dropFields("b", "c"))), + Row(Row(Row(1)))) + } + + test("should correctly handle different dropField + withField + getField combinations") { + val structType = StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = false))) + + val structLevel1: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(Row(1, 2)) :: Nil), + StructType(Seq(StructField("a", structType, nullable = false)))) + + val nullStructLevel1: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(null) :: Nil), + StructType(Seq(StructField("a", structType, nullable = true)))) + + val nullableStructLevel1: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(Row(1, 2)) :: Row(null) :: Nil), + StructType(Seq(StructField("a", structType, nullable = true)))) + + def check( + fieldOps: Column => Column, + getFieldName: String, + expectedValue: Option[Int]): Unit = { + + def query(df: DataFrame): DataFrame = + df.select(fieldOps(col("a")).getField(getFieldName).as("res")) + + checkAnswer( + query(structLevel1), + Row(expectedValue.orNull) :: Nil, + StructType(Seq(StructField("res", IntegerType, nullable = expectedValue.isEmpty)))) + + checkAnswer( + query(nullStructLevel1), + Row(null) :: Nil, + StructType(Seq(StructField("res", IntegerType, nullable = true)))) + + checkAnswer( + query(nullableStructLevel1), + Row(expectedValue.orNull) :: Row(null) :: Nil, + StructType(Seq(StructField("res", IntegerType, nullable = true)))) + } + + // add attribute, extract an attribute from the original struct + check(_.withField("c", lit(3)), "a", Some(1)) + check(_.withField("c", lit(3)), "b", Some(2)) + + // add attribute, extract added attribute + check(_.withField("c", lit(3)), "c", Some(3)) + check(_.withField("c", col("a.a")), "c", Some(1)) + check(_.withField("c", col("a.b")), "c", Some(2)) + check(_.withField("c", lit(null).cast(IntegerType)), "c", None) + + // replace attribute, extract an attribute from the original struct + check(_.withField("b", lit(3)), "a", Some(1)) + check(_.withField("a", lit(3)), "b", Some(2)) + + // replace attribute, extract replaced attribute + 
check(_.withField("b", lit(3)), "b", Some(3)) + check(_.withField("b", lit(null).cast(IntegerType)), "b", None) + check(_.withField("a", lit(3)), "a", Some(3)) + check(_.withField("a", lit(null).cast(IntegerType)), "a", None) + + // drop attribute, extract an attribute from the original struct + check(_.dropFields("b"), "a", Some(1)) + check(_.dropFields("a"), "b", Some(2)) + + // drop attribute, add attribute, extract an attribute from the original struct + check(_.dropFields("b").withField("c", lit(3)), "a", Some(1)) + check(_.dropFields("a").withField("c", lit(3)), "b", Some(2)) + + // drop attribute, add another attribute, extract added attribute + check(_.dropFields("a").withField("c", lit(3)), "c", Some(3)) + check(_.dropFields("b").withField("c", lit(3)), "c", Some(3)) + + // add attribute, drop attribute, extract an attribute from the original struct + check(_.withField("c", lit(3)).dropFields("a"), "b", Some(2)) + check(_.withField("c", lit(3)).dropFields("b"), "a", Some(1)) + + // add attribute, drop another attribute, extract added attribute + check(_.withField("c", lit(3)).dropFields("a"), "c", Some(3)) + check(_.withField("c", lit(3)).dropFields("b"), "c", Some(3)) + + // replace attribute, drop same attribute, extract an attribute from the original struct + check(_.withField("b", lit(3)).dropFields("b"), "a", Some(1)) + check(_.withField("a", lit(3)).dropFields("a"), "b", Some(2)) + + // add attribute, drop same attribute, extract an attribute from the original struct + check(_.withField("c", lit(3)).dropFields("c"), "a", Some(1)) + check(_.withField("c", lit(3)).dropFields("c"), "b", Some(2)) + + // add attribute, drop another attribute, extract added attribute + check(_.withField("b", lit(3)).dropFields("a"), "b", Some(3)) + check(_.withField("a", lit(3)).dropFields("b"), "a", Some(3)) + check(_.withField("b", lit(null).cast(IntegerType)).dropFields("a"), "b", None) + check(_.withField("a", lit(null).cast(IntegerType)).dropFields("b"), "a", None) + + // drop attribute, add same attribute, extract added attribute + check(_.dropFields("b").withField("b", lit(3)), "b", Some(3)) + check(_.dropFields("a").withField("a", lit(3)), "a", Some(3)) + check(_.dropFields("b").withField("b", lit(null).cast(IntegerType)), "b", None) + check(_.dropFields("a").withField("a", lit(null).cast(IntegerType)), "a", None) + check(_.dropFields("c").withField("c", lit(3)), "c", Some(3)) + + // add attribute, drop same attribute, add same attribute again, extract added attribute + check(_.withField("c", lit(3)).dropFields("c").withField("c", lit(4)), "c", Some(4)) + } + + test("should move field up one level of nesting") { + // move a field up one level + checkAnswer( + nullableStructLevel2.select( + col("a").withField("c", col("a.a.c")).dropFields("a.c").as("res")), + Row(null) :: Row(Row(null, null)) :: Row(Row(Row(1, null), 3)) :: Nil, + StructType(Seq( + StructField("res", StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = true))), + nullable = true), + StructField("c", IntegerType, nullable = true))), + nullable = true)))) + + // move a field up one level and then extract it + checkAnswer( + nullableStructLevel2.select( + col("a").withField("c", col("a.a.c")).dropFields("a.c").getField("c").as("res")), + Row(null) :: Row(null) :: Row(3) :: Nil, + StructType(Seq(StructField("res", IntegerType, nullable = true)))) + } + + test("should be able to refer to newly added nested column") { + 
intercept[AnalysisException] { + structLevel1.select($"a".withField("d", lit(4)).withField("e", $"a.d" + 1).as("a")) + }.getMessage should include("No such struct field d in a, b, c") + + checkAnswer( + structLevel1 + .select($"a".withField("d", lit(4)).as("a")) + .select($"a".withField("e", $"a.d" + 1).as("a")), + Row(Row(1, null, 3, 4, 5)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = true), + StructField("c", IntegerType, nullable = false), + StructField("d", IntegerType, nullable = false), + StructField("e", IntegerType, nullable = false))), + nullable = false)))) + } + + test("should be able to drop newly added nested column") { + Seq( + structLevel1.select($"a".withField("d", lit(4)).dropFields("d").as("a")), + structLevel1 + .select($"a".withField("d", lit(4)).as("a")) + .select($"a".dropFields("d").as("a")) + ).foreach { query => + checkAnswer( + query, + Row(Row(1, null, 3)) :: Nil, + StructType(Seq( + StructField("a", structType, nullable = false)))) + } + } + + test("should still be able to refer to dropped column within the same select statement") { + // we can still access the nested column even after dropping it within the same select statement + checkAnswer( + structLevel1.select($"a".dropFields("c").withField("z", $"a.c").as("a")), + Row(Row(1, null, 3)) :: Nil, + StructType(Seq( + StructField("a", StructType(Seq( + StructField("a", IntegerType, nullable = false), + StructField("b", IntegerType, nullable = true), + StructField("z", IntegerType, nullable = false))), + nullable = false)))) + + // we can't access the nested column in subsequent select statement after dropping it in a + // previous select statement + intercept[AnalysisException]{ + structLevel1 + .select($"a".dropFields("c").as("a")) + .select($"a".withField("z", $"a.c")).as("a") + }.getMessage should include("No such struct field c in a, b") + } + + test("nestedDf should generate nested DataFrames") { + checkAnswer( + emptyNestedDf(1, 1, nullable = false), + Seq.empty[Row], + StructType(Seq(StructField("nested0Col0", StructType(Seq( + StructField("nested1Col0", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + emptyNestedDf(1, 2, nullable = false), + Seq.empty[Row], + StructType(Seq(StructField("nested0Col0", StructType(Seq( + StructField("nested1Col0", IntegerType, nullable = false), + StructField("nested1Col1", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + emptyNestedDf(2, 1, nullable = false), + Seq.empty[Row], + StructType(Seq(StructField("nested0Col0", StructType(Seq( + StructField("nested1Col0", StructType(Seq( + StructField("nested2Col0", IntegerType, nullable = false))), + nullable = false))), + nullable = false)))) + + checkAnswer( + emptyNestedDf(2, 2, nullable = false), + Seq.empty[Row], + StructType(Seq(StructField("nested0Col0", StructType(Seq( + StructField("nested1Col0", StructType(Seq( + StructField("nested2Col0", IntegerType, nullable = false), + StructField("nested2Col1", IntegerType, nullable = false))), + nullable = false), + StructField("nested1Col1", IntegerType, nullable = false))), + nullable = false)))) + + checkAnswer( + emptyNestedDf(2, 2, nullable = true), + Seq.empty[Row], + StructType(Seq(StructField("nested0Col0", StructType(Seq( + StructField("nested1Col0", StructType(Seq( + StructField("nested2Col0", IntegerType, nullable = false), + StructField("nested2Col1", IntegerType, nullable = false))), + nullable = true), + 
StructField("nested1Col1", IntegerType, nullable = false))), + nullable = true)))) + } + + Seq(Performant, NonPerformant).foreach { method => + Seq(false, true).foreach { nullable => + test(s"should add and drop 1 column at each depth of nesting using ${method.name} method, " + + s"nullable = $nullable") { + val maxDepth = 3 + + // dataframe with nested*Col0 to nested*Col2 at each depth + val inputDf = emptyNestedDf(maxDepth, 3, nullable) + + // add nested*Col3 and drop nested*Col2 + val modifiedColumn = method( + column = col(nestedColName(0, 0)), + numsToAdd = Seq(3), + numsToDrop = Seq(2), + maxDepth = maxDepth + ).as(nestedColName(0, 0)) + val resultDf = inputDf.select(modifiedColumn) + + // dataframe with nested*Col0, nested*Col1, nested*Col3 at each depth + val expectedDf = { + val colNums = Seq(0, 1, 3) + val nestedColumnDataType = nestedStructType(colNums, nullable, maxDepth) + + spark.createDataFrame( + spark.sparkContext.emptyRDD[Row], + StructType(Seq(StructField(nestedColName(0, 0), nestedColumnDataType, nullable)))) + } + + checkAnswer(resultDf, expectedDf.collect(), expectedDf.schema) + } + } + } + + test("assert_true") { + // assert_true(condition, errMsgCol) + val booleanDf = Seq((true), (false)).toDF("cond") + checkAnswer( + booleanDf.filter("cond = true").select(assert_true($"cond")), + Row(null) :: Nil + ) + val e1 = intercept[SparkException] { + booleanDf.select(assert_true($"cond", lit(null.asInstanceOf[String]))).collect() + } + assert(e1.getCause.isInstanceOf[RuntimeException]) + assert(e1.getCause.getMessage == null) + + val nullDf = Seq(("first row", None), ("second row", Some(true))).toDF("n", "cond") + checkAnswer( + nullDf.filter("cond = true").select(assert_true($"cond", $"cond")), + Row(null) :: Nil + ) + val e2 = intercept[SparkException] { + nullDf.select(assert_true($"cond", $"n")).collect() + } + assert(e2.getCause.isInstanceOf[RuntimeException]) + assert(e2.getCause.getMessage == "first row") + + // assert_true(condition) + val intDf = Seq((0, 1)).toDF("a", "b") + checkAnswer(intDf.select(assert_true($"a" < $"b")), Row(null) :: Nil) + val e3 = intercept[SparkException] { + intDf.select(assert_true($"a" > $"b")).collect() + } + assert(e3.getCause.isInstanceOf[RuntimeException]) + assert(e3.getCause.getMessage == "'('a > 'b)' is not true!") + } + + test("raise_error") { + val strDf = Seq(("hello")).toDF("a") + + val e1 = intercept[SparkException] { + strDf.select(raise_error(lit(null.asInstanceOf[String]))).collect() + } + assert(e1.getCause.isInstanceOf[RuntimeException]) + assert(e1.getCause.getMessage == null) + + val e2 = intercept[SparkException] { + strDf.select(raise_error($"a")).collect() + } + assert(e2.getCause.isInstanceOf[RuntimeException]) + assert(e2.getCause.getMessage == "hello") + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala index 800e294cca8c4..16b92d6d11c91 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala @@ -80,16 +80,16 @@ class CsvFunctionsSuite extends QueryTest with SharedSparkSession { test("schema_of_csv - infers schemas") { checkAnswer( spark.range(1).select(schema_of_csv(lit("0.1,1"))), - Seq(Row("struct<_c0:double,_c1:int>"))) + Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>"))) checkAnswer( spark.range(1).select(schema_of_csv("0.1,1")), - Seq(Row("struct<_c0:double,_c1:int>"))) + Seq(Row("STRUCT<`_c0`: DOUBLE, 
`_c1`: INT>"))) } test("schema_of_csv - infers schemas using options") { val df = spark.range(1) .select(schema_of_csv(lit("0.1 1"), Map("sep" -> " ").asJava)) - checkAnswer(df, Seq(Row("struct<_c0:double,_c1:int>"))) + checkAnswer(df, Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>"))) } test("to_csv - struct") { @@ -236,7 +236,7 @@ class CsvFunctionsSuite extends QueryTest with SharedSparkSession { val input = concat_ws(",", lit(0.1), lit(1)) checkAnswer( spark.range(1).select(schema_of_csv(input)), - Seq(Row("struct<_c0:double,_c1:int>"))) + Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>"))) } test("optional datetime parser does not affect csv time formatting") { @@ -250,4 +250,52 @@ class CsvFunctionsSuite extends QueryTest with SharedSparkSession { | """.stripMargin) checkAnswer(toDF("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"), toDF("yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]")) } + + test("SPARK-32968: Pruning csv field should not change result") { + Seq("true", "false").foreach { enabled => + withSQLConf(SQLConf.CSV_EXPRESSION_OPTIMIZATION.key -> enabled) { + val df1 = sparkContext.parallelize(Seq("a,b")).toDF("csv") + .selectExpr("from_csv(csv, 'a string, b string', map('mode', 'failfast')) as parsed") + checkAnswer(df1.selectExpr("parsed.a"), Seq(Row("a"))) + checkAnswer(df1.selectExpr("parsed.b"), Seq(Row("b"))) + + val df2 = sparkContext.parallelize(Seq("a,b")).toDF("csv") + .selectExpr("from_csv(csv, 'a string, b string') as parsed") + checkAnswer(df2.selectExpr("parsed.a"), Seq(Row("a"))) + checkAnswer(df2.selectExpr("parsed.b"), Seq(Row("b"))) + } + } + } + + test("SPARK-32968: bad csv input with csv pruning optimization") { + Seq("true", "false").foreach { enabled => + withSQLConf(SQLConf.CSV_EXPRESSION_OPTIMIZATION.key -> enabled) { + val df = sparkContext.parallelize(Seq("1,\u0001\u0000\u0001234")).toDF("csv") + .selectExpr("from_csv(csv, 'a int, b int', map('mode', 'failfast')) as parsed") + + val err1 = intercept[SparkException] { + df.selectExpr("parsed.a").collect + } + + val err2 = intercept[SparkException] { + df.selectExpr("parsed.b").collect + } + + assert(err1.getMessage.contains("Malformed records are detected in record parsing")) + assert(err2.getMessage.contains("Malformed records are detected in record parsing")) + } + } + } + + test("SPARK-32968: csv pruning optimization with corrupt record field") { + Seq("true", "false").foreach { enabled => + withSQLConf(SQLConf.CSV_EXPRESSION_OPTIMIZATION.key -> enabled) { + val df = sparkContext.parallelize(Seq("a,b,c,d")).toDF("csv") + .selectExpr("from_csv(csv, 'a string, b string, _corrupt_record string') as parsed") + .selectExpr("parsed._corrupt_record") + + checkAnswer(df, Seq(Row("a,b,c,d"))) + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala index e954e2bf1c46d..6603fc0bedca7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala @@ -456,25 +456,51 @@ class DataFrameAggregateSuite extends QueryTest } test("zero moments") { - val input = Seq((1, 2)).toDF("a", "b") - checkAnswer( - input.agg(stddev($"a"), stddev_samp($"a"), stddev_pop($"a"), variance($"a"), - var_samp($"a"), var_pop($"a"), skewness($"a"), kurtosis($"a")), - Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0, - Double.NaN, Double.NaN)) + withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "true") { + val input = Seq((1, 
2)).toDF("a", "b") + checkAnswer( + input.agg(stddev($"a"), stddev_samp($"a"), stddev_pop($"a"), variance($"a"), + var_samp($"a"), var_pop($"a"), skewness($"a"), kurtosis($"a")), + Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0, + Double.NaN, Double.NaN)) - checkAnswer( - input.agg( - expr("stddev(a)"), - expr("stddev_samp(a)"), - expr("stddev_pop(a)"), - expr("variance(a)"), - expr("var_samp(a)"), - expr("var_pop(a)"), - expr("skewness(a)"), - expr("kurtosis(a)")), - Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0, - Double.NaN, Double.NaN)) + checkAnswer( + input.agg( + expr("stddev(a)"), + expr("stddev_samp(a)"), + expr("stddev_pop(a)"), + expr("variance(a)"), + expr("var_samp(a)"), + expr("var_pop(a)"), + expr("skewness(a)"), + expr("kurtosis(a)")), + Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0, + Double.NaN, Double.NaN)) + } + } + + test("SPARK-13860: zero moments LEGACY_STATISTICAL_AGGREGATE off") { + withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "false") { + val input = Seq((1, 2)).toDF("a", "b") + checkAnswer( + input.agg(stddev($"a"), stddev_samp($"a"), stddev_pop($"a"), variance($"a"), + var_samp($"a"), var_pop($"a"), skewness($"a"), kurtosis($"a")), + Row(null, null, 0.0, null, null, 0.0, + null, null)) + + checkAnswer( + input.agg( + expr("stddev(a)"), + expr("stddev_samp(a)"), + expr("stddev_pop(a)"), + expr("variance(a)"), + expr("var_samp(a)"), + expr("var_pop(a)"), + expr("skewness(a)"), + expr("kurtosis(a)")), + Row(null, null, 0.0, null, null, 0.0, + null, null)) + } } test("null moments") { @@ -975,7 +1001,8 @@ class DataFrameAggregateSuite extends QueryTest Seq(true, false).foreach { value => test(s"SPARK-31620: agg with subquery (whole-stage-codegen = $value)") { - withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> value.toString) { + withSQLConf( + SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> value.toString) { withTempView("t1", "t2") { sql("create temporary view t1 as select * from values (1, 2) as t1(a, b)") sql("create temporary view t2 as select * from values (3, 4) as t2(c, d)") @@ -998,14 +1025,13 @@ class DataFrameAggregateSuite extends QueryTest // test SortAggregateExec var df = sql("select max(if(c > (select a from t1), 'str1', 'str2')) as csum from t2") - assert(df.queryExecution.executedPlan - .find { case _: SortAggregateExec => true }.isDefined) + assert(find(df.queryExecution.executedPlan)(_.isInstanceOf[SortAggregateExec]).isDefined) checkAnswer(df, Row("str1") :: Nil) // test ObjectHashAggregateExec df = sql("select collect_list(d), sum(if(c > (select a from t1), d, 0)) as csum from t2") - assert(df.queryExecution.executedPlan - .find { case _: ObjectHashAggregateExec => true }.isDefined) + assert( + find(df.queryExecution.executedPlan)(_.isInstanceOf[ObjectHashAggregateExec]).isDefined) checkAnswer(df, Row(Array(4), 4) :: Nil) } } @@ -1043,6 +1069,14 @@ class DataFrameAggregateSuite extends QueryTest checkAnswer(sql(queryTemplate("FIRST")), Row(1)) checkAnswer(sql(queryTemplate("LAST")), Row(3)) } + + test("SPARK-32906: struct field names should not change after normalizing floats") { + val df = Seq(Tuple1(Tuple2(-0.0d, Double.NaN)), Tuple1(Tuple2(0.0d, Double.NaN))).toDF("k") + val aggs = df.distinct().queryExecution.sparkPlan.collect { case a: HashAggregateExec => a } + assert(aggs.length == 2) + assert(aggs.head.output.map(_.dataType.simpleString).head === + aggs.last.output.map(_.dataType.simpleString).head) + } } case class B(c: Option[Double]) diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala index b463a76a74026..ad13d7dcc2a71 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameJoinSuite.scala @@ -122,6 +122,16 @@ class DataFrameJoinSuite extends QueryTest df2.crossJoin(df1), Row(2, "2", 1, "1") :: Row(2, "2", 3, "3") :: Row(4, "4", 1, "1") :: Row(4, "4", 3, "3") :: Nil) + + checkAnswer( + df1.join(df2, Nil, "cross"), + Row(1, "1", 2, "2") :: Row(1, "1", 4, "4") :: + Row(3, "3", 2, "2") :: Row(3, "3", 4, "4") :: Nil) + + checkAnswer( + df2.join(df1, Nil, "cross"), + Row(2, "2", 1, "1") :: Row(2, "2", 3, "3") :: + Row(4, "4", 1, "1") :: Row(4, "4", 3, "3") :: Nil) } test("broadcast join hint using broadcast function") { @@ -341,14 +351,14 @@ class DataFrameJoinSuite extends QueryTest def checkIfHintApplied(df: DataFrame): Unit = { val sparkPlan = df.queryExecution.executedPlan - val broadcastHashJoins = sparkPlan.collect { case p: BroadcastHashJoinExec => p } + val broadcastHashJoins = collect(sparkPlan) { case p: BroadcastHashJoinExec => p } assert(broadcastHashJoins.size == 1) val broadcastExchanges = broadcastHashJoins.head.collect { case p: BroadcastExchangeExec => p } assert(broadcastExchanges.size == 1) val tables = broadcastExchanges.head.collect { - case FileSourceScanExec(_, _, _, _, _, _, _, Some(tableIdent)) => tableIdent + case FileSourceScanExec(_, _, _, _, _, _, _, Some(tableIdent), _) => tableIdent } assert(tables.size == 1) assert(tables.head === TableIdentifier(table1Name, Some(dbName))) @@ -356,7 +366,7 @@ class DataFrameJoinSuite extends QueryTest def checkIfHintNotApplied(df: DataFrame): Unit = { val sparkPlan = df.queryExecution.executedPlan - val broadcastHashJoins = sparkPlan.collect { case p: BroadcastHashJoinExec => p } + val broadcastHashJoins = collect(sparkPlan) { case p: BroadcastHashJoinExec => p } assert(broadcastHashJoins.isEmpty) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSelfJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSelfJoinSuite.scala index 3b3b54f75da57..50846d9d12b97 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSelfJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSelfJoinSuite.scala @@ -21,6 +21,7 @@ import org.apache.spark.sql.expressions.Window import org.apache.spark.sql.functions.{count, sum} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession +import org.apache.spark.sql.test.SQLTestData.TestData class DataFrameSelfJoinSuite extends QueryTest with SharedSparkSession { import testImplicits._ @@ -219,4 +220,32 @@ class DataFrameSelfJoinSuite extends QueryTest with SharedSparkSession { Seq((1, 2), (1, 2), (2, 4), (2, 4)).map(Row.fromTuple)) } } + + test("SPARK-33071/SPARK-33536: Avoid changing dataset_id of LogicalPlan in join() " + + "to not break DetectAmbiguousSelfJoin") { + val emp1 = Seq[TestData]( + TestData(1, "sales"), + TestData(2, "personnel"), + TestData(3, "develop"), + TestData(4, "IT")).toDS() + val emp2 = Seq[TestData]( + TestData(1, "sales"), + TestData(2, "personnel"), + TestData(3, "develop")).toDS() + val emp3 = emp1.join(emp2, emp1("key") === emp2("key")).select(emp1("*")) + assertAmbiguousSelfJoin(emp1.join(emp3, emp1.col("key") === emp3.col("key"), + "left_outer").select(emp1.col("*"), emp3.col("key").as("e2"))) + } + + test("df.show() 
should also not change dataset_id of LogicalPlan") { + val df = Seq[TestData]( + TestData(1, "sales"), + TestData(2, "personnel"), + TestData(3, "develop"), + TestData(4, "IT")).toDF() + val ds_id1 = df.logicalPlan.getTagValue(Dataset.DATASET_ID_TAG) + df.show(0) + val ds_id2 = df.logicalPlan.getTagValue(Dataset.DATASET_ID_TAG) + assert(ds_id1 === ds_id2) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala index e72b8ce860b28..5f28dc60962ba 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala @@ -536,4 +536,185 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession { assert(union2.schema.fieldNames === Array("a", "B", "C", "c")) } } + + test("SPARK-32376: Make unionByName null-filling behavior work with struct columns - simple") { + val df1 = Seq(((1, 2, 3), 0), ((2, 3, 4), 1), ((3, 4, 5), 2)).toDF("a", "idx") + val df2 = Seq(((3, 4), 0), ((1, 2), 1), ((2, 3), 2)).toDF("a", "idx") + val df3 = Seq(((100, 101, 102, 103), 0), ((110, 111, 112, 113), 1), ((120, 121, 122, 123), 2)) + .toDF("a", "idx") + + var unionDf = df1.unionByName(df2, true) + + checkAnswer(unionDf, + Row(Row(1, 2, 3), 0) :: Row(Row(2, 3, 4), 1) :: Row(Row(3, 4, 5), 2) :: + Row(Row(3, 4, null), 0) :: Row(Row(1, 2, null), 1) :: Row(Row(2, 3, null), 2) :: Nil + ) + + assert(unionDf.schema.toDDL == "`a` STRUCT<`_1`: INT, `_2`: INT, `_3`: INT>,`idx` INT") + + unionDf = df1.unionByName(df2, true).unionByName(df3, true) + + checkAnswer(unionDf, + Row(Row(1, 2, 3, null), 0) :: + Row(Row(2, 3, 4, null), 1) :: + Row(Row(3, 4, 5, null), 2) :: // df1 + Row(Row(3, 4, null, null), 0) :: + Row(Row(1, 2, null, null), 1) :: + Row(Row(2, 3, null, null), 2) :: // df2 + Row(Row(100, 101, 102, 103), 0) :: + Row(Row(110, 111, 112, 113), 1) :: + Row(Row(120, 121, 122, 123), 2) :: Nil // df3 + ) + assert(unionDf.schema.toDDL == + "`a` STRUCT<`_1`: INT, `_2`: INT, `_3`: INT, `_4`: INT>,`idx` INT") + } + + test("SPARK-32376: Make unionByName null-filling behavior work with struct columns - nested") { + val df1 = Seq((0, UnionClass1a(0, 1L, UnionClass2(1, "2")))).toDF("id", "a") + val df2 = Seq((1, UnionClass1b(1, 2L, UnionClass3(2, 3L)))).toDF("id", "a") + + val expectedSchema = "`id` INT,`a` STRUCT<`a`: INT, `b`: BIGINT, " + + "`nested`: STRUCT<`a`: INT, `b`: BIGINT, `c`: STRING>>" + + var unionDf = df1.unionByName(df2, true) + checkAnswer(unionDf, + Row(0, Row(0, 1, Row(1, null, "2"))) :: + Row(1, Row(1, 2, Row(2, 3L, null))) :: Nil) + assert(unionDf.schema.toDDL == expectedSchema) + + unionDf = df2.unionByName(df1, true) + checkAnswer(unionDf, + Row(1, Row(1, 2, Row(2, 3L, null))) :: + Row(0, Row(0, 1, Row(1, null, "2"))) :: Nil) + assert(unionDf.schema.toDDL == expectedSchema) + + val df3 = Seq((2, UnionClass1b(2, 3L, null))).toDF("id", "a") + unionDf = df1.unionByName(df3, true) + checkAnswer(unionDf, + Row(0, Row(0, 1, Row(1, null, "2"))) :: + Row(2, Row(2, 3, null)) :: Nil) + assert(unionDf.schema.toDDL == expectedSchema) + } + + test("SPARK-32376: Make unionByName null-filling behavior work with struct columns" + + " - case-sensitive cases") { + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val df1 = Seq((0, UnionClass1a(0, 1L, UnionClass2(1, "2")))).toDF("id", "a") + val df2 = Seq((1, UnionClass1c(1, 2L, UnionClass4(2, 3L)))).toDF("id", "a") + + var unionDf = 
df1.unionByName(df2, true) + checkAnswer(unionDf, + Row(0, Row(0, 1, Row(null, 1, null, "2"))) :: + Row(1, Row(1, 2, Row(2, null, 3L, null))) :: Nil) + assert(unionDf.schema.toDDL == + "`id` INT,`a` STRUCT<`a`: INT, `b`: BIGINT, " + + "`nested`: STRUCT<`A`: INT, `a`: INT, `b`: BIGINT, `c`: STRING>>") + + unionDf = df2.unionByName(df1, true) + checkAnswer(unionDf, + Row(1, Row(1, 2, Row(2, null, 3L, null))) :: + Row(0, Row(0, 1, Row(null, 1, null, "2"))) :: Nil) + assert(unionDf.schema.toDDL == + "`id` INT,`a` STRUCT<`a`: INT, `b`: BIGINT, " + + "`nested`: STRUCT<`A`: INT, `a`: INT, `b`: BIGINT, `c`: STRING>>") + + val df3 = Seq((2, UnionClass1b(2, 3L, UnionClass3(4, 5L)))).toDF("id", "a") + unionDf = df2.unionByName(df3, true) + checkAnswer(unionDf, + Row(1, Row(1, 2, Row(2, null, 3L))) :: + Row(2, Row(2, 3, Row(null, 4, 5L))) :: Nil) + assert(unionDf.schema.toDDL == + "`id` INT,`a` STRUCT<`a`: INT, `b`: BIGINT, " + + "`nested`: STRUCT<`A`: INT, `a`: INT, `b`: BIGINT>>") + } + } + + test("SPARK-32376: Make unionByName null-filling behavior work with struct columns - edge case") { + val nestedStructType1 = StructType(Seq( + StructField("b", StringType))) + val nestedStructValues1 = Row("b") + + val nestedStructType2 = StructType(Seq( + StructField("b", StringType), + StructField("a", StringType))) + val nestedStructValues2 = Row("b", "a") + + val df1: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(nestedStructValues1) :: Nil), + StructType(Seq(StructField("topLevelCol", nestedStructType1)))) + + val df2: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(nestedStructValues2) :: Nil), + StructType(Seq(StructField("topLevelCol", nestedStructType2)))) + + val union = df1.unionByName(df2, allowMissingColumns = true) + checkAnswer(union, Row(Row(null, "b")) :: Row(Row("a", "b")) :: Nil) + assert(union.schema.toDDL == "`topLevelCol` STRUCT<`a`: STRING, `b`: STRING>") + } + + test("SPARK-32376: Make unionByName null-filling behavior work with struct columns - deep expr") { + def nestedDf(depth: Int, numColsAtEachDepth: Int): DataFrame = { + val initialNestedStructType = StructType( + (0 to numColsAtEachDepth).map(i => + StructField(s"nested${depth}Col$i", IntegerType, nullable = false)) + ) + val initialNestedValues = Row(0 to numColsAtEachDepth: _*) + + var depthCounter = depth - 1 + var structType = initialNestedStructType + var struct = initialNestedValues + while (depthCounter != 0) { + struct = Row((struct +: (1 to numColsAtEachDepth)): _*) + structType = StructType( + StructField(s"nested${depthCounter}Col0", structType, nullable = false) +: + (1 to numColsAtEachDepth).map(i => + StructField(s"nested${depthCounter}Col$i", IntegerType, nullable = false)) + ) + depthCounter -= 1 + } + + val df: DataFrame = spark.createDataFrame( + sparkContext.parallelize(Row(struct) :: Nil), + StructType(Seq(StructField("nested0Col0", structType)))) + + df + } + + val df1 = nestedDf(depth = 10, numColsAtEachDepth = 1) + val df2 = nestedDf(depth = 10, numColsAtEachDepth = 20) + val union = df1.unionByName(df2, allowMissingColumns = true) + // scalastyle:off + val row1 = Row(Row(Row(Row(Row(Row(Row(Row(Row(Row( + Row(0, 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, 
null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null), + 1, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null)) + val row2 = Row(Row(Row(Row(Row(Row(Row(Row(Row(Row( + Row(0, 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9), + 1, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 2, 20, 3, 4, 5, 6, 7, 8, 9)) + // scalastyle:on + checkAnswer(union, row1 :: row2 :: Nil) + } } + +case class UnionClass1a(a: Int, b: Long, nested: UnionClass2) +case class UnionClass1b(a: Int, b: Long, nested: UnionClass3) +case class UnionClass1c(a: Int, b: Long, nested: UnionClass4) + +case class UnionClass2(a: Int, c: String) +case class UnionClass3(a: Int, b: Long) +case class UnionClass4(A: Int, b: Long) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala index d95f09a4cc839..d777cd45b61ee 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala @@ -26,7 +26,6 @@ import java.util.concurrent.atomic.AtomicLong import scala.reflect.runtime.universe.TypeTag import scala.util.Random -import org.scalatest.matchers.must.Matchers import org.scalatest.matchers.should.Matchers._ import org.apache.spark.SparkException @@ -37,6 +36,7 @@ import org.apache.spark.sql.catalyst.expressions.Uuid import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, OneRowRelation} import org.apache.spark.sql.catalyst.util.DateTimeUtils +import org.apache.spark.sql.connector.FakeV2Provider import org.apache.spark.sql.execution.{FilterExec, QueryExecution, WholeStageCodegenExec} import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper import org.apache.spark.sql.execution.aggregate.HashAggregateExec @@ -805,7 +805,7 @@ class DataFrameSuite extends QueryTest assert(df2.drop("`a.b`").columns.size == 2) } - test("drop(name: String) search and drop all top level columns that matchs the name") { + test("drop(name: String) search and drop all top level columns that matches the name") { val df1 = Seq((1, 2)).toDF("a", "b") val df2 = Seq((3, 4)).toDF("a", "b") 
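The SPARK-32376 tests above cover unionByName null-filling when struct columns carry different sets of fields. A condensed sketch of that behavior (local session assumed, values arbitrary):

```scala
import org.apache.spark.sql.SparkSession

object UnionByNameStructSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("union-by-name-structs").getOrCreate()
    import spark.implicits._

    val df1 = Seq(((1, 2, 3), 0)).toDF("a", "idx") // a: struct<_1, _2, _3>
    val df2 = Seq(((4, 5), 1)).toDF("a", "idx")    // a: struct<_1, _2>

    // With allowMissingColumns = true, the missing nested field _3 is null-filled for df2's rows.
    val unioned = df1.unionByName(df2, allowMissingColumns = true)
    unioned.printSchema() // a: struct<_1: int, _2: int, _3: int>, idx: int
    unioned.show()        // {1, 2, 3}, 0  and  {4, 5, null}, 1

    spark.stop()
  }
}
```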
checkAnswer(df1.crossJoin(df2), Row(1, 2, 3, 4)) @@ -1235,6 +1235,44 @@ class DataFrameSuite extends QueryTest assert(df.showString(10, vertical = true) === expectedAnswer) } + test("SPARK-33690: showString: escape meta-characters") { + val df1 = Seq("aaa\nbbb\tccc").toDF("value") + assert(df1.showString(1, truncate = 0) === + """+-------------+ + ||value | + |+-------------+ + ||aaa\nbbb\tccc| + |+-------------+ + |""".stripMargin) + + val df2 = Seq(Seq("aaa\nbbb\tccc")).toDF("value") + assert(df2.showString(1, truncate = 0) === + """+---------------+ + ||value | + |+---------------+ + ||[aaa\nbbb\tccc]| + |+---------------+ + |""".stripMargin) + + val df3 = Seq(Map("aaa\nbbb\tccc" -> "aaa\nbbb\tccc")).toDF("value") + assert(df3.showString(1, truncate = 0) === + """+--------------------------------+ + ||value | + |+--------------------------------+ + ||{aaa\nbbb\tccc -> aaa\nbbb\tccc}| + |+--------------------------------+ + |""".stripMargin) + + val df4 = Seq("aaa\nbbb\tccc").toDF("value").selectExpr("named_struct('v', value)") + assert(df4.showString(1, truncate = 0) === + """+----------------------+ + ||named_struct(v, value)| + |+----------------------+ + ||{aaa\nbbb\tccc} | + |+----------------------+ + |""".stripMargin) + } + test("SPARK-7319 showString") { val expectedAnswer = """+---+-----+ ||key|value| @@ -2452,6 +2490,14 @@ class DataFrameSuite extends QueryTest assert(e.getMessage.contains("Table or view not found:")) } + test("SPARK-32680: Don't analyze CTAS with unresolved query") { + val v2Source = classOf[FakeV2Provider].getName + val e = intercept[AnalysisException] { + sql(s"CREATE TABLE t USING $v2Source AS SELECT * from nonexist") + } + assert(e.getMessage.contains("Table or view not found:")) + } + test("CalendarInterval reflection support") { val df = Seq((1, new CalendarInterval(1, 2, 3))).toDF("a", "b") checkAnswer(df.selectExpr("b"), Row(new CalendarInterval(1, 2, 3))) @@ -2555,6 +2601,18 @@ class DataFrameSuite extends QueryTest val df = Seq(0.0 -> -0.0).toDF("pos", "neg") checkAnswer(df.select($"pos" > $"neg"), Row(false)) } + + test("SPARK-32635: Replace references with foldables coming only from the node's children") { + val a = Seq("1").toDF("col1").withColumn("col2", lit("1")) + val b = Seq("2").toDF("col1").withColumn("col2", lit("2")) + val aub = a.union(b) + val c = aub.filter($"col1" === "2").cache() + val d = Seq("2").toDF("col4") + val r = d.join(aub, $"col2" === $"col4").select("col4") + val l = c.select("col2") + val df = l.join(r, $"col2" === $"col4", "LeftOuter") + checkAnswer(df, Row("2", "2")) + } } case class GroupByKey(a: Int, b: Int) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala index 8b0f46b9d1ddb..4fdaeb57ad50e 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala @@ -17,8 +17,6 @@ package org.apache.spark.sql -import org.scalatest.BeforeAndAfterEach - import org.apache.spark.sql.catalyst.plans.logical.Expand import org.apache.spark.sql.functions._ import org.apache.spark.sql.test.SharedSparkSession diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFramesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFramesSuite.scala index 8c998290b5044..fd408c37ef6cd 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFramesSuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFramesSuite.scala @@ -17,8 +17,6 @@ package org.apache.spark.sql -import java.sql.Date - import org.apache.spark.sql.expressions.Window import org.apache.spark.sql.functions._ import org.apache.spark.sql.test.SharedSparkSession diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala index bc6adfb857b02..3568ad3a7343d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala @@ -94,89 +94,187 @@ class DataFrameWindowFunctionsSuite extends QueryTest } test("corr, covar_pop, stddev_pop functions in specific window") { - val df = Seq( - ("a", "p1", 10.0, 20.0), - ("b", "p1", 20.0, 10.0), - ("c", "p2", 20.0, 20.0), - ("d", "p2", 20.0, 20.0), - ("e", "p3", 0.0, 0.0), - ("f", "p3", 6.0, 12.0), - ("g", "p3", 6.0, 12.0), - ("h", "p3", 8.0, 16.0), - ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2") - checkAnswer( - df.select( - $"key", - corr("value1", "value2").over(Window.partitionBy("partitionId") - .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - covar_pop("value1", "value2") - .over(Window.partitionBy("partitionId") + withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "true") { + val df = Seq( + ("a", "p1", 10.0, 20.0), + ("b", "p1", 20.0, 10.0), + ("c", "p2", 20.0, 20.0), + ("d", "p2", 20.0, 20.0), + ("e", "p3", 0.0, 0.0), + ("f", "p3", 6.0, 12.0), + ("g", "p3", 6.0, 12.0), + ("h", "p3", 8.0, 16.0), + ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2") + checkAnswer( + df.select( + $"key", + corr("value1", "value2").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + covar_pop("value1", "value2") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + var_pop("value1") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + stddev_pop("value1") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + var_pop("value2") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + stddev_pop("value2") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))), + + // As stddev_pop(expr) = sqrt(var_pop(expr)) + // the "stddev_pop" column can be calculated from the "var_pop" column. 
+ // + // As corr(expr1, expr2) = covar_pop(expr1, expr2) / (stddev_pop(expr1) * stddev_pop(expr2)) + // the "corr" column can be calculated from the "covar_pop" and the two "stddev_pop" columns + Seq( + Row("a", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0), + Row("b", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0), + Row("c", null, 0.0, 0.0, 0.0, 0.0, 0.0), + Row("d", null, 0.0, 0.0, 0.0, 0.0, 0.0), + Row("e", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("f", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("g", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("h", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("i", Double.NaN, 0.0, 0.0, 0.0, 0.0, 0.0))) + } + } + + test("SPARK-13860: " + + "corr, covar_pop, stddev_pop functions in specific window " + + "LEGACY_STATISTICAL_AGGREGATE off") { + withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "false") { + val df = Seq( + ("a", "p1", 10.0, 20.0), + ("b", "p1", 20.0, 10.0), + ("c", "p2", 20.0, 20.0), + ("d", "p2", 20.0, 20.0), + ("e", "p3", 0.0, 0.0), + ("f", "p3", 6.0, 12.0), + ("g", "p3", 6.0, 12.0), + ("h", "p3", 8.0, 16.0), + ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2") + checkAnswer( + df.select( + $"key", + corr("value1", "value2").over(Window.partitionBy("partitionId") .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - var_pop("value1") - .over(Window.partitionBy("partitionId") + covar_pop("value1", "value2") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + var_pop("value1") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + stddev_pop("value1") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + var_pop("value2") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + stddev_pop("value2") + .over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))), + + // As stddev_pop(expr) = sqrt(var_pop(expr)) + // the "stddev_pop" column can be calculated from the "var_pop" column. 
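// A quick worked check of the expected values above (a sketch, assuming the usual
// population and sample definitions), using partition "p1" where value1 = {10, 20}
// and value2 = {20, 10}, both with mean 15:
//   var_pop(value1)    = ((10 - 15)^2 + (20 - 15)^2) / 2                    = 25  => stddev_pop = 5
//   covar_pop(v1, v2)  = ((10 - 15) * (20 - 15) + (20 - 15) * (10 - 15)) / 2 = -25
//   corr(v1, v2)       = -25 / (5 * 5)                                      = -1
//   var_samp(value1)   = ((10 - 15)^2 + (20 - 15)^2) / (2 - 1)              = 50  => stddev_samp ≈ 7.0710678
//   covar_samp(v1, v2) = -50
// For the single-row partition "p4", corr divides 0 by 0 and the sample statistics
// divide by n - 1 = 0; with LEGACY_STATISTICAL_AGGREGATE = true those evaluate to
// Double.NaN, otherwise to null, which is the only difference between the paired tests.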
+ // + // As corr(expr1, expr2) = covar_pop(expr1, expr2) / (stddev_pop(expr1) * stddev_pop(expr2)) + // the "corr" column can be calculated from the "covar_pop" and the two "stddev_pop" columns + Seq( + Row("a", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0), + Row("b", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0), + Row("c", null, 0.0, 0.0, 0.0, 0.0, 0.0), + Row("d", null, 0.0, 0.0, 0.0, 0.0, 0.0), + Row("e", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("f", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("g", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("h", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), + Row("i", null, 0.0, 0.0, 0.0, 0.0, 0.0))) + } + } + + test("covar_samp, var_samp (variance), stddev_samp (stddev) functions in specific window") { + withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "true") { + val df = Seq( + ("a", "p1", 10.0, 20.0), + ("b", "p1", 20.0, 10.0), + ("c", "p2", 20.0, 20.0), + ("d", "p2", 20.0, 20.0), + ("e", "p3", 0.0, 0.0), + ("f", "p3", 6.0, 12.0), + ("g", "p3", 6.0, 12.0), + ("h", "p3", 8.0, 16.0), + ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2") + checkAnswer( + df.select( + $"key", + covar_samp("value1", "value2").over(Window.partitionBy("partitionId") .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - stddev_pop("value1") - .over(Window.partitionBy("partitionId") + var_samp("value1").over(Window.partitionBy("partitionId") .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - var_pop("value2") - .over(Window.partitionBy("partitionId") + variance("value1").over(Window.partitionBy("partitionId") .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - stddev_pop("value2") - .over(Window.partitionBy("partitionId") - .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing))), - - // As stddev_pop(expr) = sqrt(var_pop(expr)) - // the "stddev_pop" column can be calculated from the "var_pop" column. - // - // As corr(expr1, expr2) = covar_pop(expr1, expr2) / (stddev_pop(expr1) * stddev_pop(expr2)) - // the "corr" column can be calculated from the "covar_pop" and the two "stddev_pop" columns. 
- Seq( - Row("a", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0), - Row("b", -1.0, -25.0, 25.0, 5.0, 25.0, 5.0), - Row("c", null, 0.0, 0.0, 0.0, 0.0, 0.0), - Row("d", null, 0.0, 0.0, 0.0, 0.0, 0.0), - Row("e", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), - Row("f", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), - Row("g", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), - Row("h", 1.0, 18.0, 9.0, 3.0, 36.0, 6.0), - Row("i", Double.NaN, 0.0, 0.0, 0.0, 0.0, 0.0))) + stddev_samp("value1").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + stddev("value1").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)) + ), + Seq( + Row("a", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755), + Row("b", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755), + Row("c", 0.0, 0.0, 0.0, 0.0, 0.0), + Row("d", 0.0, 0.0, 0.0, 0.0, 0.0), + Row("e", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("f", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("g", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("h", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("i", Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN))) + } } - test("covar_samp, var_samp (variance), stddev_samp (stddev) functions in specific window") { - val df = Seq( - ("a", "p1", 10.0, 20.0), - ("b", "p1", 20.0, 10.0), - ("c", "p2", 20.0, 20.0), - ("d", "p2", 20.0, 20.0), - ("e", "p3", 0.0, 0.0), - ("f", "p3", 6.0, 12.0), - ("g", "p3", 6.0, 12.0), - ("h", "p3", 8.0, 16.0), - ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", "value1", "value2") - checkAnswer( - df.select( - $"key", - covar_samp("value1", "value2").over(Window.partitionBy("partitionId") - .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - var_samp("value1").over(Window.partitionBy("partitionId") - .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - variance("value1").over(Window.partitionBy("partitionId") - .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - stddev_samp("value1").over(Window.partitionBy("partitionId") - .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), - stddev("value1").over(Window.partitionBy("partitionId") - .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)) - ), - Seq( - Row("a", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755), - Row("b", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755), - Row("c", 0.0, 0.0, 0.0, 0.0, 0.0), - Row("d", 0.0, 0.0, 0.0, 0.0, 0.0), - Row("e", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), - Row("f", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), - Row("g", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), - Row("h", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), - Row("i", Double.NaN, Double.NaN, Double.NaN, Double.NaN, Double.NaN))) + test("SPARK-13860: " + + "covar_samp, var_samp (variance), stddev_samp (stddev) functions in specific window " + + "LEGACY_STATISTICAL_AGGREGATE off") { + withSQLConf(SQLConf.LEGACY_STATISTICAL_AGGREGATE.key -> "false") { + val df = Seq( + ("a", "p1", 10.0, 20.0), + ("b", "p1", 20.0, 10.0), + ("c", "p2", 20.0, 20.0), + ("d", "p2", 20.0, 20.0), + ("e", "p3", 0.0, 0.0), + ("f", "p3", 6.0, 12.0), + ("g", "p3", 6.0, 12.0), + ("h", "p3", 8.0, 16.0), + ("i", "p4", 5.0, 5.0)).toDF("key", "partitionId", 
"value1", "value2") + checkAnswer( + df.select( + $"key", + covar_samp("value1", "value2").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + var_samp("value1").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + variance("value1").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + stddev_samp("value1").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)), + stddev("value1").over(Window.partitionBy("partitionId") + .orderBy("key").rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)) + ), + Seq( + Row("a", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755), + Row("b", -50.0, 50.0, 50.0, 7.0710678118654755, 7.0710678118654755), + Row("c", 0.0, 0.0, 0.0, 0.0, 0.0), + Row("d", 0.0, 0.0, 0.0, 0.0, 0.0), + Row("e", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("f", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("g", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("h", 24.0, 12.0, 12.0, 3.4641016151377544, 3.4641016151377544), + Row("i", null, null, null, null, null))) + } } test("collect_list in ascending ordered window") { @@ -541,6 +639,122 @@ class DataFrameWindowFunctionsSuite extends QueryTest Row("b", 3, null, null, null, null, null, null))) } + test("nth_value with ignoreNulls") { + val nullStr: String = null + val df = Seq( + ("a", 0, nullStr), + ("a", 1, "x"), + ("a", 2, "y"), + ("a", 3, "z"), + ("a", 4, nullStr), + ("b", 1, nullStr), + ("b", 2, nullStr)). + toDF("key", "order", "value") + val window = Window.partitionBy($"key").orderBy($"order") + checkAnswer( + df.select( + $"key", + $"order", + nth_value($"value", 2).over(window), + nth_value($"value", 2, ignoreNulls = false).over(window), + nth_value($"value", 2, ignoreNulls = true).over(window), + nth_value($"value", 3, ignoreNulls = false).over(window)), + Seq( + Row("a", 0, null, null, null, null), + Row("a", 1, "x", "x", null, null), + Row("a", 2, "x", "x", "y", "y"), + Row("a", 3, "x", "x", "y", "y"), + Row("a", 4, "x", "x", "y", "y"), + Row("b", 1, null, null, null, null), + Row("b", 2, null, null, null, null))) + } + + test("nth_value on descending ordered window") { + val nullStr: String = null + val df = Seq( + ("a", 0, nullStr), + ("a", 1, "x"), + ("a", 2, "y"), + ("a", 3, "z"), + ("a", 4, "v"), + ("b", 1, "k"), + ("b", 2, "l"), + ("b", 3, nullStr)). + toDF("key", "order", "value") + val window = Window.partitionBy($"key").orderBy($"order".desc) + checkAnswer( + df.select( + $"key", + $"order", + nth_value($"value", 2).over(window), + nth_value($"value", 2, ignoreNulls = false).over(window), + nth_value($"value", 2, ignoreNulls = true).over(window)), + Seq( + Row("a", 0, "z", "z", "z"), + Row("a", 1, "z", "z", "z"), + Row("a", 2, "z", "z", "z"), + Row("a", 3, "z", "z", "z"), + Row("a", 4, null, null, null), + Row("b", 1, "l", "l", "k"), + Row("b", 2, "l", "l", null), + Row("b", 3, null, null, null))) + } + + test("lead/lag with ignoreNulls") { + val nullStr: String = null + val df = Seq( + ("a", 0, nullStr), + ("a", 1, "x"), + ("b", 2, nullStr), + ("c", 3, nullStr), + ("a", 4, "y"), + ("b", 5, nullStr), + ("a", 6, "z"), + ("a", 7, "v"), + ("a", 8, nullStr)). 
+ toDF("key", "order", "value") + val window = Window.orderBy($"order") + checkAnswer( + df.select( + $"key", + $"order", + $"value", + lead($"value", 1).over(window), + lead($"value", 2).over(window), + lead($"value", 0, null, true).over(window), + lead($"value", 1, null, true).over(window), + lead($"value", 2, null, true).over(window), + lead($"value", 3, null, true).over(window), + lead(concat($"value", $"key"), 1, null, true).over(window), + lag($"value", 1).over(window), + lag($"value", 2).over(window), + lag($"value", 0, null, true).over(window), + lag($"value", 1, null, true).over(window), + lag($"value", 2, null, true).over(window), + lag($"value", 3, null, true).over(window), + lag(concat($"value", $"key"), 1, null, true).over(window)) + .orderBy($"order"), + Seq( + Row("a", 0, null, "x", null, null, "x", "y", "z", "xa", + null, null, null, null, null, null, null), + Row("a", 1, "x", null, null, "x", "y", "z", "v", "ya", + null, null, "x", null, null, null, null), + Row("b", 2, null, null, "y", null, "y", "z", "v", "ya", + "x", null, null, "x", null, null, "xa"), + Row("c", 3, null, "y", null, null, "y", "z", "v", "ya", + null, "x", null, "x", null, null, "xa"), + Row("a", 4, "y", null, "z", "y", "z", "v", null, "za", + null, null, "y", "x", null, null, "xa"), + Row("b", 5, null, "z", "v", null, "z", "v", null, "za", + "y", null, null, "y", "x", null, "ya"), + Row("a", 6, "z", "v", null, "z", "v", null, null, "va", + null, "y", "z", "y", "x", null, "ya"), + Row("a", 7, "v", null, null, "v", null, null, null, null, + "z", null, "v", "z", "y", "x", "za"), + Row("a", 8, null, null, null, null, null, null, null, null, + "v", "z", null, "v", "z", "y", "va"))) + } + test("SPARK-12989 ExtractWindowExpressions treats alias as regular attribute") { val src = Seq((0, 3, 5)).toDF("a", "b", "c") .withColumn("Data", struct("a", "b")) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWriterV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWriterV2Suite.scala index 508eefafd0754..35e732e0840e4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWriterV2Suite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWriterV2Suite.scala @@ -23,16 +23,16 @@ import scala.collection.JavaConverters._ import org.scalatest.BeforeAndAfter -import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, NoSuchTableException, TableAlreadyExistsException} +import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, TableAlreadyExistsException} import org.apache.spark.sql.catalyst.plans.logical.{AppendData, LogicalPlan, OverwriteByExpression, OverwritePartitionsDynamic} import org.apache.spark.sql.connector.{InMemoryTable, InMemoryTableCatalog} import org.apache.spark.sql.connector.catalog.{Identifier, TableCatalog} import org.apache.spark.sql.connector.expressions.{BucketTransform, DaysTransform, FieldReference, HoursTransform, IdentityTransform, LiteralValue, MonthsTransform, YearsTransform} import org.apache.spark.sql.execution.QueryExecution import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation +import org.apache.spark.sql.sources.FakeSourceOne import org.apache.spark.sql.test.SharedSparkSession -import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType} -import org.apache.spark.sql.types.TimestampType +import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType, TimestampType} import org.apache.spark.sql.util.QueryExecutionListener import 
org.apache.spark.unsafe.types.UTF8String import org.apache.spark.util.Utils @@ -58,6 +58,7 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo } after { + spark.sessionState.catalog.reset() spark.sessionState.catalogManager.reset() spark.sessionState.conf.clear() } @@ -119,6 +120,18 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c"), Row(4L, "d"), Row(5L, "e"), Row(6L, "f"))) } + test("Append: write to a temp view of v2 relation") { + spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo") + spark.table("testcat.table_name").createOrReplaceTempView("temp_view") + spark.table("source").writeTo("temp_view").append() + checkAnswer( + spark.table("testcat.table_name"), + Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c"))) + checkAnswer( + spark.table("temp_view"), + Seq(Row(1L, "a"), Row(2L, "b"), Row(3L, "c"))) + } + test("Append: by name not position") { spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo") @@ -137,11 +150,36 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo } test("Append: fail if table does not exist") { - val exc = intercept[NoSuchTableException] { + val exc = intercept[AnalysisException] { spark.table("source").writeTo("testcat.table_name").append() } - assert(exc.getMessage.contains("table_name")) + assert(exc.getMessage.contains("Table or view not found: testcat.table_name")) + } + + test("Append: fail if it writes to a temp view that is not v2 relation") { + spark.range(10).createOrReplaceTempView("temp_view") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("temp_view").append() + } + assert(exc.getMessage.contains("Cannot write into temp view temp_view as it's not a " + + "data source v2 relation")) + } + + test("Append: fail if it writes to a view") { + spark.sql("CREATE VIEW v AS SELECT 1") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("v").append() + } + assert(exc.getMessage.contains("Writing into a view is not allowed")) + } + + test("Append: fail if it writes to a v1 table") { + sql(s"CREATE TABLE table_name USING ${classOf[FakeSourceOne].getName}") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("table_name").append() + } + assert(exc.getMessage.contains("Cannot write into v1 table: `default`.`table_name`")) } test("Overwrite: overwrite by expression: true") { @@ -182,6 +220,20 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo Seq(Row(1L, "a"), Row(2L, "b"), Row(4L, "d"), Row(5L, "e"), Row(6L, "f"))) } + test("Overwrite: write to a temp view of v2 relation") { + spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo") + spark.table("source").writeTo("testcat.table_name").append() + spark.table("testcat.table_name").createOrReplaceTempView("temp_view") + + spark.table("source2").writeTo("testcat.table_name").overwrite(lit(true)) + checkAnswer( + spark.table("testcat.table_name"), + Seq(Row(4L, "d"), Row(5L, "e"), Row(6L, "f"))) + checkAnswer( + spark.table("temp_view"), + Seq(Row(4L, "d"), Row(5L, "e"), Row(6L, "f"))) + } + test("Overwrite: by name not position") { spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo") @@ -201,11 +253,36 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo } test("Overwrite: fail if table does not exist") { - val exc = intercept[NoSuchTableException] 
{ + val exc = intercept[AnalysisException] { spark.table("source").writeTo("testcat.table_name").overwrite(lit(true)) } - assert(exc.getMessage.contains("table_name")) + assert(exc.getMessage.contains("Table or view not found: testcat.table_name")) + } + + test("Overwrite: fail if it writes to a temp view that is not v2 relation") { + spark.range(10).createOrReplaceTempView("temp_view") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("temp_view").overwrite(lit(true)) + } + assert(exc.getMessage.contains("Cannot write into temp view temp_view as it's not a " + + "data source v2 relation")) + } + + test("Overwrite: fail if it writes to a view") { + spark.sql("CREATE VIEW v AS SELECT 1") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("v").overwrite(lit(true)) + } + assert(exc.getMessage.contains("Writing into a view is not allowed")) + } + + test("Overwrite: fail if it writes to a v1 table") { + sql(s"CREATE TABLE table_name USING ${classOf[FakeSourceOne].getName}") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("table_name").overwrite(lit(true)) + } + assert(exc.getMessage.contains("Cannot write into v1 table: `default`.`table_name`")) } test("OverwritePartitions: overwrite conflicting partitions") { @@ -246,6 +323,20 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo Seq(Row(4L, "d"), Row(5L, "e"), Row(6L, "f"))) } + test("OverwritePartitions: write to a temp view of v2 relation") { + spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo") + spark.table("source").writeTo("testcat.table_name").append() + spark.table("testcat.table_name").createOrReplaceTempView("temp_view") + + spark.table("source2").writeTo("testcat.table_name").overwritePartitions() + checkAnswer( + spark.table("testcat.table_name"), + Seq(Row(4L, "d"), Row(5L, "e"), Row(6L, "f"))) + checkAnswer( + spark.table("temp_view"), + Seq(Row(4L, "d"), Row(5L, "e"), Row(6L, "f"))) + } + test("OverwritePartitions: by name not position") { spark.sql("CREATE TABLE testcat.table_name (id bigint, data string) USING foo") @@ -265,11 +356,36 @@ class DataFrameWriterV2Suite extends QueryTest with SharedSparkSession with Befo } test("OverwritePartitions: fail if table does not exist") { - val exc = intercept[NoSuchTableException] { + val exc = intercept[AnalysisException] { spark.table("source").writeTo("testcat.table_name").overwritePartitions() } - assert(exc.getMessage.contains("table_name")) + assert(exc.getMessage.contains("Table or view not found: testcat.table_name")) + } + + test("OverwritePartitions: fail if it writes to a temp view that is not v2 relation") { + spark.range(10).createOrReplaceTempView("temp_view") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("temp_view").overwritePartitions() + } + assert(exc.getMessage.contains("Cannot write into temp view temp_view as it's not a " + + "data source v2 relation")) + } + + test("OverwritePartitions: fail if it writes to a view") { + spark.sql("CREATE VIEW v AS SELECT 1") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("v").overwritePartitions() + } + assert(exc.getMessage.contains("Writing into a view is not allowed")) + } + + test("OverwritePartitions: fail if it writes to a v1 table") { + sql(s"CREATE TABLE table_name USING ${classOf[FakeSourceOne].getName}") + val exc = intercept[AnalysisException] { + spark.table("source").writeTo("table_name").overwritePartitions() + } + 
assert(exc.getMessage.contains("Cannot write into v1 table: `default`.`table_name`")) } test("Create: basic behavior") { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala index 5c144dad23c30..009ccb9a45354 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetCacheSuite.scala @@ -102,18 +102,19 @@ class DatasetCacheSuite extends QueryTest test("persist and then groupBy columns asKey, map") { val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS() val grouped = ds.groupByKey(_._1) - val agged = grouped.mapGroups { (g, iter) => (g, iter.map(_._2).sum) } - agged.persist() + val aggregated = grouped.mapGroups { (g, iter) => (g, iter.map(_._2).sum) } + aggregated.persist() checkDataset( - agged.filter(_._1 == "b"), + aggregated.filter(_._1 == "b"), ("b", 3)) - assertCached(agged.filter(_._1 == "b")) + assertCached(aggregated.filter(_._1 == "b")) ds.unpersist(blocking = true) assert(ds.storageLevel == StorageLevel.NONE, "The Dataset ds should not be cached.") - agged.unpersist(blocking = true) - assert(agged.storageLevel == StorageLevel.NONE, "The Dataset agged should not be cached.") + aggregated.unpersist(blocking = true) + assert(aggregated.storageLevel == StorageLevel.NONE, + "The Dataset aggregated should not be cached.") } test("persist and then withColumn") { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetPrimitiveSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetPrimitiveSuite.scala index 2be86b9ad6208..8547d96e0f457 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetPrimitiveSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetPrimitiveSuite.scala @@ -20,7 +20,6 @@ package org.apache.spark.sql import scala.collection.immutable.{HashSet => HSet} import scala.collection.immutable.Queue import scala.collection.mutable.{LinkedHashMap => LHMap} -import scala.collection.mutable.ArrayBuffer import org.apache.spark.sql.test.SharedSparkSession @@ -171,23 +170,23 @@ class DatasetPrimitiveSuite extends QueryTest with SharedSparkSession { test("groupBy function, map") { val ds = Seq(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11).toDS() val grouped = ds.groupByKey(_ % 2) - val agged = grouped.mapGroups { (g, iter) => + val aggregated = grouped.mapGroups { (g, iter) => val name = if (g == 0) "even" else "odd" (name, iter.size) } checkDatasetUnorderly( - agged, + aggregated, ("even", 5), ("odd", 6)) } test("groupBy function, flatMap") { val ds = Seq("a", "b", "c", "xyz", "hello").toDS() val grouped = ds.groupByKey(_.length) - val agged = grouped.flatMapGroups { (g, iter) => Iterator(g.toString, iter.mkString) } + val aggregated = grouped.flatMapGroups { (g, iter) => Iterator(g.toString, iter.mkString) } checkDatasetUnorderly( - agged, + aggregated, "1", "abc", "3", "xyz", "5", "hello") } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala index 4923e8b556907..3a169e487827a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala @@ -25,7 +25,7 @@ import org.scalatest.exceptions.TestFailedException import org.scalatest.prop.TableDrivenPropertyChecks._ import org.apache.spark.{SparkException, TaskContext} -import org.apache.spark.sql.catalyst.ScroogeLikeExample +import 
org.apache.spark.sql.catalyst.{FooClassWithEnum, FooEnum, ScroogeLikeExample} import org.apache.spark.sql.catalyst.encoders.{OuterScopes, RowEncoder} import org.apache.spark.sql.catalyst.plans.{LeftAnti, LeftSemi} import org.apache.spark.sql.catalyst.util.sideBySide @@ -528,42 +528,42 @@ class DatasetSuite extends QueryTest test("groupBy function, map") { val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS() val grouped = ds.groupByKey(v => (v._1, "word")) - val agged = grouped.mapGroups { (g, iter) => (g._1, iter.map(_._2).sum) } + val aggregated = grouped.mapGroups { (g, iter) => (g._1, iter.map(_._2).sum) } checkDatasetUnorderly( - agged, + aggregated, ("a", 30), ("b", 3), ("c", 1)) } test("groupBy function, flatMap") { val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS() val grouped = ds.groupByKey(v => (v._1, "word")) - val agged = grouped.flatMapGroups { (g, iter) => + val aggregated = grouped.flatMapGroups { (g, iter) => Iterator(g._1, iter.map(_._2).sum.toString) } checkDatasetUnorderly( - agged, + aggregated, "a", "30", "b", "3", "c", "1") } test("groupBy function, mapValues, flatMap") { val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS() val keyValue = ds.groupByKey(_._1).mapValues(_._2) - val agged = keyValue.mapGroups { (g, iter) => (g, iter.sum) } - checkDataset(agged, ("a", 30), ("b", 3), ("c", 1)) + val aggregated = keyValue.mapGroups { (g, iter) => (g, iter.sum) } + checkDataset(aggregated, ("a", 30), ("b", 3), ("c", 1)) val keyValue1 = ds.groupByKey(t => (t._1, "key")).mapValues(t => (t._2, "value")) - val agged1 = keyValue1.mapGroups { (g, iter) => (g._1, iter.map(_._1).sum) } - checkDataset(agged1, ("a", 30), ("b", 3), ("c", 1)) + val aggregated1 = keyValue1.mapGroups { (g, iter) => (g._1, iter.map(_._1).sum) } + checkDataset(aggregated1, ("a", 30), ("b", 3), ("c", 1)) } test("groupBy function, reduce") { val ds = Seq("abc", "xyz", "hello").toDS() - val agged = ds.groupByKey(_.length).reduceGroups(_ + _) + val aggregated = ds.groupByKey(_.length).reduceGroups(_ + _) checkDatasetUnorderly( - agged, + aggregated, 3 -> "abcxyz", 5 -> "hello") } @@ -914,11 +914,11 @@ class DatasetSuite extends QueryTest test("grouping key and grouped value has field with same name") { val ds = Seq(ClassData("a", 1), ClassData("a", 2)).toDS() - val agged = ds.groupByKey(d => ClassNullableData(d.a, null)).mapGroups { + val aggregated = ds.groupByKey(d => ClassNullableData(d.a, null)).mapGroups { (key, values) => key.a + values.map(_.b).sum } - checkDataset(agged, "a3") + checkDataset(aggregated, "a3") } test("cogroup's left and right side has field with same name") { @@ -1286,7 +1286,7 @@ class DatasetSuite extends QueryTest Route("b", "c", 6)) val ds = sparkContext.parallelize(data).toDF.as[Route] - val grped = ds.map(r => GroupedRoutes(r.src, r.dest, Seq(r))) + val grouped = ds.map(r => GroupedRoutes(r.src, r.dest, Seq(r))) .groupByKey(r => (r.src, r.dest)) .reduceGroups { (g1: GroupedRoutes, g2: GroupedRoutes) => GroupedRoutes(g1.src, g1.dest, g1.routes ++ g2.routes) @@ -1303,7 +1303,7 @@ class DatasetSuite extends QueryTest implicit def ordering[GroupedRoutes]: Ordering[GroupedRoutes] = (x: GroupedRoutes, y: GroupedRoutes) => x.toString.compareTo(y.toString) - checkDatasetUnorderly(grped, expected: _*) + checkDatasetUnorderly(grouped, expected: _*) } test("SPARK-18189: Fix serialization issue in KeyValueGroupedDataset") { @@ -1383,7 +1383,7 @@ class DatasetSuite extends QueryTest } } } else { - // Local checkpoints dont require 
checkpoint_dir + // Local checkpoints don't require checkpoint_dir f } } @@ -1474,7 +1474,7 @@ class DatasetSuite extends QueryTest } test("SPARK-18717: code generation works for both scala.collection.Map" + - " and scala.collection.imutable.Map") { + " and scala.collection.immutable.Map") { val ds = Seq(WithImmutableMap("hi", Map(42L -> "foo"))).toDS checkDataset(ds.map(t => t), WithImmutableMap("hi", Map(42L -> "foo"))) @@ -1693,6 +1693,33 @@ class DatasetSuite extends QueryTest checkDataset(ds1.select("_2._2"), ds2.select("_2._2").collect(): _*) } + test("SPARK-23862: Spark ExpressionEncoder should support Java Enum type from Scala") { + val saveModeSeq = + Seq(SaveMode.Append, SaveMode.Overwrite, SaveMode.ErrorIfExists, SaveMode.Ignore, null) + assert(saveModeSeq.toDS().collect().toSeq === saveModeSeq) + assert(saveModeSeq.toDS().schema === new StructType().add("value", StringType, nullable = true)) + + val saveModeCaseSeq = saveModeSeq.map(SaveModeCase.apply) + assert(saveModeCaseSeq.toDS().collect().toSet === saveModeCaseSeq.toSet) + assert(saveModeCaseSeq.toDS().schema === + new StructType().add("mode", StringType, nullable = true)) + + val saveModeArrayCaseSeq = + Seq(SaveModeArrayCase(Array()), SaveModeArrayCase(saveModeSeq.toArray)) + val collected = saveModeArrayCaseSeq.toDS().collect() + assert(collected.length === 2) + val sortedByLength = collected.sortBy(_.modes.length) + assert(sortedByLength(0).modes === Array()) + assert(sortedByLength(1).modes === saveModeSeq.toArray) + assert(saveModeArrayCaseSeq.toDS().schema === + new StructType().add("modes", ArrayType(StringType, containsNull = true), nullable = true)) + + // Enum is stored as string, so it is possible to convert to/from string + val stringSeq = saveModeSeq.map(Option.apply).map(_.map(_.toString).orNull) + assert(stringSeq.toDS().as[SaveMode].collect().toSet === saveModeSeq.toSet) + assert(saveModeSeq.toDS().as[String].collect().toSet === stringSeq.toSet) + } + test("SPARK-24571: filtering of string values by char literal") { val df = Seq("Amsterdam", "San Francisco", "X").toDF("city") checkAnswer(df.where($"city" === 'X'), Seq(Row("X"))) @@ -1926,6 +1953,35 @@ class DatasetSuite extends QueryTest } } } + + test("SPARK-32585: Support scala enumeration in ScalaReflection") { + checkDataset( + Seq(FooClassWithEnum(1, FooEnum.E1), FooClassWithEnum(2, FooEnum.E2)).toDS(), + Seq(FooClassWithEnum(1, FooEnum.E1), FooClassWithEnum(2, FooEnum.E2)): _* + ) + + // test null + checkDataset( + Seq(FooClassWithEnum(1, null), FooClassWithEnum(2, FooEnum.E2)).toDS(), + Seq(FooClassWithEnum(1, null), FooClassWithEnum(2, FooEnum.E2)): _* + ) + } + + test("SPARK-33390: Make Literal support char array") { + val df = Seq("aa", "bb", "cc", "abc").toDF("zoo") + checkAnswer(df.where($"zoo" === Array('a', 'a')), Seq(Row("aa"))) + checkAnswer( + df.where($"zoo".contains(Array('a', 'b'))), + Seq(Row("abc"))) + } + + test("SPARK-33469: Add current_timezone function") { + val df = Seq(1).toDF("c") + withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> "Asia/Shanghai") { + val timezone = df.selectExpr("current_timezone()").collect().head.getString(0) + assert(timezone == "Asia/Shanghai") + } + } } object AssertExecutionId { @@ -2024,3 +2080,7 @@ case class CircularReferenceClassD(map: Map[String, CircularReferenceClassE]) case class CircularReferenceClassE(id: String, list: List[CircularReferenceClassD]) case class SpecialCharClass(`field.1`: String, `field 2`: String) + +/** Used to test Java Enums from Scala code */ +case class SaveModeCase(mode: 
SaveMode) +case class SaveModeArrayCase(modes: Array[SaveMode]) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DateFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DateFunctionsSuite.scala index 9caa4c0377009..b545d6097d71d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/DateFunctionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/DateFunctionsSuite.scala @@ -372,11 +372,21 @@ class DateFunctionsSuite extends QueryTest with SharedSparkSession { val df1 = Seq(("mon", "2015-07-23"), ("tuesday", "2015-07-20")).toDF("dow", "d") val df2 = Seq(("th", "2015-07-23 00:11:22"), ("xx", "2015-07-24 11:22:33")).toDF("dow", "t") checkAnswer( - df1.select(next_day(col("d"), "MONDAY")), - Seq(Row(Date.valueOf("2015-07-27")), Row(Date.valueOf("2015-07-27")))) + df1.select( + next_day(col("d"), "MONDAY"), + next_day(col("d"), col("dow")), + next_day(col("d"), "NonValidDay")), + Seq( + Row(Date.valueOf("2015-07-27"), Date.valueOf("2015-07-27"), null), + Row(Date.valueOf("2015-07-27"), Date.valueOf("2015-07-21"), null))) checkAnswer( - df2.select(next_day(col("t"), "th")), - Seq(Row(Date.valueOf("2015-07-30")), Row(Date.valueOf("2015-07-30")))) + df2.select( + next_day(col("t"), "th"), + next_day(col("t"), col("dow")), + next_day(col("t"), "NonValidDay")), + Seq( + Row(Date.valueOf("2015-07-30"), Date.valueOf("2015-07-30"), null), + Row(Date.valueOf("2015-07-30"), null, null))) } def checkExceptionMessage(df: DataFrame): Unit = { @@ -454,7 +464,7 @@ class DateFunctionsSuite extends QueryTest with SharedSparkSession { assert(e.getCause.isInstanceOf[IllegalArgumentException]) assert(e.getMessage.contains("You may get a different result due to the upgrading of Spark")) - // february + // February val x1 = "2016-02-29" val x2 = "2017-02-29" val df1 = Seq(x1, x2).toDF("x") @@ -629,7 +639,7 @@ class DateFunctionsSuite extends QueryTest with SharedSparkSession { e.getMessage.contains("You may get a different result due to the upgrading of Spark")) } - // february + // February val y1 = "2016-02-29" val y2 = "2017-02-29" val ts5 = Timestamp.valueOf("2016-02-29 00:00:00") @@ -680,7 +690,7 @@ class DateFunctionsSuite extends QueryTest with SharedSparkSession { checkAnswer(df1.selectExpr(s"to_unix_timestamp(x, 'yyyy-MM-dd mm:HH:ss')"), Seq( Row(secs(ts4.getTime)), Row(null), Row(secs(ts3.getTime)), Row(null))) - // february + // February val y1 = "2016-02-29" val y2 = "2017-02-29" val ts5 = Timestamp.valueOf("2016-02-29 00:00:00") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala index ddc4f1dab8e63..bf100c0205efa 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/ExplainSuite.scala @@ -228,12 +228,27 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite } } + test("SPARK-33853: explain codegen - check presence of subquery") { + withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") { + withTempView("df") { + val df1 = spark.range(1, 100) + df1.createTempView("df") + + val sqlText = "EXPLAIN CODEGEN SELECT (SELECT min(id) FROM df)" + val expectedText = "Found 3 WholeStageCodegen subtrees." 
+ + withNormalizedExplain(sqlText) { normalizedOutput => + assert(normalizedOutput.contains(expectedText)) + } + } + } + } + test("explain formatted - check presence of subquery in case of DPP") { withTable("df1", "df2") { withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true", SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false", SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") { - withTable("df1", "df2") { spark.range(1000).select(col("id"), col("id").as("k")) .write .partitionBy("k") @@ -261,11 +276,11 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite "PartitionFilters: \\[isnotnull\\(k#xL\\), dynamicpruningexpression\\(k#xL " + "IN subquery#x\\)\\]" val expected_pattern3 = - "Location: InMemoryFileIndex \\[.*org.apache.spark.sql.ExplainSuite" + - "/df2/.*, ... 99 entries\\]" + "Location: InMemoryFileIndex \\[\\S*org.apache.spark.sql.ExplainSuite" + + "/df2/\\S*, ... 99 entries\\]" val expected_pattern4 = - "Location: InMemoryFileIndex \\[.*org.apache.spark.sql.ExplainSuite" + - "/df1/.*, ... 999 entries\\]" + "Location: InMemoryFileIndex \\[\\S*org.apache.spark.sql.ExplainSuite" + + "/df1/\\S*, ... 999 entries\\]" withNormalizedExplain(sqlText) { normalizedOutput => assert(expected_pattern1.r.findAllMatchIn(normalizedOutput).length == 1) assert(expected_pattern2.r.findAllMatchIn(normalizedOutput).length == 1) @@ -273,6 +288,22 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite assert(expected_pattern4.r.findAllMatchIn(normalizedOutput).length == 1) } } + } + } + + test("SPARK-33850: explain formatted - check presence of subquery in case of AQE") { + withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") { + withTempView("df") { + val df = spark.range(1, 100) + df.createTempView("df") + + val sqlText = "EXPLAIN FORMATTED SELECT (SELECT min(id) FROM df) as v" + val expected_pattern = + "Subquery:1 Hosting operator id = 2 Hosting Expression = Subquery subquery#x" + + withNormalizedExplain(sqlText) { normalizedOutput => + assert(expected_pattern.r.findAllMatchIn(normalizedOutput).length == 1) + } } } } @@ -367,7 +398,7 @@ class ExplainSuite extends ExplainSuiteHelper with DisableAdaptiveExecutionSuite val basePath = dir.getCanonicalPath + "/" + fmt val pushFilterMaps = Map ( "parquet" -> - "|PushedFilers: \\[.*\\(id\\), .*\\(value\\), .*\\(id,1\\), .*\\(value,2\\)\\]", + "|PushedFilers: \\[IsNotNull\\(value\\), GreaterThan\\(value,2\\)\\]", "orc" -> "|PushedFilers: \\[.*\\(id\\), .*\\(value\\), .*\\(id,1\\), .*\\(value,2\\)\\]", "csv" -> diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ExpressionsSchemaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ExpressionsSchemaSuite.scala index d18aa9c549eb1..f3db4d811dd86 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/ExpressionsSchemaSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/ExpressionsSchemaSuite.scala @@ -32,12 +32,12 @@ import org.apache.spark.tags.ExtendedSQLTest * * To run the entire test suite: * {{{ - * build/sbt "sql/test-only *ExpressionsSchemaSuite" + * build/sbt "sql/testOnly *ExpressionsSchemaSuite" * }}} * * To re-generate golden files for entire suite, run: * {{{ - * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *ExpressionsSchemaSuite" + * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly *ExpressionsSchemaSuite" * }}} * * For example: @@ -178,7 +178,15 @@ class ExpressionsSchemaSuite extends QueryTest with SharedSparkSession { s"$numberOfQueries record in result file. 
Try regenerating the result files.") val numberOfMissingExamples = lines(3).split(":")(1).trim.toInt - val expectedMissingExamples = lines(4).split(":")(1).trim.split(",") + val expectedMissingExamples = { + val missingExamples = lines(4).split(":")(1).trim + // Splitting on a empty string would return [""] + if (missingExamples.nonEmpty) { + missingExamples.split(",") + } else { + Array.empty[String] + } + } assert(numberOfMissingExamples == expectedMissingExamples.size, s"expected missing examples size: ${expectedMissingExamples.size} not same as " + diff --git a/sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala index a3cd0c230d8af..876f62803dc7c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/FileBasedDataSourceSuite.scala @@ -31,12 +31,14 @@ import org.apache.spark.SparkException import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd} import org.apache.spark.sql.TestingUDT.{IntervalUDT, NullData, NullUDT} import org.apache.spark.sql.catalyst.expressions.AttributeReference -import org.apache.spark.sql.catalyst.planning.PhysicalOperation +import org.apache.spark.sql.catalyst.expressions.IntegralLiteralTestUtils.{negativeInt, positiveInt} import org.apache.spark.sql.catalyst.plans.logical.Filter +import org.apache.spark.sql.execution.SimpleMode import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper import org.apache.spark.sql.execution.datasources.FilePartition -import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2ScanRelation, FileScan} -import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetTable +import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, FileScan} +import org.apache.spark.sql.execution.datasources.v2.orc.OrcScan +import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetScan import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec} import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf @@ -231,6 +233,20 @@ class FileBasedDataSourceSuite extends QueryTest } } + Seq("json", "orc").foreach { format => + test(s"SPARK-32889: column name supports special characters using $format") { + Seq("$", " ", ",", ";", "{", "}", "(", ")", "\n", "\t", "=").foreach { name => + withTempDir { dir => + val dataDir = new File(dir, "file").getCanonicalPath + Seq(1).toDF(name).write.format(format).save(dataDir) + val schema = spark.read.format(format).load(dataDir).schema + assert(schema.size == 1) + assertResult(name)(schema.head.name) + } + } + } + } + // Text file format only supports string type test("SPARK-24691 error handling for unsupported types - text") { withTempDir { dir => @@ -561,38 +577,6 @@ class FileBasedDataSourceSuite extends QueryTest } } - test("Option pathGlobFilter: filter files correctly") { - withTempPath { path => - val dataDir = path.getCanonicalPath - Seq("foo").toDS().write.text(dataDir) - Seq("bar").toDS().write.mode("append").orc(dataDir) - val df = spark.read.option("pathGlobFilter", "*.txt").text(dataDir) - checkAnswer(df, Row("foo")) - - // Both glob pattern in option and path should be effective to filter files. 
- val df2 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*.orc") - checkAnswer(df2, Seq.empty) - - val df3 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*xt") - checkAnswer(df3, Row("foo")) - } - } - - test("Option pathGlobFilter: simple extension filtering should contains partition info") { - withTempPath { path => - val input = Seq(("foo", 1), ("oof", 2)).toDF("a", "b") - input.write.partitionBy("b").text(path.getCanonicalPath) - Seq("bar").toDS().write.mode("append").orc(path.getCanonicalPath + "/b=1") - - // If we use glob pattern in the path, the partition column won't be shown in the result. - val df = spark.read.text(path.getCanonicalPath + "/*/*.txt") - checkAnswer(df, input.select("a")) - - val df2 = spark.read.option("pathGlobFilter", "*.txt").text(path.getCanonicalPath) - checkAnswer(df2, input) - } - } - test("Option recursiveFileLookup: recursive loading correctly") { val expectedFileList = mutable.ListBuffer[String]() @@ -826,22 +810,6 @@ class FileBasedDataSourceSuite extends QueryTest } } - test("SPARK-31935: Hadoop file system config should be effective in data source options") { - Seq("parquet", "").foreach { format => - withSQLConf( - SQLConf.USE_V1_SOURCE_LIST.key -> format, - "fs.file.impl" -> classOf[FakeFileSystemRequiringDSOption].getName, - "fs.file.impl.disable.cache" -> "true") { - withTempDir { dir => - val path = "file:" + dir.getCanonicalPath.stripPrefix("file:") - spark.range(10).write.option("ds_option", "value").mode("overwrite").parquet(path) - checkAnswer( - spark.read.option("ds_option", "value").parquet(path), spark.range(10).toDF()) - } - } - } - } - test("SPARK-31116: Select nested schema with case insensitive mode") { // This test case failed at only Parquet. ORC is added for test coverage parity. 
Seq("orc", "parquet").foreach { format => @@ -881,6 +849,114 @@ class FileBasedDataSourceSuite extends QueryTest } } } + + test("test casts pushdown on orc/parquet for integral types") { + def checkPushedFilters( + format: String, + df: DataFrame, + filters: Array[sources.Filter], + noScan: Boolean = false): Unit = { + val scanExec = df.queryExecution.sparkPlan.find(_.isInstanceOf[BatchScanExec]) + if (noScan) { + assert(scanExec.isEmpty) + return + } + val scan = scanExec.get.asInstanceOf[BatchScanExec].scan + format match { + case "orc" => + assert(scan.isInstanceOf[OrcScan]) + assert(scan.asInstanceOf[OrcScan].pushedFilters === filters) + case "parquet" => + assert(scan.isInstanceOf[ParquetScan]) + assert(scan.asInstanceOf[ParquetScan].pushedFilters === filters) + case _ => + fail(s"unknown format $format") + } + } + + Seq("orc", "parquet").foreach { format => + withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "") { + withTempPath { dir => + spark.range(100).map(i => (i.toShort, i.toString)).toDF("id", "s") + .write + .format(format) + .save(dir.getCanonicalPath) + val df = spark.read.format(format).load(dir.getCanonicalPath) + + // cases when value == MAX + var v = Short.MaxValue + checkPushedFilters(format, df.where('id > v.toInt), Array(), noScan = true) + checkPushedFilters(format, df.where('id >= v.toInt), Array(sources.IsNotNull("id"), + sources.EqualTo("id", v))) + checkPushedFilters(format, df.where('id === v.toInt), Array(sources.IsNotNull("id"), + sources.EqualTo("id", v))) + checkPushedFilters(format, df.where('id <=> v.toInt), + Array(sources.EqualNullSafe("id", v))) + checkPushedFilters(format, df.where('id <= v.toInt), Array(sources.IsNotNull("id"))) + checkPushedFilters(format, df.where('id < v.toInt), Array(sources.IsNotNull("id"), + sources.Not(sources.EqualTo("id", v)))) + + // cases when value > MAX + var v1: Int = positiveInt + checkPushedFilters(format, df.where('id > v1), Array(), noScan = true) + checkPushedFilters(format, df.where('id >= v1), Array(), noScan = true) + checkPushedFilters(format, df.where('id === v1), Array(), noScan = true) + checkPushedFilters(format, df.where('id <=> v1), Array(), noScan = true) + checkPushedFilters(format, df.where('id <= v1), Array(sources.IsNotNull("id"))) + checkPushedFilters(format, df.where('id < v1), Array(sources.IsNotNull("id"))) + + // cases when value = MIN + v = Short.MinValue + checkPushedFilters(format, df.where(lit(v.toInt) < 'id), Array(sources.IsNotNull("id"), + sources.Not(sources.EqualTo("id", v)))) + checkPushedFilters(format, df.where(lit(v.toInt) <= 'id), Array(sources.IsNotNull("id"))) + checkPushedFilters(format, df.where(lit(v.toInt) === 'id), Array(sources.IsNotNull("id"), + sources.EqualTo("id", v))) + checkPushedFilters(format, df.where(lit(v.toInt) <=> 'id), + Array(sources.EqualNullSafe("id", v))) + checkPushedFilters(format, df.where(lit(v.toInt) >= 'id), Array(sources.IsNotNull("id"), + sources.EqualTo("id", v))) + checkPushedFilters(format, df.where(lit(v.toInt) > 'id), Array(), noScan = true) + + // cases when value < MIN + v1 = negativeInt + checkPushedFilters(format, df.where(lit(v1) < 'id), Array(sources.IsNotNull("id"))) + checkPushedFilters(format, df.where(lit(v1) <= 'id), Array(sources.IsNotNull("id"))) + checkPushedFilters(format, df.where(lit(v1) === 'id), Array(), noScan = true) + checkPushedFilters(format, df.where(lit(v1) >= 'id), Array(), noScan = true) + checkPushedFilters(format, df.where(lit(v1) > 'id), Array(), noScan = true) + + // cases when value is within range (MIN, MAX) + 
checkPushedFilters(format, df.where('id > 30), Array(sources.IsNotNull("id"), + sources.GreaterThan("id", 30))) + checkPushedFilters(format, df.where(lit(100) >= 'id), Array(sources.IsNotNull("id"), + sources.LessThanOrEqual("id", 100))) + } + } + } + } + + test("SPARK-32827: Set max metadata string length") { + withTempDir { dir => + val tableName = "t" + val path = s"${dir.getCanonicalPath}/$tableName" + withTable(tableName) { + sql(s"CREATE TABLE $tableName(c INT) USING PARQUET LOCATION '$path'") + withSQLConf(SQLConf.MAX_METADATA_STRING_LENGTH.key -> "5") { + val explain = spark.table(tableName).queryExecution.explainString(SimpleMode) + assert(!explain.contains(path)) + // metadata has abbreviated by ... + assert(explain.contains("...")) + } + + withSQLConf(SQLConf.MAX_METADATA_STRING_LENGTH.key -> "1000") { + val explain = spark.table(tableName).queryExecution.explainString(SimpleMode) + assert(explain.contains(path)) + assert(!explain.contains("...")) + } + } + } + } } object TestingUDT { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/IntegratedUDFTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/IntegratedUDFTestUtils.scala index 80346b350c142..861a001b190aa 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/IntegratedUDFTestUtils.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/IntegratedUDFTestUtils.scala @@ -27,7 +27,6 @@ import org.scalatest.Assertions._ import org.apache.spark.TestUtils import org.apache.spark.api.python.{PythonBroadcast, PythonEvalType, PythonFunction, PythonUtils} import org.apache.spark.broadcast.Broadcast -import org.apache.spark.internal.config.Tests import org.apache.spark.sql.catalyst.expressions.{Cast, Expression} import org.apache.spark.sql.catalyst.plans.SQLHelper import org.apache.spark.sql.execution.python.UserDefinedPythonFunction diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala index 942cf24a3a873..2e336b264cd3a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala @@ -757,6 +757,14 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan ) } + // LEFT ANTI JOIN without bound condition does not spill + assertNotSpilled(sparkContext, "left anti join") { + checkAnswer( + sql("SELECT * FROM testData LEFT ANTI JOIN testData2 ON key = a WHERE key = 2"), + Nil + ) + } + val expected = new ListBuffer[Row]() expected.append( Row(1, "1", 1, 1), Row(1, "1", 1, 2), @@ -1098,20 +1106,16 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan } test("SPARK-32330: Preserve shuffled hash join build side partitioning") { - withSQLConf( - SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50", - SQLConf.SHUFFLE_PARTITIONS.key -> "2", - SQLConf.PREFER_SORTMERGEJOIN.key -> "false") { - val df1 = spark.range(10).select($"id".as("k1")) - val df2 = spark.range(30).select($"id".as("k2")) - Seq("inner", "cross").foreach(joinType => { - val plan = df1.join(df2, $"k1" === $"k2", joinType).groupBy($"k1").count() - .queryExecution.executedPlan - assert(plan.collect { case _: ShuffledHashJoinExec => true }.size === 1) - // No extra shuffle before aggregate - assert(plan.collect { case _: ShuffleExchangeExec => true }.size === 2) - }) - } + val df1 = spark.range(10).select($"id".as("k1")) + val df2 = spark.range(30).select($"id".as("k2")) + Seq("inner", "cross").foreach(joinType => { + val plan = 
df1.join(df2.hint("SHUFFLE_HASH"), $"k1" === $"k2", joinType) + .groupBy($"k1").count() + .queryExecution.executedPlan + assert(collect(plan) { case _: ShuffledHashJoinExec => true }.size === 1) + // No extra shuffle before aggregate + assert(collect(plan) { case _: ShuffleExchangeExec => true }.size === 2) + }) } test("SPARK-32383: Preserve hash join (BHJ and SHJ) stream side ordering") { @@ -1121,40 +1125,30 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan val df4 = spark.range(100).select($"id".as("k4")) // Test broadcast hash join - withSQLConf( - SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50") { - Seq("inner", "left_outer").foreach(joinType => { - val plan = df1.join(df2, $"k1" === $"k2", joinType) - .join(df3, $"k1" === $"k3", joinType) - .join(df4, $"k1" === $"k4", joinType) - .queryExecution - .executedPlan - assert(plan.collect { case _: SortMergeJoinExec => true }.size === 2) - assert(plan.collect { case _: BroadcastHashJoinExec => true }.size === 1) - // No extra sort before last sort merge join - assert(plan.collect { case _: SortExec => true }.size === 3) - }) - } + Seq("inner", "left_outer").foreach(joinType => { + val plan = df1.join(df2.hint("SHUFFLE_MERGE"), $"k1" === $"k2", joinType) + .join(df3.hint("BROADCAST"), $"k1" === $"k3", joinType) + .join(df4.hint("SHUFFLE_MERGE"), $"k1" === $"k4", joinType) + .queryExecution + .executedPlan + assert(collect(plan) { case _: SortMergeJoinExec => true }.size === 2) + assert(collect(plan) { case _: BroadcastHashJoinExec => true }.size === 1) + // No extra sort before last sort merge join + assert(collect(plan) { case _: SortExec => true }.size === 3) + }) // Test shuffled hash join - withSQLConf( - SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50", - SQLConf.SHUFFLE_PARTITIONS.key -> "2", - SQLConf.PREFER_SORTMERGEJOIN.key -> "false") { - val df3 = spark.range(10).select($"id".as("k3")) - - Seq("inner", "left_outer").foreach(joinType => { - val plan = df1.join(df2, $"k1" === $"k2", joinType) - .join(df3, $"k1" === $"k3", joinType) - .join(df4, $"k1" === $"k4", joinType) - .queryExecution - .executedPlan - assert(plan.collect { case _: SortMergeJoinExec => true }.size === 2) - assert(plan.collect { case _: ShuffledHashJoinExec => true }.size === 1) - // No extra sort before last sort merge join - assert(plan.collect { case _: SortExec => true }.size === 3) - }) - } + Seq("inner", "left_outer").foreach(joinType => { + val plan = df1.join(df2.hint("SHUFFLE_MERGE"), $"k1" === $"k2", joinType) + .join(df3.hint("SHUFFLE_HASH"), $"k1" === $"k3", joinType) + .join(df4.hint("SHUFFLE_MERGE"), $"k1" === $"k4", joinType) + .queryExecution + .executedPlan + assert(collect(plan) { case _: SortMergeJoinExec => true }.size === 2) + assert(collect(plan) { case _: ShuffledHashJoinExec => true }.size === 1) + // No extra sort before last sort merge join + assert(collect(plan) { case _: SortExec => true }.size === 3) + }) } test("SPARK-32290: SingleColumn Null Aware Anti Join Optimize") { @@ -1242,24 +1236,16 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan $"k1" === $"k4" && $"k2" === $"k5" && $"k3" === $"k6") ) inputDFs.foreach { case (df1, df2, joinExprs) => - withSQLConf( - // Set broadcast join threshold and number of shuffle partitions, - // as shuffled hash join depends on these two configs. 
- SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80", - SQLConf.SHUFFLE_PARTITIONS.key -> "2") { - val smjDF = df1.join(df2, joinExprs, "full") - assert(smjDF.queryExecution.executedPlan.collect { - case _: SortMergeJoinExec => true }.size === 1) - val smjResult = smjDF.collect() - - withSQLConf(SQLConf.PREFER_SORTMERGEJOIN.key -> "false") { - val shjDF = df1.join(df2, joinExprs, "full") - assert(shjDF.queryExecution.executedPlan.collect { - case _: ShuffledHashJoinExec => true }.size === 1) - // Same result between shuffled hash join and sort merge join - checkAnswer(shjDF, smjResult) - } - } + val smjDF = df1.join(df2.hint("SHUFFLE_MERGE"), joinExprs, "full") + assert(collect(smjDF.queryExecution.executedPlan) { + case _: SortMergeJoinExec => true }.size === 1) + val smjResult = smjDF.collect() + + val shjDF = df1.join(df2.hint("SHUFFLE_HASH"), joinExprs, "full") + assert(collect(shjDF.queryExecution.executedPlan) { + case _: ShuffledHashJoinExec => true }.size === 1) + // Same result between shuffled hash join and sort merge join + checkAnswer(shjDF, smjResult) } } @@ -1276,8 +1262,8 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan ) inputDFs.foreach { case (df1, df2, joinType) => // Test broadcast hash join - withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "200") { - val bhjCodegenDF = df1.join(df2, $"k1" === $"k2", joinType) + withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + val bhjCodegenDF = df1.join(df2.hint("BROADCAST"), $"k1" === $"k2", joinType) assert(bhjCodegenDF.queryExecution.executedPlan.collect { case WholeStageCodegenExec(_ : BroadcastHashJoinExec) => true case WholeStageCodegenExec(ProjectExec(_, _ : BroadcastHashJoinExec)) => true @@ -1293,12 +1279,8 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan } // Test shuffled hash join - withSQLConf(SQLConf.PREFER_SORTMERGEJOIN.key -> "false", - // Set broadcast join threshold and number of shuffle partitions, - // as shuffled hash join depends on these two configs. 
- SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "50", - SQLConf.SHUFFLE_PARTITIONS.key -> "2") { - val shjCodegenDF = df1.join(df2, $"k1" === $"k2", joinType) + withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + val shjCodegenDF = df1.join(df2.hint("SHUFFLE_HASH"), $"k1" === $"k2", joinType) assert(shjCodegenDF.queryExecution.executedPlan.collect { case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true case WholeStageCodegenExec(ProjectExec(_, _ : ShuffledHashJoinExec)) => true @@ -1306,7 +1288,7 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan checkAnswer(shjCodegenDF, Seq.empty) withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") { - val shjNonCodegenDF = df1.join(df2, $"k1" === $"k2", joinType) + val shjNonCodegenDF = df1.join(df2.hint("SHUFFLE_HASH"), $"k1" === $"k2", joinType) assert(shjNonCodegenDF.queryExecution.executedPlan.collect { case _: ShuffledHashJoinExec => true }.size === 1) checkAnswer(shjNonCodegenDF, Seq.empty) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala index 03b48451c7495..310e170e8c1b1 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala @@ -411,7 +411,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession { test("infers schemas using options") { val df = spark.range(1) .select(schema_of_json(lit("{a:1}"), Map("allowUnquotedFieldNames" -> "true").asJava)) - checkAnswer(df, Seq(Row("struct<a:bigint>"))) + checkAnswer(df, Seq(Row("STRUCT<`a`: BIGINT>"))) } test("from_json - array of primitive types") { @@ -684,14 +684,14 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession { val input = regexp_replace(lit("""{"item_id": 1, "item_price": 0.1}"""), "item_", "") checkAnswer( spark.range(1).select(schema_of_json(input)), - Seq(Row("struct<id:bigint,price:double>"))) + Seq(Row("STRUCT<`id`: BIGINT, `price`: DOUBLE>"))) } test("SPARK-31065: schema_of_json - null and empty strings as strings") { Seq("""{"id": null}""", """{"id": ""}""").foreach { input => checkAnswer( spark.range(1).select(schema_of_json(input)), - Seq(Row("struct<id:string>"))) + Seq(Row("STRUCT<`id`: STRING>"))) } } @@ -703,7 +703,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession { schema_of_json( lit("""{"id": "a", "drop": {"drop": null}}"""), options.asJava)), - Seq(Row("struct<id:string>"))) + Seq(Row("STRUCT<`id`: STRING>"))) // Array of structs checkAnswer( @@ -711,7 +711,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession { schema_of_json( lit("""[{"id": "a", "drop": {"drop": null}}]"""), options.asJava)), - Seq(Row("array<struct<id:string>>"))) + Seq(Row("ARRAY<STRUCT<`id`: STRING>>"))) // Other types are not affected.
checkAnswer( @@ -719,7 +719,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession { schema_of_json( lit("""null"""), options.asJava)), - Seq(Row("string"))) + Seq(Row("STRING"))) } test("optional datetime parser does not affect json time formatting") { @@ -733,4 +733,111 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession { | """.stripMargin) checkAnswer(toDF("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"), toDF("yyyy-MM-dd'T'HH:mm:ss[.SSS][XXX]")) } + + test("SPARK-33134: return partial results only for root JSON objects") { + val st = new StructType() + .add("c1", LongType) + .add("c2", ArrayType(new StructType().add("c3", LongType).add("c4", StringType))) + val df1 = Seq("""{"c2": [19], "c1": 123456}""").toDF("c0") + checkAnswer(df1.select(from_json($"c0", st)), Row(Row(123456, null))) + val df2 = Seq("""{"data": {"c2": [19], "c1": 123456}}""").toDF("c0") + checkAnswer(df2.select(from_json($"c0", new StructType().add("data", st))), Row(Row(null))) + val df3 = Seq("""[{"c2": [19], "c1": 123456}]""").toDF("c0") + checkAnswer(df3.select(from_json($"c0", ArrayType(st))), Row(null)) + val df4 = Seq("""{"c2": [19]}""").toDF("c0") + checkAnswer(df4.select(from_json($"c0", MapType(StringType, st))), Row(null)) + } + + test("SPARK-33270: infers schema for JSON field with spaces and pass them to from_json") { + val in = Seq("""{"a b": 1}""").toDS() + val out = in.select(from_json('value, schema_of_json("""{"a b": 100}""")) as "parsed") + val expected = new StructType().add("parsed", new StructType().add("a b", LongType)) + assert(out.schema == expected) + } + + test("SPARK-33286: from_json - combined error messages") { + val df = Seq("""{"a":1}""").toDF("json") + val invalidJsonSchema = """{"fields": [{"a":123}], "type": "struct"}""" + val errMsg1 = intercept[AnalysisException] { + df.select(from_json($"json", invalidJsonSchema, Map.empty[String, String])).collect() + }.getMessage + assert(errMsg1.contains("""Failed to convert the JSON string '{"a":123}' to a field""")) + + val invalidDataType = "MAP<INT, cow>" + val errMsg2 = intercept[AnalysisException] { + df.select(from_json($"json", invalidDataType, Map.empty[String, String])).collect() + }.getMessage + assert(errMsg2.contains("DataType cow is not supported")) + + val invalidTableSchema = "x INT, a cow" + val errMsg3 = intercept[AnalysisException] { + df.select(from_json($"json", invalidTableSchema, Map.empty[String, String])).collect() + }.getMessage + assert(errMsg3.contains("DataType cow is not supported")) + } + + test("SPARK-33907: bad json input with json pruning optimization: GetStructField") { + Seq("true", "false").foreach { enabled => + withSQLConf(SQLConf.JSON_EXPRESSION_OPTIMIZATION.key -> enabled) { + val schema = new StructType() + .add("a", IntegerType) + .add("b", IntegerType) + val badRec = """{"a" 1, "b": 11}""" + val df = Seq(badRec, """{"a": 2, "b": 12}""").toDS() + + val exception1 = intercept[SparkException] { + df.select(from_json($"value", schema, Map("mode" -> "FAILFAST"))("b")).collect() + }.getMessage + assert(exception1.contains( + "Malformed records are detected in record parsing. Parse Mode: FAILFAST.")) + + val exception2 = intercept[SparkException] { + df.select(from_json($"value", schema, Map("mode" -> "FAILFAST"))("a")).collect() + }.getMessage + assert(exception2.contains( + "Malformed records are detected in record parsing. 
Parse Mode: FAILFAST.")) + } + } + } + + test("SPARK-33907: bad json input with json pruning optimization: GetArrayStructFields") { + Seq("true", "false").foreach { enabled => + withSQLConf(SQLConf.JSON_EXPRESSION_OPTIMIZATION.key -> enabled) { + val schema = ArrayType(new StructType() + .add("a", IntegerType) + .add("b", IntegerType)) + val badRec = """{"a" 1, "b": 11}""" + val df = Seq(s"""[$badRec, {"a": 2, "b": 12}]""").toDS() + + val exception1 = intercept[SparkException] { + df.select(from_json($"value", schema, Map("mode" -> "FAILFAST"))("b")).collect() + }.getMessage + assert(exception1.contains( + "Malformed records are detected in record parsing. Parse Mode: FAILFAST.")) + + val exception2 = intercept[SparkException] { + df.select(from_json($"value", schema, Map("mode" -> "FAILFAST"))("a")).collect() + }.getMessage + assert(exception2.contains( + "Malformed records are detected in record parsing. Parse Mode: FAILFAST.")) + } + } + } + + test("SPARK-33907: json pruning optimization with corrupt record field") { + Seq("true", "false").foreach { enabled => + withSQLConf(SQLConf.JSON_EXPRESSION_OPTIMIZATION.key -> enabled) { + val schema = new StructType() + .add("a", IntegerType) + .add("b", IntegerType) + val badRec = """{"a" 1, "b": 11}""" + + val df = Seq(badRec, """{"a": 2, "b": 12}""").toDS() + .selectExpr("from_json(value, 'a int, b int, _corrupt_record string') as parsed") + .selectExpr("parsed._corrupt_record") + + checkAnswer(df, Seq(Row("""{"a" 1, "b": 11}"""), Row(null))) + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala index bd86c2ec075b0..87526b130d4c6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/MathFunctionsSuite.scala @@ -125,6 +125,11 @@ class MathFunctionsSuite extends QueryTest with SharedSparkSession { testOneToOneMathFunction(sinh, math.sinh) } + test("asinh") { + testOneToOneMathFunction(asinh, + (x: Double) => math.log(x + math.sqrt(x * x + 1)) ) + } + test("cos") { testOneToOneMathFunction(cos, math.cos) } @@ -137,6 +142,11 @@ class MathFunctionsSuite extends QueryTest with SharedSparkSession { testOneToOneMathFunction(cosh, math.cosh) } + test("acosh") { + testOneToOneMathFunction(acosh, + (x: Double) => math.log(x + math.sqrt(x * x - 1)) ) + } + test("tan") { testOneToOneMathFunction(tan, math.tan) } @@ -149,6 +159,11 @@ class MathFunctionsSuite extends QueryTest with SharedSparkSession { testOneToOneMathFunction(tanh, math.tanh) } + test("atanh") { + testOneToOneMathFunction(atanh, + (x: Double) => (0.5 * (math.log1p(x) - math.log1p(-x))) ) + } + test("degrees") { testOneToOneMathFunction(degrees, math.toDegrees) checkAnswer( @@ -185,7 +200,7 @@ class MathFunctionsSuite extends QueryTest with SharedSparkSession { checkAnswer(df.selectExpr("""conv("100", 2, 10)"""), Row("4")) checkAnswer(df.selectExpr("""conv("-10", 16, -10)"""), Row("-16")) checkAnswer( - df.selectExpr("""conv("9223372036854775807", 36, -16)"""), Row("-1")) // for overflow + df.selectExpr("""conv("9223372036854775807", 36, -16)"""), Row("12DDAC15F246BAF8C0D551AC7")) } test("floor") { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala index f78fc269986b5..76204c504c0ed 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/PlanStabilitySuite.scala @@ -30,7 +30,6 @@ import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecutionSuite import org.apache.spark.sql.execution.exchange.{Exchange, ReusedExchangeExec} import org.apache.spark.sql.internal.SQLConf -import org.apache.spark.tags.ExtendedSQLTest // scalastyle:off line.size.limit /** @@ -51,22 +50,22 @@ import org.apache.spark.tags.ExtendedSQLTest * * To run the entire test suite: * {{{ - * build/sbt "sql/test-only *PlanStability[WithStats]Suite" + * build/sbt "sql/testOnly *PlanStability[WithStats]Suite" * }}} * * To run a single test file upon change: * {{{ - * build/sbt "sql/test-only *PlanStability[WithStats]Suite -- -z (tpcds-v1.4/q49)" + * build/sbt "sql/testOnly *PlanStability[WithStats]Suite -- -z (tpcds-v1.4/q49)" * }}} * * To re-generate golden files for entire suite, run: * {{{ - * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *PlanStability[WithStats]Suite" + * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly *PlanStability[WithStats]Suite" * }}} * * To re-generate golden file for a single test, run: * {{{ - * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *PlanStability[WithStats]Suite -- -z (tpcds-v1.4/q49)" + * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly *PlanStability[WithStats]Suite -- -z (tpcds-v1.4/q49)" * }}} */ // scalastyle:on line.size.limit diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ReplaceNullWithFalseInPredicateEndToEndSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ReplaceNullWithFalseInPredicateEndToEndSuite.scala index bdbb741f24bc6..739b4052ee90d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/ReplaceNullWithFalseInPredicateEndToEndSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/ReplaceNullWithFalseInPredicateEndToEndSuite.scala @@ -27,6 +27,12 @@ import org.apache.spark.sql.types.BooleanType class ReplaceNullWithFalseInPredicateEndToEndSuite extends QueryTest with SharedSparkSession { import testImplicits._ + private def checkPlanIsEmptyLocalScan(df: DataFrame): Unit = + df.queryExecution.executedPlan match { + case s: LocalTableScanExec => assert(s.rows.isEmpty) + case p => fail(s"$p is not LocalTableScanExec") + } + test("SPARK-25860: Replace Literal(null, _) with FalseLiteral whenever possible") { withTable("t1", "t2") { Seq((1, true), (2, false)).toDF("l", "b").write.saveAsTable("t1") @@ -64,11 +70,6 @@ class ReplaceNullWithFalseInPredicateEndToEndSuite extends QueryTest with Shared checkAnswer(df1.where("IF(l > 10, false, b OR null)"), Row(1, true)) } - - def checkPlanIsEmptyLocalScan(df: DataFrame): Unit = df.queryExecution.executedPlan match { - case s: LocalTableScanExec => assert(s.rows.isEmpty) - case p => fail(s"$p is not LocalTableScanExec") - } } test("SPARK-26107: Replace Literal(null, _) with FalseLiteral in higher-order functions") { @@ -112,4 +113,14 @@ class ReplaceNullWithFalseInPredicateEndToEndSuite extends QueryTest with Shared assertNoLiteralNullInPlan(q3) } } + + test("SPARK-33847: replace None of elseValue inside CaseWhen to FalseLiteral") { + withTable("t1") { + Seq((1, 1), (2, 2)).toDF("a", "b").write.saveAsTable("t1") + val t1 = spark.table("t1") + val q1 = t1.filter("(CASE WHEN a > 1 THEN 1 END) = 0") + checkAnswer(q1, Seq.empty) + checkPlanIsEmptyLocalScan(q1) + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/RuntimeConfigSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/RuntimeConfigSuite.scala 
index 720d570ca8384..4052130720811 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/RuntimeConfigSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/RuntimeConfigSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql import org.apache.spark.SparkFunSuite import org.apache.spark.internal.config import org.apache.spark.sql.internal.SQLConf.CHECKPOINT_LOCATION -import org.apache.spark.sql.internal.StaticSQLConf.SCHEMA_STRING_LENGTH_THRESHOLD +import org.apache.spark.sql.internal.StaticSQLConf.GLOBAL_TEMP_DATABASE class RuntimeConfigSuite extends SparkFunSuite { @@ -62,7 +62,7 @@ class RuntimeConfigSuite extends SparkFunSuite { val conf = newConf() // SQL configs - assert(!conf.isModifiable(SCHEMA_STRING_LENGTH_THRESHOLD.key)) + assert(!conf.isModifiable(GLOBAL_TEMP_DATABASE.key)) assert(conf.isModifiable(CHECKPOINT_LOCATION.key)) // Core configs assert(!conf.isModifiable(config.CPUS_PER_TASK.key)) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala new file mode 100644 index 0000000000000..12394a92aed44 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala @@ -0,0 +1,221 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql + +import org.apache.spark.SparkConf +import org.apache.spark.sql.connector.InMemoryPartitionTableCatalog +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} + +/** + * The base trait for DML - insert syntax + */ +trait SQLInsertTestSuite extends QueryTest with SQLTestUtils { + + import testImplicits._ + + def format: String + + protected def createTable( + table: String, + cols: Seq[String], + colTypes: Seq[String], + partCols: Seq[String] = Nil): Unit = { + val values = cols.zip(colTypes).map(tuple => tuple._1 + " " + tuple._2).mkString("(", ", ", ")") + val partitionSpec = if (partCols.nonEmpty) { + partCols.mkString("PARTITIONED BY (", ",", ")") + } else "" + sql(s"CREATE TABLE $table$values USING $format $partitionSpec") + } + + protected def processInsert( + tableName: String, + input: DataFrame, + cols: Seq[String] = Nil, + partitionExprs: Seq[String] = Nil, + overwrite: Boolean): Unit = { + val tmpView = "tmp_view" + val columnList = if (cols.nonEmpty) cols.mkString("(", ",", ")") else "" + val partitionList = if (partitionExprs.nonEmpty) { + partitionExprs.mkString("PARTITION (", ",", ")") + } else "" + withTempView(tmpView) { + input.createOrReplaceTempView(tmpView) + val overwriteStr = if (overwrite) "OVERWRITE" else "INTO" + sql( + s"INSERT $overwriteStr TABLE $tableName $partitionList $columnList SELECT * FROM $tmpView") + } + } + + protected def verifyTable(tableName: String, expected: DataFrame): Unit = { + checkAnswer(spark.table(tableName), expected) + } + + test("insert with column list - follow table output order") { + withTable("t1") { + val df = Seq((1, 2L, "3")).toDF() + val cols = Seq("c1", "c2", "c3") + createTable("t1", cols, Seq("int", "long", "string")) + Seq(false, true).foreach { m => + processInsert("t1", df, cols, overwrite = m) + verifyTable("t1", df) + } + } + } + + test("insert with column list - follow table output order + partitioned table") { + val cols = Seq("c1", "c2", "c3", "c4") + val df = Seq((1, 2, 3, 4)).toDF(cols: _*) + withTable("t1") { + createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2)) + Seq(false, true).foreach { m => + processInsert("t1", df, cols, overwrite = m) + verifyTable("t1", df) + } + } + + withTable("t1") { + createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2)) + Seq(false, true).foreach { m => + processInsert( + "t1", df.selectExpr("c1", "c2"), cols.take(2), Seq("c3=3", "c4=4"), overwrite = m) + verifyTable("t1", df) + } + } + + withTable("t1") { + createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2)) + Seq(false, true).foreach { m => + processInsert("t1", df.selectExpr("c1", "c2", "c4"), + cols.filterNot(_ == "c3"), Seq("c3=3", "c4"), overwrite = m) + verifyTable("t1", df) + } + } + } + + test("insert with column list - table output reorder") { + withTable("t1") { + val cols = Seq("c1", "c2", "c3") + val df = Seq((1, 2, 3)).toDF(cols: _*) + createTable("t1", cols, Seq("int", "int", "int")) + Seq(false, true).foreach { m => + processInsert("t1", df, cols.reverse, overwrite = m) + verifyTable("t1", df.selectExpr(cols.reverse: _*)) + } + } + } + + test("insert with column list - table output reorder + partitioned table") { + val cols = Seq("c1", "c2", "c3", "c4") + val df = Seq((1, 2, 3, 4)).toDF(cols: _*) + withTable("t1") { + createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2)) + Seq(false, true).foreach { m => + processInsert("t1", df, 
cols.reverse, overwrite = m) + verifyTable("t1", df.selectExpr(cols.reverse: _*)) + } + } + + withTable("t1") { + createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2)) + Seq(false, true).foreach { m => + processInsert( + "t1", df.selectExpr("c1", "c2"), cols.take(2).reverse, Seq("c3=3", "c4=4"), overwrite = m) + verifyTable("t1", df.selectExpr("c2", "c1", "c3", "c4")) + } + } + + withTable("t1") { + createTable("t1", cols, Seq("int", "int", "int", "int"), cols.takeRight(2)) + Seq(false, true).foreach { m => + processInsert("t1", + df.selectExpr("c1", "c2", "c4"), Seq("c4", "c2", "c1"), Seq("c3=3", "c4"), overwrite = m) + verifyTable("t1", df.selectExpr("c4", "c2", "c3", "c1")) + } + } + } + + test("insert with column list - duplicated columns") { + withTable("t1") { + val cols = Seq("c1", "c2", "c3") + createTable("t1", cols, Seq("int", "long", "string")) + val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2, c2) values(1, 2, 3)")) + assert(e1.getMessage === "Found duplicate column(s) in the column list: `c2`") + } + } + + test("insert with column list - invalid columns") { + withTable("t1") { + val cols = Seq("c1", "c2", "c3") + createTable("t1", cols, Seq("int", "long", "string")) + val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2, c4) values(1, 2, 3)")) + assert(e1.getMessage === "Cannot resolve column name c4") + } + } + + test("insert with column list - mismatched column list size") { + val msg = "Cannot write to table due to mismatched user specified column size" + withTable("t1") { + val cols = Seq("c1", "c2", "c3") + createTable("t1", cols, Seq("int", "long", "string")) + val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2) values(1, 2, 3)")) + assert(e1.getMessage.contains(msg)) + val e2 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1, c2, c3) values(1, 2)")) + assert(e2.getMessage.contains(msg)) + } + } + + test("insert with column list - mismatched target table out size after rewritten query") { + val v2Msg = "Cannot write to 'testcat.t1', not enough data columns:" + val cols = Seq("c1", "c2", "c3", "c4") + + withTable("t1") { + createTable("t1", cols, Seq.fill(4)("int")) + val e1 = intercept[AnalysisException](sql(s"INSERT INTO t1 (c1) values(1)")) + assert(e1.getMessage.contains("target table has 4 column(s) but the inserted data has 1") || + e1.getMessage.contains(v2Msg)) + } + + withTable("t1") { + createTable("t1", cols, Seq.fill(4)("int"), cols.takeRight(2)) + val e1 = intercept[AnalysisException] { + sql(s"INSERT INTO t1 partition(c3=3, c4=4) (c1) values(1)") + } + assert(e1.getMessage.contains("target table has 4 column(s) but the inserted data has 3") || + e1.getMessage.contains(v2Msg)) + } + } +} + +class FileSourceSQLInsertTestSuite extends SQLInsertTestSuite with SharedSparkSession { + override def format: String = "parquet" + override protected def sparkConf: SparkConf = { + super.sparkConf.set(SQLConf.USE_V1_SOURCE_LIST, format) + } +} + +class DSV2SQLInsertTestSuite extends SQLInsertTestSuite with SharedSparkSession { + + override def format: String = "foo" + + protected override def sparkConf: SparkConf = { + super.sparkConf + .set("spark.sql.catalog.testcat", classOf[InMemoryPartitionTableCatalog].getName) + .set(SQLConf.DEFAULT_CATALOG.key, "testcat") + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala index b86df4db816b3..7526bf0e6fbe9 100644 --- 
a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala @@ -22,13 +22,16 @@ import java.net.{MalformedURLException, URL} import java.sql.{Date, Timestamp} import java.util.concurrent.atomic.AtomicBoolean +import org.apache.commons.io.FileUtils + import org.apache.spark.{AccumulatorSuite, SparkException} import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart} import org.apache.spark.sql.catalyst.expressions.GenericRow import org.apache.spark.sql.catalyst.expressions.aggregate.{Complete, Partial} import org.apache.spark.sql.catalyst.optimizer.{ConvertToLocalRelation, NestedColumnAliasingSuite} -import org.apache.spark.sql.catalyst.plans.logical.Project +import org.apache.spark.sql.catalyst.plans.logical.{Project, RepartitionByExpression} import org.apache.spark.sql.catalyst.util.StringUtils +import org.apache.spark.sql.execution.UnionExec import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, ObjectHashAggregateExec, SortAggregateExec} import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec @@ -1316,7 +1319,7 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark ) } - test("oder by asc by default when not specify ascending and descending") { + test("order by asc by default when not specify ascending and descending") { checkAnswer( sql("SELECT a, b FROM testData2 ORDER BY a desc, b"), Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2)) @@ -2812,7 +2815,7 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark } } - test("SRARK-22266: the same aggregate function was calculated multiple times") { + test("SPARK-22266: the same aggregate function was calculated multiple times") { val query = "SELECT a, max(b+1), max(b+1) + 1 FROM testData2 GROUP BY a" val df = sql(query) val physical = df.queryExecution.sparkPlan @@ -3092,7 +3095,7 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark assert(scan.isInstanceOf[ParquetScan]) assert(scan.asInstanceOf[ParquetScan].pushedFilters === filters) case _ => - fail(s"unknow format $format") + fail(s"unknown format $format") } } @@ -3691,6 +3694,175 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark checkAnswer(sql("SELECT id FROM t WHERE (SELECT true)"), Row(0L)) } } + + test("SPARK-33306: Timezone is needed when cast Date to String") { + withTempView("t1", "t2") { + spark.sql("select to_date(concat('2000-01-0', id)) as d from range(1, 2)") + .createOrReplaceTempView("t1") + spark.sql("select concat('2000-01-0', id) as d from range(1, 2)") + .createOrReplaceTempView("t2") + val result = Date.valueOf("2000-01-01") + + checkAnswer(sql("select t1.d from t1 join t2 on t1.d = t2.d"), Row(result)) + withSQLConf(SQLConf.LEGACY_CAST_DATETIME_TO_STRING.key -> "true") { + checkAnswer(sql("select t1.d from t1 join t2 on t1.d = t2.d"), Row(result)) + } + } + } + + test("SPARK-33338: GROUP BY using literal map should not fail") { + withTempDir { dir => + sql(s"CREATE TABLE t USING ORC LOCATION '${dir.toURI}' AS SELECT map('k1', 'v1') m, 'k1' k") + Seq( + "SELECT map('k1', 'v1')[k] FROM t GROUP BY 1", + "SELECT map('k1', 'v1')[k] FROM t GROUP BY map('k1', 'v1')[k]", + "SELECT map('k1', 'v1')[k] a FROM t GROUP BY a").foreach { statement => + checkAnswer(sql(statement), Row("v1")) + } + } + } + + test("SPARK-33084: Add jar support Ivy URI in 
SQL") { + val sc = spark.sparkContext + // transitive defaults to false, so only the specified jar is downloaded + sql("ADD JAR ivy://org.apache.hive.hcatalog:hive-hcatalog-core:2.3.7") + assert(sc.listJars() + .exists(_.contains("org.apache.hive.hcatalog_hive-hcatalog-core-2.3.7.jar"))) + + // test that downloading an Ivy URI with transitive=true returns multiple jars + sql("ADD JAR ivy://org.scala-js:scalajs-test-interface_2.12:1.2.0?transitive=true") + assert(sc.listJars().exists(_.contains("scalajs-library_2.12"))) + assert(sc.listJars().exists(_.contains("scalajs-test-interface_2.12"))) + + sql("ADD JAR ivy://org.apache.hive:hive-contrib:2.3.7" + + "?exclude=org.pentaho:pentaho-aggdesigner-algorithm&transitive=true") + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-contrib-2.3.7.jar"))) + assert(sc.listJars().exists(_.contains("org.apache.hive_hive-exec-2.3.7.jar"))) + assert(!sc.listJars().exists(_.contains("org.pentaho.pentaho_aggdesigner-algorithm"))) + } + + test("SPARK-33677: LikeSimplification should be skipped if pattern contains any escapeChar") { + withTempView("df") { + Seq("m@ca").toDF("s").createOrReplaceTempView("df") + + val e = intercept[AnalysisException] { + sql("SELECT s LIKE 'm%@ca' ESCAPE '%' FROM df").collect() + } + assert(e.message.contains("the pattern 'm%@ca' is invalid, " + + "the escape character is not allowed to precede '@'")) + + checkAnswer(sql("SELECT s LIKE 'm@@ca' ESCAPE '@' FROM df"), Row(true)) + } + } + + test("limit partition num to 1 when distributing by foldable expressions") { + withSQLConf((SQLConf.SHUFFLE_PARTITIONS.key, "5")) { + Seq(1, "1, 2", null, "version()").foreach { expr => + val plan = sql(s"select * from values (1), (2), (3) t(a) distribute by $expr") + .queryExecution.optimizedPlan + val res = plan.collect { + case r: RepartitionByExpression if r.numPartitions == 1 => true + } + assert(res.nonEmpty) + } + } + } + + test("Fold RepartitionExpression num partition should check if partition expression is empty") { + withSQLConf((SQLConf.SHUFFLE_PARTITIONS.key, "5")) { + val df = spark.range(1).hint("REPARTITION_BY_RANGE") + val plan = df.queryExecution.optimizedPlan + val res = plan.collect { + case r: RepartitionByExpression if r.numPartitions == 5 => true + } + assert(res.nonEmpty) + } + } + + test("SPARK-33593: Vector reader got incorrect data with binary partition value") { + Seq("false", "true").foreach(value => { + withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> value) { + withTable("t1") { + sql( + """CREATE TABLE t1(name STRING, id BINARY, part BINARY) + |USING PARQUET PARTITIONED BY (part)""".stripMargin) + sql("INSERT INTO t1 PARTITION(part = 'Spark SQL') VALUES('a', X'537061726B2053514C')") + checkAnswer(sql("SELECT name, cast(id as string), cast(part as string) FROM t1"), + Row("a", "Spark SQL", "Spark SQL")) + } + } + + withSQLConf(SQLConf.ORC_VECTORIZED_READER_ENABLED.key -> value) { + withTable("t2") { + sql( + """CREATE TABLE t2(name STRING, id BINARY, part BINARY) + |USING ORC PARTITIONED BY (part)""".stripMargin) + sql("INSERT INTO t2 PARTITION(part = 'Spark SQL') VALUES('a', X'537061726B2053514C')") + checkAnswer(sql("SELECT name, cast(id as string), cast(part as string) FROM t2"), + Row("a", "Spark SQL", "Spark SQL")) + } + } + }) + } + + test("SPARK-33084: Add jar support Ivy URI in SQL -- jar contains udf class") { + val sumFuncClass = "org.apache.spark.examples.sql.Spark33084" + val functionName = "test_udf" + withTempDir { dir => + System.setProperty("ivy.home", dir.getAbsolutePath) + val sourceJar = new 
File(Thread.currentThread().getContextClassLoader + .getResource("SPARK-33084.jar").getFile) + val targetCacheJarDir = new File(dir.getAbsolutePath + + "/local/org.apache.spark/SPARK-33084/1.0/jars/") + targetCacheJarDir.mkdir() + // copy jar to local cache + FileUtils.copyFileToDirectory(sourceJar, targetCacheJarDir) + withTempView("v1") { + withUserDefinedFunction( + s"default.$functionName" -> false, + functionName -> true) { + // create temporary function without class + val e = intercept[AnalysisException] { + sql(s"CREATE TEMPORARY FUNCTION $functionName AS '$sumFuncClass'") + }.getMessage + assert(e.contains("Can not load class 'org.apache.spark.examples.sql.Spark33084")) + sql("ADD JAR ivy://org.apache.spark:SPARK-33084:1.0") + sql(s"CREATE TEMPORARY FUNCTION $functionName AS '$sumFuncClass'") + // create a view using a function in 'default' database + sql(s"CREATE TEMPORARY VIEW v1 AS SELECT $functionName(col1) FROM VALUES (1), (2), (3)") + // view v1 should still using function defined in `default` database + checkAnswer(sql("SELECT * FROM v1"), Seq(Row(2.0))) + } + } + System.clearProperty("ivy.home") + } + } + + test("SPARK-33964: Combine distinct unions that have noop project between them") { + val df = sql(""" + |SELECT a, b FROM ( + | SELECT a, b FROM testData2 + | UNION + | SELECT a, sum(b) FROM testData2 GROUP BY a + | UNION + | SELECT null AS a, sum(b) FROM testData2 + |)""".stripMargin) + + val unions = df.queryExecution.sparkPlan.collect { + case u: UnionExec => u + } + + assert(unions.size == 1) + } + + test("SPARK-33591: null as a partition value") { + val t = "part_table" + withTable(t) { + sql(s"CREATE TABLE $t (col1 INT, p1 STRING) USING PARQUET PARTITIONED BY (p1)") + sql(s"INSERT INTO TABLE $t PARTITION (p1 = null) SELECT 0") + checkAnswer(sql(s"SELECT * FROM $t"), Row(0, null)) + } + } } case class Foo(bar: Option[String]) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala index b7cf0798a9d4b..eb2caa61e1590 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQueryTestSuite.scala @@ -24,7 +24,7 @@ import java.util.Locale import scala.collection.mutable.ArrayBuffer import scala.util.control.NonFatal -import org.apache.spark.{SparkConf, SparkException} +import org.apache.spark.{SparkConf, SparkException, TestUtils} import org.apache.spark.sql.catalyst.expressions.codegen.CodeGenerator import org.apache.spark.sql.catalyst.planning.PhysicalOperation import org.apache.spark.sql.catalyst.plans.SQLHelper @@ -49,22 +49,22 @@ import org.apache.spark.util.Utils * * To run the entire test suite: * {{{ - * build/sbt "sql/test-only *SQLQueryTestSuite" + * build/sbt "sql/testOnly *SQLQueryTestSuite" * }}} * * To run a single test file upon change: * {{{ - * build/sbt "~sql/test-only *SQLQueryTestSuite -- -z inline-table.sql" + * build/sbt "~sql/testOnly *SQLQueryTestSuite -- -z inline-table.sql" * }}} * * To re-generate golden files for entire suite, run: * {{{ - * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *SQLQueryTestSuite" + * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly *SQLQueryTestSuite" * }}} * * To re-generate golden file for a single test, run: * {{{ - * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *SQLQueryTestSuite -- -z describe.sql" + * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly *SQLQueryTestSuite -- -z describe.sql" * }}} * * The format 
for input files is simple: @@ -260,6 +260,9 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession with SQLHelper newLine.startsWith("--") && !newLine.startsWith("--QUERY-DELIMITER") } + // SPARK-32106 Since we add SQL test 'transform.sql' will use `cat` command, + // here we need to check command available + assume(TestUtils.testCommandAvailable("/bin/bash")) val input = fileToString(new File(testCase.inputFile)) val (comments, code) = splitCommentsAndCodes(input) @@ -278,18 +281,18 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession with SQLHelper val allCode = importedCode ++ code val tempQueries = if (allCode.exists(_.trim.startsWith("--QUERY-DELIMITER"))) { // Although the loop is heavy, only used for bracketed comments test. - val querys = new ArrayBuffer[String] + val queries = new ArrayBuffer[String] val otherCodes = new ArrayBuffer[String] var tempStr = "" var start = false for (c <- allCode) { if (c.trim.startsWith("--QUERY-DELIMITER-START")) { start = true - querys ++= splitWithSemicolon(otherCodes.toSeq) + queries ++= splitWithSemicolon(otherCodes.toSeq) otherCodes.clear() } else if (c.trim.startsWith("--QUERY-DELIMITER-END")) { start = false - querys += s"\n${tempStr.stripSuffix(";")}" + queries += s"\n${tempStr.stripSuffix(";")}" tempStr = "" } else if (start) { tempStr += s"\n$c" @@ -298,9 +301,9 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession with SQLHelper } } if (otherCodes.nonEmpty) { - querys ++= splitWithSemicolon(otherCodes.toSeq) + queries ++= splitWithSemicolon(otherCodes.toSeq) } - querys.toSeq + queries.toSeq } else { splitWithSemicolon(allCode).toSeq } @@ -502,7 +505,7 @@ class SQLQueryTestSuite extends QueryTest with SharedSparkSession with SQLHelper case _: DescribeCommandBase | _: DescribeColumnCommand | _: DescribeRelation - | _: DescribeColumnStatement => true + | _: DescribeColumn => true case PhysicalOperation(_, _, Sort(_, true, _)) => true case _ => plan.children.iterator.exists(isSorted) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/ShowCreateTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/ShowCreateTableSuite.scala index 1106a787cc9a7..92d306c0e3c11 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/ShowCreateTableSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/ShowCreateTableSuite.scala @@ -155,16 +155,19 @@ abstract class ShowCreateTableSuite extends QueryTest with SQLTestUtils { val ex = intercept[AnalysisException] { sql(s"SHOW CREATE TABLE $viewName") } - assert(ex.getMessage.contains("SHOW CREATE TABLE is not supported on a temporary view")) + assert(ex.getMessage.contains( + s"$viewName is a temp view. 'SHOW CREATE TABLE' expects a table or permanent view.")) } withGlobalTempView(viewName) { sql(s"CREATE GLOBAL TEMPORARY VIEW $viewName AS SELECT 1 AS a") + val globalTempViewDb = spark.sessionState.catalog.globalTempViewManager.database val ex = intercept[AnalysisException] { - val globalTempViewDb = spark.sessionState.catalog.globalTempViewManager.database sql(s"SHOW CREATE TABLE $globalTempViewDb.$viewName") } - assert(ex.getMessage.contains("SHOW CREATE TABLE is not supported on a temporary view")) + assert(ex.getMessage.contains( + s"$globalTempViewDb.$viewName is a temp view. 
" + + "'SHOW CREATE TABLE' expects a table or permanent view.")) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionBuilderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionBuilderSuite.scala index 9da32d02aa723..1f16bb69b3a16 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionBuilderSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionBuilderSuite.scala @@ -24,6 +24,7 @@ import org.apache.spark.internal.config.EXECUTOR_ALLOW_SPARK_CONTEXT import org.apache.spark.internal.config.UI.UI_ENABLED import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.StaticSQLConf._ +import org.apache.spark.util.ThreadUtils /** * Test cases for the builder pattern of [[SparkSession]]. @@ -281,4 +282,134 @@ class SparkSessionBuilderSuite extends SparkFunSuite with BeforeAndAfterEach { () } } + + test("SPARK-32991: Use conf in shared state as the original configuration for RESET") { + val wh = "spark.sql.warehouse.dir" + val td = "spark.sql.globalTempDatabase" + val custom = "spark.sql.custom" + + val conf = new SparkConf() + .setMaster("local") + .setAppName("SPARK-32991") + .set(wh, "./data1") + .set(td, "bob") + + val sc = new SparkContext(conf) + + val spark = SparkSession.builder() + .config(wh, "./data2") + .config(td, "alice") + .config(custom, "kyao") + .getOrCreate() + + // When creating the first session like above, we will update the shared spark conf to the + // newly specified values + val sharedWH = spark.sharedState.conf.get(wh) + val sharedTD = spark.sharedState.conf.get(td) + assert(sharedWH === "./data2", + "The warehouse dir in shared state should be determined by the 1st created spark session") + assert(sharedTD === "alice", + "Static sql configs in shared state should be determined by the 1st created spark session") + assert(spark.sharedState.conf.getOption(custom).isEmpty, + "Dynamic sql configs is session specific") + + assert(spark.conf.get(wh) === sharedWH, + "The warehouse dir in session conf and shared state conf should be consistent") + assert(spark.conf.get(td) === sharedTD, + "Static sql configs in session conf and shared state conf should be consistent") + assert(spark.conf.get(custom) === "kyao", "Dynamic sql configs is session specific") + + spark.sql("RESET") + + assert(spark.conf.get(wh) === sharedWH, + "The warehouse dir in shared state should be respect after RESET") + assert(spark.conf.get(td) === sharedTD, + "Static sql configs in shared state should be respect after RESET") + assert(spark.conf.get(custom) === "kyao", + "Dynamic sql configs in session initial map should be respect after RESET") + + val spark2 = SparkSession.builder() + .config(wh, "./data3") + .config(custom, "kyaoo").getOrCreate() + assert(spark2.conf.get(wh) === sharedWH) + assert(spark2.conf.get(td) === sharedTD) + assert(spark2.conf.get(custom) === "kyaoo") + } + + test("SPARK-32991: RESET should work properly with multi threads") { + val wh = "spark.sql.warehouse.dir" + val td = "spark.sql.globalTempDatabase" + val custom = "spark.sql.custom" + val spark = ThreadUtils.runInNewThread("new session 0", false) { + SparkSession.builder() + .master("local") + .config(wh, "./data0") + .config(td, "bob") + .config(custom, "c0") + .getOrCreate() + } + + spark.sql(s"SET $custom=c1") + assert(spark.conf.get(custom) === "c1") + spark.sql("RESET") + assert(spark.conf.get(wh) === "./data0", + "The warehouse dir in shared state should be respect after RESET") + assert(spark.conf.get(td) === "bob", + "Static 
sql configs in shared state should be respect after RESET") + assert(spark.conf.get(custom) === "c0", + "Dynamic sql configs in shared state should be respect after RESET") + + val spark1 = ThreadUtils.runInNewThread("new session 1", false) { + SparkSession.builder().getOrCreate() + } + + assert(spark === spark1) + + // TODO: SPARK-33718: After clear sessions, the SharedState will be unreachable, then all + // the new static will take effect. + SparkSession.clearDefaultSession() + val spark2 = ThreadUtils.runInNewThread("new session 2", false) { + SparkSession.builder() + .master("local") + .config(wh, "./data1") + .config(td, "alice") + .config(custom, "c2") + .getOrCreate() + } + + assert(spark2 !== spark) + spark2.sql(s"SET $custom=c1") + assert(spark2.conf.get(custom) === "c1") + spark2.sql("RESET") + assert(spark2.conf.get(wh) === "./data1") + assert(spark2.conf.get(td) === "alice") + assert(spark2.conf.get(custom) === "c2") + + } + + test("SPARK-33944: warning setting hive.metastore.warehouse.dir using session options") { + val msg = "Not allowing to set hive.metastore.warehouse.dir in SparkSession's options" + val logAppender = new LogAppender(msg) + withLogAppender(logAppender) { + SparkSession.builder() + .master("local") + .config("hive.metastore.warehouse.dir", "any") + .getOrCreate() + .sharedState + } + assert(logAppender.loggingEvents.exists(_.getRenderedMessage.contains(msg))) + } + + test("SPARK-33944: no warning setting spark.sql.warehouse.dir using session options") { + val msg = "Not allowing to set hive.metastore.warehouse.dir in SparkSession's options" + val logAppender = new LogAppender(msg) + withLogAppender(logAppender) { + SparkSession.builder() + .master("local") + .config("spark.sql.warehouse.dir", "any") + .getOrCreate() + .sharedState + } + assert(!logAppender.loggingEvents.exists(_.getRenderedMessage.contains(msg))) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionExtensionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionExtensionSuite.scala index e5e8bc6917799..35d2513835611 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionExtensionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SparkSessionExtensionSuite.scala @@ -33,7 +33,7 @@ import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.catalyst.trees.TreeNodeTag import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, QueryStageExec} -import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, BroadcastExchangeLike, ShuffleExchangeExec, ShuffleExchangeLike} +import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, BroadcastExchangeLike, ShuffleExchangeExec, ShuffleExchangeLike, ShuffleOrigin} import org.apache.spark.sql.execution.vectorized.OnHeapColumnVector import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf.COLUMN_BATCH_SIZE @@ -88,6 +88,12 @@ class SparkSessionExtensionSuite extends SparkFunSuite { } } + test("SPARK-33621: inject a pre CBO rule") { + withSession(Seq(_.injectPreCBORule(MyRule))) { session => + assert(session.sessionState.optimizer.preCBORules.contains(MyRule(session))) + } + } + test("inject spark planner strategy") { withSession(Seq(_.injectPlannerStrategy(MySparkStrategy))) { session => assert(session.sessionState.planner.strategies.contains(MySparkStrategy(session))) @@ -160,13 +166,13 @@ class SparkSessionExtensionSuite extends SparkFunSuite { // inject rule that will 
run during AQE query stage optimization and will verify that the // custom tags were written in the preparation phase extensions.injectColumnar(session => - MyColumarRule(MyNewQueryStageRule(), MyNewQueryStageRule())) + MyColumnarRule(MyNewQueryStageRule(), MyNewQueryStageRule())) } withSession(extensions) { session => session.sessionState.conf.setConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED, true) assert(session.sessionState.queryStagePrepRules.contains(MyQueryStagePrepRule())) assert(session.sessionState.columnarRules.contains( - MyColumarRule(MyNewQueryStageRule(), MyNewQueryStageRule()))) + MyColumnarRule(MyNewQueryStageRule(), MyNewQueryStageRule()))) import session.sqlContext.implicits._ val data = Seq((100L), (200L), (300L)).toDF("vals").repartition(1) val df = data.selectExpr("vals + 1") @@ -199,12 +205,12 @@ class SparkSessionExtensionSuite extends SparkFunSuite { val extensions = create { extensions => extensions.injectColumnar(session => - MyColumarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())) + MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())) } withSession(extensions) { session => session.sessionState.conf.setConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED, enableAQE) assert(session.sessionState.columnarRules.contains( - MyColumarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))) + MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))) import session.sqlContext.implicits._ // perform a join to inject a broadcast exchange val left = Seq((1, 50L), (2, 100L), (3, 150L)).toDF("l1", "l2") @@ -238,12 +244,12 @@ class SparkSessionExtensionSuite extends SparkFunSuite { .config(COLUMN_BATCH_SIZE.key, 2) .withExtensions { extensions => extensions.injectColumnar(session => - MyColumarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())) } + MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())) } .getOrCreate() try { assert(session.sessionState.columnarRules.contains( - MyColumarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))) + MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))) import session.sqlContext.implicits._ val input = Seq((100L), (200L), (300L)) @@ -271,7 +277,7 @@ class SparkSessionExtensionSuite extends SparkFunSuite { assert(session.sessionState.functionRegistry .lookupFunction(MyExtensions.myFunction._1).isDefined) assert(session.sessionState.columnarRules.contains( - MyColumarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))) + MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule()))) } finally { stop(session) } @@ -384,9 +390,6 @@ case class MyParser(spark: SparkSession, delegate: ParserInterface) extends Pars override def parseDataType(sqlText: String): DataType = delegate.parseDataType(sqlText) - - override def parseRawDataType(sqlText: String): DataType = - delegate.parseRawDataType(sqlText) } object MyExtensions { @@ -573,8 +576,9 @@ class ColumnarBoundReference(ordinal: Int, dataType: DataType, nullable: Boolean class ColumnarAlias(child: ColumnarExpression, name: String)( override val exprId: ExprId = NamedExpression.newExprId, override val qualifier: Seq[String] = Seq.empty, - override val explicitMetadata: Option[Metadata] = None) - extends Alias(child, name)(exprId, qualifier, explicitMetadata) + override val explicitMetadata: Option[Metadata] = None, + override val nonInheritableMetadataKeys: Seq[String] = Seq.empty) + extends Alias(child, name)(exprId, qualifier, explicitMetadata, nonInheritableMetadataKeys) with ColumnarExpression { override def 
columnarEval(batch: ColumnarBatch): Any = child.columnarEval(batch) @@ -643,8 +647,11 @@ class ColumnarProjectExec(projectList: Seq[NamedExpression], child: SparkPlan) * A version of add that supports columnar processing for longs. This version is broken * on purpose so it adds the numbers plus 1 so that the tests can show that it was replaced. */ -class BrokenColumnarAdd(left: ColumnarExpression, right: ColumnarExpression) - extends Add(left, right) with ColumnarExpression { +class BrokenColumnarAdd( + left: ColumnarExpression, + right: ColumnarExpression, + failOnError: Boolean = false) + extends Add(left, right, failOnError) with ColumnarExpression { override def supportsColumnar(): Boolean = left.supportsColumnar && right.supportsColumnar @@ -708,7 +715,7 @@ case class PreRuleReplaceAddWithBrokenVersion() extends Rule[SparkPlan] { def replaceWithColumnarExpression(exp: Expression): ColumnarExpression = exp match { case a: Alias => new ColumnarAlias(replaceWithColumnarExpression(a.child), - a.name)(a.exprId, a.qualifier, a.explicitMetadata) + a.name)(a.exprId, a.qualifier, a.explicitMetadata, a.nonInheritableMetadataKeys) case att: AttributeReference => new ColumnarAttributeReference(att.name, att.dataType, att.nullable, att.metadata)(att.exprId, att.qualifier) @@ -763,7 +770,9 @@ case class PreRuleReplaceAddWithBrokenVersion() extends Rule[SparkPlan] { case class MyShuffleExchangeExec(delegate: ShuffleExchangeExec) extends ShuffleExchangeLike { override def numMappers: Int = delegate.numMappers override def numPartitions: Int = delegate.numPartitions - override def canChangeNumPartitions: Boolean = delegate.canChangeNumPartitions + override def shuffleOrigin: ShuffleOrigin = { + delegate.shuffleOrigin + } override def mapOutputStatisticsFuture: Future[MapOutputStatistics] = delegate.mapOutputStatisticsFuture override def getShuffleRDD(partitionSpecs: Array[ShufflePartitionSpec]): RDD[_] = @@ -815,7 +824,7 @@ case class MyPostRule() extends Rule[SparkPlan] { } } -case class MyColumarRule(pre: Rule[SparkPlan], post: Rule[SparkPlan]) extends ColumnarRule { +case class MyColumnarRule(pre: Rule[SparkPlan], post: Rule[SparkPlan]) extends ColumnarRule { override def preColumnarTransitions: Rule[SparkPlan] = pre override def postColumnarTransitions: Rule[SparkPlan] = post } @@ -829,7 +838,7 @@ class MyExtensions extends (SparkSessionExtensions => Unit) { e.injectOptimizerRule(MyRule) e.injectParser(MyParser) e.injectFunction(MyExtensions.myFunction) - e.injectColumnar(session => MyColumarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())) + e.injectColumnar(session => MyColumnarRule(PreRuleReplaceAddWithBrokenVersion(), MyPostRule())) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala index 18356a4de9ef4..cc3d8375db32f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/StatisticsCollectionSuite.scala @@ -25,7 +25,6 @@ import java.util.concurrent.TimeUnit import scala.collection.mutable import org.apache.spark.sql.catalyst.TableIdentifier -import org.apache.spark.sql.catalyst.analysis.NoSuchTableException import org.apache.spark.sql.catalyst.catalog.CatalogColumnStat import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.util.DateTimeTestUtils @@ -175,6 +174,15 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared } 
} + test("SPARK-33812: column stats round trip serialization with splitting histogram property") { + withSQLConf(SQLConf.HIVE_TABLE_PROPERTY_LENGTH_THRESHOLD.key -> "10") { + statsWithHgms.foreach { case (k, v) => + val roundtrip = CatalogColumnStat.fromMap("t", k, v.toMap(k)) + assert(roundtrip == Some(v)) + } + } + } + test("analyze column command - result verification") { // (data.head.productArity - 1) because the last column does not support stats collection. assert(stats.size == data.head.productArity - 1) @@ -527,7 +535,7 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared val errMsg = intercept[AnalysisException] { sql("ANALYZE TABLE tempView COMPUTE STATISTICS FOR COLUMNS id") }.getMessage - assert(errMsg.contains(s"Table or view 'tempView' not found in database 'default'")) + assert(errMsg.contains("Temporary view `tempView` is not cached for analyzing columns")) // Cache the view then analyze it sql("CACHE TABLE tempView") @@ -540,16 +548,18 @@ class StatisticsCollectionSuite extends StatisticsCollectionTestBase with Shared test("analyzes column statistics in cached global temporary view") { withGlobalTempView("gTempView") { val globalTempDB = spark.sharedState.globalTempViewManager.database - val errMsg1 = intercept[NoSuchTableException] { + val errMsg1 = intercept[AnalysisException] { sql(s"ANALYZE TABLE $globalTempDB.gTempView COMPUTE STATISTICS FOR COLUMNS id") }.getMessage - assert(errMsg1.contains(s"Table or view 'gTempView' not found in database '$globalTempDB'")) + assert(errMsg1.contains("Table or view not found: " + + s"$globalTempDB.gTempView")) // Analyzes in a global temporary view sql("CREATE GLOBAL TEMP VIEW gTempView AS SELECT * FROM range(1, 30)") val errMsg2 = intercept[AnalysisException] { sql(s"ANALYZE TABLE $globalTempDB.gTempView COMPUTE STATISTICS FOR COLUMNS id") }.getMessage - assert(errMsg2.contains(s"Table or view 'gTempView' not found in database '$globalTempDB'")) + assert(errMsg2.contains( + s"Temporary view `$globalTempDB`.`gTempView` is not cached for analyzing columns")) // Cache the view then analyze it sql(s"CACHE TABLE $globalTempDB.gTempView") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala index a21c461e84588..73b23496de515 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala @@ -1314,7 +1314,7 @@ class SubquerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark // need to execute the query before we can examine fs.inputRDDs() assert(stripAQEPlan(df.queryExecution.executedPlan) match { case WholeStageCodegenExec(ColumnarToRowExec(InputAdapter( - fs @ FileSourceScanExec(_, _, _, partitionFilters, _, _, _, _)))) => + fs @ FileSourceScanExec(_, _, _, partitionFilters, _, _, _, _, _)))) => partitionFilters.exists(ExecSubqueryExpression.hasSubquery) && fs.inputRDDs().forall( _.asInstanceOf[FileScanRDD].filePartitions.forall( diff --git a/sql/core/src/test/scala/org/apache/spark/sql/TPCDSQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/TPCDSQuerySuite.scala index decd1d6d08d27..22e1b838f3f3f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/TPCDSQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/TPCDSQuerySuite.scala @@ -20,11 +20,13 @@ package org.apache.spark.sql import org.apache.spark.SparkConf import org.apache.spark.sql.catalyst.util.resourceToString import 
org.apache.spark.sql.internal.SQLConf +import org.apache.spark.tags.ExtendedSQLTest /** * This test suite ensures all the TPC-DS queries can be successfully analyzed, optimized * and compiled without hitting the max iteration threshold. */ +@ExtendedSQLTest class TPCDSQuerySuite extends BenchmarkQueryTest with TPCDSBase { tpcdsQueries.foreach { name => @@ -64,10 +66,12 @@ class TPCDSQuerySuite extends BenchmarkQueryTest with TPCDSBase { } } +@ExtendedSQLTest class TPCDSQueryWithStatsSuite extends TPCDSQuerySuite { override def injectStats: Boolean = true } +@ExtendedSQLTest class TPCDSQueryANSISuite extends TPCDSQuerySuite { override protected def sparkConf: SparkConf = super.sparkConf.set(SQLConf.ANSI_ENABLED, true) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/TPCDSTableStats.scala b/sql/core/src/test/scala/org/apache/spark/sql/TPCDSTableStats.scala index f39b4b8b56c2e..ee9cf7b67225f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/TPCDSTableStats.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/TPCDSTableStats.scala @@ -376,7 +376,7 @@ object TPCDSTableStats { "s_closed_date_sk" -> CatalogColumnStat(Some(70L), Some("2450823"), Some("2451313"), Some(296), Some(4), Some(4), None, CatalogColumnStat.VERSION), "s_store_id" -> CatalogColumnStat(Some(210L), None, None, Some(0), Some(16), Some(16), None, CatalogColumnStat.VERSION), "s_geography_class" -> CatalogColumnStat(Some(1L), None, None, Some(3), Some(7), Some(7), None, CatalogColumnStat.VERSION), - "s_tax_precentage" -> CatalogColumnStat(Some(12L), Some("0.00"), Some("0.11"), Some(5), Some(8), Some(8), None, CatalogColumnStat.VERSION) + "s_tax_percentage" -> CatalogColumnStat(Some(12L), Some("0.00"), Some("0.11"), Some(5), Some(8), Some(8), None, CatalogColumnStat.VERSION) )), "store_returns" -> CatalogStatistics(4837573440L, Some(28795080L), Map( "sr_item_sk" -> CatalogColumnStat(Some(197284L), Some("1"), Some("204000"), Some(0), Some(8), Some(8), None, CatalogColumnStat.VERSION), diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UnwrapCastInComparisonEndToEndSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/UnwrapCastInComparisonEndToEndSuite.scala new file mode 100644 index 0000000000000..e6f0426428bd4 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/UnwrapCastInComparisonEndToEndSuite.scala @@ -0,0 +1,194 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql + +import org.apache.spark.sql.catalyst.expressions.IntegralLiteralTestUtils.{negativeInt, positiveInt} +import org.apache.spark.sql.test.SharedSparkSession +import org.apache.spark.sql.types.Decimal + +class UnwrapCastInComparisonEndToEndSuite extends QueryTest with SharedSparkSession { + import testImplicits._ + + val t = "test_table" + + test("cases when literal is max") { + withTable(t) { + Seq[(Integer, java.lang.Short, java.lang.Float)]( + (1, 100.toShort, 3.14.toFloat), + (2, Short.MaxValue, Float.NaN), + (3, Short.MinValue, Float.PositiveInfinity), + (4, 0.toShort, Float.MaxValue), + (5, null, null)) + .toDF("c1", "c2", "c3").write.saveAsTable(t) + val df = spark.table(t) + + val lit = Short.MaxValue.toInt + checkAnswer(df.where(s"c2 > $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 >= $lit").select("c1"), Row(2)) + checkAnswer(df.where(s"c2 == $lit").select("c1"), Row(2)) + checkAnswer(df.where(s"c2 <=> $lit").select("c1"), Row(2)) + checkAnswer(df.where(s"c2 != $lit").select("c1"), Row(1) :: Row(3) :: Row(4) :: Nil) + checkAnswer(df.where(s"c2 <= $lit").select("c1"), Row(1) :: Row(2) :: Row(3) :: Row(4) :: Nil) + checkAnswer(df.where(s"c2 < $lit").select("c1"), Row(1) :: Row(3) :: Row(4) :: Nil) + + checkAnswer(df.where(s"c3 > double('nan')").select("c1"), Seq.empty) + checkAnswer(df.where(s"c3 >= double('nan')").select("c1"), Row(2)) + checkAnswer(df.where(s"c3 == double('nan')").select("c1"), Row(2)) + checkAnswer(df.where(s"c3 <=> double('nan')").select("c1"), Row(2)) + checkAnswer(df.where(s"c3 != double('nan')").select("c1"), Row(1) :: Row(3) :: Row(4) :: Nil) + checkAnswer(df.where(s"c3 <= double('nan')").select("c1"), + Row(1) :: Row(2) :: Row(3) :: Row(4) :: Nil) + checkAnswer(df.where(s"c3 < double('nan')").select("c1"), Row(1) :: Row(3) :: Row(4) :: Nil) + } + } + + test("cases when literal is > max") { + withTable(t) { + Seq[(Integer, java.lang.Short)]( + (1, 100.toShort), + (2, Short.MaxValue), + (3, null)) + .toDF("c1", "c2").write.saveAsTable(t) + val df = spark.table(t) + val lit = positiveInt + checkAnswer(df.where(s"c2 > $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 >= $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 == $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 <=> $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 != $lit").select("c1"), Row(1) :: Row(2) :: Nil) + checkAnswer(df.where(s"c2 <= $lit").select("c1"), Row(1) :: Row(2) :: Nil) + checkAnswer(df.where(s"c2 < $lit").select("c1"), Row(1) :: Row(2) :: Nil) + + // No test for float case since NaN is greater than any other numeric value + } + } + + test("cases when literal is min") { + withTable(t) { + Seq[(Integer, java.lang.Short, java.lang.Float)]( + (1, 100.toShort, 3.14.toFloat), + (2, Short.MinValue, Float.NegativeInfinity), + (3, Short.MaxValue, Float.MinValue), + (4, null, null)) + .toDF("c1", "c2", "c3").write.saveAsTable(t) + val df = spark.table(t) + + val lit = Short.MinValue.toInt + checkAnswer(df.where(s"c2 > $lit").select("c1"), Row(1) :: Row(3) :: Nil) + checkAnswer(df.where(s"c2 >= $lit").select("c1"), Row(1) :: Row(2) :: Row(3) :: Nil) + checkAnswer(df.where(s"c2 == $lit").select("c1"), Row(2)) + checkAnswer(df.where(s"c2 <=> $lit").select("c1"), Row(2)) + checkAnswer(df.where(s"c2 != $lit").select("c1"), Row(1) :: Row(3) :: Nil) + checkAnswer(df.where(s"c2 <= $lit").select("c1"), Row(2)) + checkAnswer(df.where(s"c2 < $lit").select("c1"), Seq.empty) + + checkAnswer(df.where(s"c3 > 
double('-inf')").select("c1"), Row(1) :: Row(3) :: Nil) + checkAnswer(df.where(s"c3 >= double('-inf')").select("c1"), Row(1) :: Row(2) :: Row(3) :: Nil) + checkAnswer(df.where(s"c3 == double('-inf')").select("c1"), Row(2)) + checkAnswer(df.where(s"c3 <=> double('-inf')").select("c1"), Row(2)) + checkAnswer(df.where(s"c3 != double('-inf')").select("c1"), Row(1) :: Row(3) :: Nil) + checkAnswer(df.where(s"c3 <= double('-inf')").select("c1"), Row(2) :: Nil) + checkAnswer(df.where(s"c3 < double('-inf')").select("c1"), Seq.empty) + } + } + + test("cases when literal is < min") { + val t = "test_table" + withTable(t) { + Seq[(Integer, java.lang.Short)]( + (1, 100.toShort), + (2, Short.MinValue), + (3, null)) + .toDF("c1", "c2").write.saveAsTable(t) + val df = spark.table(t) + + val lit = negativeInt + checkAnswer(df.where(s"c2 > $lit").select("c1"), Row(1) :: Row(2) :: Nil) + checkAnswer(df.where(s"c2 >= $lit").select("c1"), Row(1) :: Row(2) :: Nil) + checkAnswer(df.where(s"c2 == $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 <=> $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 != $lit").select("c1"), Row(1) :: Row(2) :: Nil) + checkAnswer(df.where(s"c2 <= $lit").select("c1"), Seq.empty) + checkAnswer(df.where(s"c2 < $lit").select("c1"), Seq.empty) + } + } + + test("cases when literal is within range (min, max)") { + withTable(t) { + Seq((1, 300.toShort), (2, 500.toShort)).toDF("c1", "c2").write.saveAsTable(t) + val df = spark.table(t) + + checkAnswer(df.where("c2 < 200").select("c1"), Seq.empty) + checkAnswer(df.where("c2 < 400").select("c1"), Row(1) :: Nil) + checkAnswer(df.where("c2 < 600").select("c1"), Row(1) :: Row(2) :: Nil) + + checkAnswer(df.where("c2 <= 100").select("c1"), Seq.empty) + checkAnswer(df.where("c2 <= 300").select("c1"), Row(1) :: Nil) + checkAnswer(df.where("c2 <= 500").select("c1"), Row(1) :: Row(2) :: Nil) + + checkAnswer(df.where("c2 == 100").select("c1"), Seq.empty) + checkAnswer(df.where("c2 == 300").select("c1"), Row(1) :: Nil) + checkAnswer(df.where("c2 == 500").select("c1"), Row(2) :: Nil) + + checkAnswer(df.where("c2 <=> 100").select("c1"), Seq.empty) + checkAnswer(df.where("c2 <=> 300").select("c1"), Row(1) :: Nil) + checkAnswer(df.where("c2 <=> 500").select("c1"), Row(2) :: Nil) + checkAnswer(df.where("c2 <=> null").select("c1"), Seq.empty) + + checkAnswer(df.where("c2 >= 200").select("c1"), Row(1) :: Row(2) :: Nil) + checkAnswer(df.where("c2 >= 400").select("c1"), Row(2) :: Nil) + checkAnswer(df.where("c2 >= 600").select("c1"), Seq.empty) + + checkAnswer(df.where("c2 > 100").select("c1"), Row(1) :: Row(2) :: Nil) + checkAnswer(df.where("c2 > 300").select("c1"), Row(2) :: Nil) + checkAnswer(df.where("c2 > 500").select("c1"), Seq.empty) + } + } + + test("cases when literal is within range (min, max) and has rounding up or down") { + withTable(t) { + Seq((1, 100, 3.14.toFloat, decimal(200.12))) + .toDF("c1", "c2", "c3", "c4").write.saveAsTable(t) + val df = spark.table(t) + + checkAnswer(df.where("c2 > 99.6").select("c1"), Row(1)) + checkAnswer(df.where("c2 > 100.4").select("c1"), Seq.empty) + checkAnswer(df.where("c2 == 100.4").select("c1"), Seq.empty) + checkAnswer(df.where("c2 <=> 100.4").select("c1"), Seq.empty) + checkAnswer(df.where("c2 < 99.6").select("c1"), Seq.empty) + checkAnswer(df.where("c2 < 100.4").select("c1"), Row(1)) + + checkAnswer(df.where("c3 >= 3.14").select("c1"), Row(1)) + // float(3.14) is casted to double(3.140000104904175) + checkAnswer(df.where("c3 >= 3.14000010").select("c1"), Row(1)) + checkAnswer(df.where("c3 == 
3.14").select("c1"), Seq.empty) + checkAnswer(df.where("c3 <=> 3.14").select("c1"), Seq.empty) + checkAnswer(df.where("c3 < 3.14000010").select("c1"), Seq.empty) + checkAnswer(df.where("c3 <= 3.14").select("c1"), Seq.empty) + + checkAnswer(df.where("c4 > cast(200.1199 as decimal(10, 4))").select("c1"), Row(1)) + checkAnswer(df.where("c4 >= cast(200.1201 as decimal(10, 4))").select("c1"), Seq.empty) + checkAnswer(df.where("c4 == cast(200.1156 as decimal(10, 4))").select("c1"), Seq.empty) + checkAnswer(df.where("c4 <=> cast(200.1201 as decimal(10, 4))").select("c1"), Seq.empty) + checkAnswer(df.where("c4 <= cast(200.1201 as decimal(10, 4))").select("c1"), Row(1)) + checkAnswer(df.where("c4 < cast(200.1159 as decimal(10, 4))").select("c1"), Seq.empty) + } + } + + private def decimal(v: BigDecimal): Decimal = Decimal(v, 5, 2) +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/UpdateFieldsBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/UpdateFieldsBenchmark.scala new file mode 100644 index 0000000000000..28af552fe586b --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/UpdateFieldsBenchmark.scala @@ -0,0 +1,224 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.benchmark.Benchmark +import org.apache.spark.sql.execution.benchmark.SqlBasedBenchmark +import org.apache.spark.sql.functions.{col, lit} +import org.apache.spark.sql.types.{IntegerType, StructField, StructType} + +/** + * Benchmark to measure Spark's performance analyzing and optimizing long UpdateFields chains. + * + * {{{ + * To run this benchmark: + * 1. without sbt: + * bin/spark-submit --class + * 2. with sbt: + * build/sbt "sql/test:runMain " + * 3. generate result: + * SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain " + * Results will be written to "benchmarks/UpdateFieldsBenchmark-results.txt". 
+ * }}} + */ +object UpdateFieldsBenchmark extends SqlBasedBenchmark { + + def nestedColName(d: Int, colNum: Int): String = s"nested${d}Col$colNum" + + def nestedStructType( + colNums: Seq[Int], + nullable: Boolean, + maxDepth: Int, + currDepth: Int = 1): StructType = { + + if (currDepth == maxDepth) { + val fields = colNums.map { colNum => + val name = nestedColName(currDepth, colNum) + StructField(name, IntegerType, nullable = false) + } + StructType(fields) + } else { + val fields = colNums.foldLeft(Seq.empty[StructField]) { + case (structFields, colNum) if colNum == 0 => + val nested = nestedStructType(colNums, nullable, maxDepth, currDepth + 1) + structFields :+ StructField(nestedColName(currDepth, colNum), nested, nullable) + case (structFields, colNum) => + val name = nestedColName(currDepth, colNum) + structFields :+ StructField(name, IntegerType, nullable = false) + } + StructType(fields) + } + } + + /** + * Utility function for generating an empty DataFrame with nested columns. + * + * @param maxDepth: The depth to which to create nested columns. + * @param numColsAtEachDepth: The number of columns to create at each depth. + * @param nullable: This value is used to set the nullability of any StructType columns. + */ + def emptyNestedDf(maxDepth: Int, numColsAtEachDepth: Int, nullable: Boolean): DataFrame = { + require(maxDepth > 0) + require(numColsAtEachDepth > 0) + + val nestedColumnDataType = nestedStructType(0 until numColsAtEachDepth, nullable, maxDepth) + spark.createDataFrame( + spark.sparkContext.emptyRDD[Row], + StructType(Seq(StructField(nestedColName(0, 0), nestedColumnDataType, nullable)))) + } + + trait ModifyNestedColumns { + val name: String + def apply(column: Column, numsToAdd: Seq[Int], numsToDrop: Seq[Int], maxDepth: Int): Column + } + + object Performant extends ModifyNestedColumns { + override val name: String = "performant" + + override def apply( + column: Column, + numsToAdd: Seq[Int], + numsToDrop: Seq[Int], + maxDepth: Int): Column = helper(column, numsToAdd, numsToDrop, maxDepth, 1) + + private def helper( + column: Column, + numsToAdd: Seq[Int], + numsToDrop: Seq[Int], + maxDepth: Int, + currDepth: Int): Column = { + + // drop columns at the current depth + val dropped = if (numsToDrop.nonEmpty) { + column.dropFields(numsToDrop.map(num => nestedColName(currDepth, num)): _*) + } else column + + // add columns at the current depth + val added = numsToAdd.foldLeft(dropped) { + (res, num) => res.withField(nestedColName(currDepth, num), lit(num)) + } + + if (currDepth == maxDepth) { + added + } else { + // add/drop columns at the next depth + val newValue = helper( + column = col((0 to currDepth).map(d => nestedColName(d, 0)).mkString(".")), + numsToAdd = numsToAdd, + numsToDrop = numsToDrop, + currDepth = currDepth + 1, + maxDepth = maxDepth) + added.withField(nestedColName(currDepth, 0), newValue) + } + } + } + + object NonPerformant extends ModifyNestedColumns { + override val name: String = "non-performant" + + override def apply( + column: Column, + numsToAdd: Seq[Int], + numsToDrop: Seq[Int], + maxDepth: Int): Column = { + + val dropped = if (numsToDrop.nonEmpty) { + val colsToDrop = (1 to maxDepth).flatMap { depth => + numsToDrop.map(num => s"${prefix(depth)}${nestedColName(depth, num)}") + } + column.dropFields(colsToDrop: _*) + } else column + + val added = { + val colsToAdd = (1 to maxDepth).flatMap { depth => + numsToAdd.map(num => (s"${prefix(depth)}${nestedColName(depth, num)}", lit(num))) + } + colsToAdd.foldLeft(dropped)((col, add) => 
col.withField(add._1, add._2)) + } + + added + } + + private def prefix(depth: Int): String = + if (depth == 1) "" + else (1 until depth).map(d => nestedColName(d, 0)).mkString("", ".", ".") + } + + private def updateFieldsBenchmark( + methods: Seq[ModifyNestedColumns], + maxDepth: Int, + initialNumberOfColumns: Int, + numsToAdd: Seq[Int] = Seq.empty, + numsToDrop: Seq[Int] = Seq.empty): Unit = { + + val name = s"Add ${numsToAdd.length} columns and drop ${numsToDrop.length} columns " + + s"at $maxDepth different depths of nesting" + + runBenchmark(name) { + val benchmark = new Benchmark( + name = name, + // The purpose of this benchmark is to ensure Spark is able to analyze and optimize long + // UpdateFields chains quickly so it runs over 0 rows of data. + valuesPerIteration = 0, + output = output) + + val nonNullableStructsDf = emptyNestedDf(maxDepth, initialNumberOfColumns, nullable = false) + val nullableStructsDf = emptyNestedDf(maxDepth, initialNumberOfColumns, nullable = true) + + methods.foreach { method => + val modifiedColumn = method( + column = col(nestedColName(0, 0)), + numsToAdd = numsToAdd, + numsToDrop = numsToDrop, + maxDepth = maxDepth + ).as(nestedColName(0, 0)) + + benchmark.addCase(s"To non-nullable StructTypes using ${method.name} method") { _ => + nonNullableStructsDf.select(modifiedColumn).queryExecution.optimizedPlan + } + + benchmark.addCase(s"To nullable StructTypes using ${method.name} method") { _ => + nullableStructsDf.select(modifiedColumn).queryExecution.optimizedPlan + } + } + + benchmark.run() + } + } + + override def runBenchmarkSuite(mainArgs: Array[String]): Unit = { + // This benchmark compares the performant and non-performant methods of writing the same query. + // We use small values for maxDepth, numsToAdd, and numsToDrop because the NonPerformant method + // scales extremely poorly with the number of nested columns being added/dropped. + updateFieldsBenchmark( + methods = Seq(Performant, NonPerformant), + maxDepth = 3, + initialNumberOfColumns = 5, + numsToAdd = 5 to 6, + numsToDrop = 3 to 4) + + // This benchmark is to show that the performant method of writing a query when we want to add + // and drop a large number of nested columns scales nicely. + updateFieldsBenchmark( + methods = Seq(Performant), + maxDepth = 100, + initialNumberOfColumns = 51, + numsToAdd = 51 to 100, + numsToDrop = 1 to 50) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala new file mode 100644 index 0000000000000..f8d4a0970ff89 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/AlterTablePartitionV2SQLSuite.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.connector + +import org.apache.spark.sql.AnalysisException + +class AlterTablePartitionV2SQLSuite extends DatasourceV2SQLBase { + test("ALTER TABLE RECOVER PARTITIONS") { + val t = "testcat.ns1.ns2.tbl" + withTable(t) { + spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") + val e = intercept[AnalysisException] { + sql(s"ALTER TABLE $t RECOVER PARTITIONS") + } + assert(e.message.contains( + "ALTER TABLE ... RECOVER PARTITIONS is not supported for v2 tables.")) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2DataFrameSessionCatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2DataFrameSessionCatalogSuite.scala index 6b25d7c61663c..46112d40f08ba 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2DataFrameSessionCatalogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2DataFrameSessionCatalogSuite.scala @@ -30,7 +30,6 @@ import org.apache.spark.sql.connector.expressions.Transform import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.StructType -import org.apache.spark.sql.util.CaseInsensitiveStringMap class DataSourceV2DataFrameSessionCatalogSuite extends InsertIntoTests(supportsDynamicOverwrite = true, includeSQLOnlyTests = false) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSessionCatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSessionCatalogSuite.scala index cf00b3b5e4410..c973e2ba30004 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSessionCatalogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSessionCatalogSuite.scala @@ -79,4 +79,12 @@ class DataSourceV2SQLSessionCatalogSuite Row("keyX", s"Table default.$t1 does not have property: keyX")) } } + + test("SPARK-33651: allow CREATE EXTERNAL TABLE without LOCATION") { + withTable("t") { + val prop = TestV2SessionCatalogBase.SIMULATE_ALLOW_EXTERNAL_PROPERTY + "=true" + // The following should not throw AnalysisException. 
+ sql(s"CREATE EXTERNAL TABLE t (i INT) USING $v2Format TBLPROPERTIES($prop)") + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala index ffc115e6b7600..0a6bd795cd0ae 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala @@ -24,22 +24,26 @@ import scala.collection.JavaConverters._ import org.apache.spark.SparkException import org.apache.spark.sql._ -import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, NamespaceAlreadyExistsException, NoSuchDatabaseException, NoSuchNamespaceException, NoSuchTableException, TableAlreadyExistsException} +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, NamespaceAlreadyExistsException, NoSuchDatabaseException, NoSuchNamespaceException, TableAlreadyExistsException} import org.apache.spark.sql.catalyst.parser.ParseException import org.apache.spark.sql.connector.catalog._ import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME import org.apache.spark.sql.connector.catalog.CatalogV2Util.withDefaultOwnership +import org.apache.spark.sql.execution.columnar.InMemoryRelation import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf} import org.apache.spark.sql.internal.SQLConf.{PARTITION_OVERWRITE_MODE, PartitionOverwriteMode, V2_SESSION_CATALOG_IMPLEMENTATION} import org.apache.spark.sql.internal.connector.SimpleTableProvider import org.apache.spark.sql.sources.SimpleScanSource -import org.apache.spark.sql.types.{BooleanType, LongType, StringType, StructField, StructType} +import org.apache.spark.sql.types.{LongType, StringType, StructField, StructType} import org.apache.spark.sql.util.CaseInsensitiveStringMap +import org.apache.spark.storage.StorageLevel +import org.apache.spark.unsafe.types.UTF8String import org.apache.spark.util.Utils class DataSourceV2SQLSuite extends InsertIntoTests(supportsDynamicOverwrite = true, includeSQLOnlyTests = true) - with AlterTableTests { + with AlterTableTests with DatasourceV2SQLBase { import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._ @@ -48,10 +52,6 @@ class DataSourceV2SQLSuite override protected val catalogAndNamespace = "testcat.ns1.ns2." 
private val defaultUser: String = Utils.getCurrentUserName() - private def catalog(name: String): CatalogPlugin = { - spark.sessionState.catalogManager.catalog(name) - } - protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode): Unit = { val tmpView = "tmp_view" withTempView(tmpView) { @@ -72,26 +72,6 @@ class DataSourceV2SQLSuite v2Catalog.loadTable(Identifier.of(namespace, nameParts.last)) } - before { - spark.conf.set("spark.sql.catalog.testcat", classOf[InMemoryTableCatalog].getName) - spark.conf.set( - "spark.sql.catalog.testcat_atomic", classOf[StagingInMemoryTableCatalog].getName) - spark.conf.set("spark.sql.catalog.testcat2", classOf[InMemoryTableCatalog].getName) - spark.conf.set( - V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[InMemoryTableSessionCatalog].getName) - - val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data") - df.createOrReplaceTempView("source") - val df2 = spark.createDataFrame(Seq((4L, "d"), (5L, "e"), (6L, "f"))).toDF("id", "data") - df2.createOrReplaceTempView("source2") - } - - after { - spark.sessionState.catalog.reset() - spark.sessionState.catalogManager.reset() - spark.sessionState.conf.clear() - } - test("CreateTable: use v2 plan because catalog is set") { spark.sql("CREATE TABLE testcat.table_name (id bigint NOT NULL, data string) USING foo") @@ -161,6 +141,10 @@ class DataSourceV2SQLSuite Array("# Partitioning", "", ""), Array("Part 0", "id", ""), Array("", "", ""), + Array("# Metadata Columns", "", ""), + Array("index", "string", "Metadata column used to conflict with a data column"), + Array("_partition", "string", "Partition key used to store the row"), + Array("", "", ""), Array("# Detailed Table Information", "", ""), Array("Name", "testcat.table_name", ""), Array("Comment", "this is a test table", ""), @@ -168,7 +152,72 @@ class DataSourceV2SQLSuite Array("Provider", "foo", ""), Array(TableCatalog.PROP_OWNER.capitalize, defaultUser, ""), Array("Table Properties", "[bar=baz]", ""))) + } + + test("Describe column for v2 catalog") { + val t = "testcat.tbl" + withTable(t) { + sql(s"CREATE TABLE $t (id bigint, data string COMMENT 'hello') USING foo") + val df1 = sql(s"DESCRIBE $t id") + assert(df1.schema.map(field => (field.name, field.dataType)) + === Seq(("info_name", StringType), ("info_value", StringType))) + assert(df1.collect === Seq( + Row("col_name", "id"), + Row("data_type", "bigint"), + Row("comment", "NULL"))) + val df2 = sql(s"DESCRIBE $t data") + assert(df2.schema.map(field => (field.name, field.dataType)) + === Seq(("info_name", StringType), ("info_value", StringType))) + assert(df2.collect === Seq( + Row("col_name", "data"), + Row("data_type", "string"), + Row("comment", "hello"))) + + assertAnalysisError( + s"DESCRIBE $t invalid_col", + "cannot resolve '`invalid_col`' given input columns: [testcat.tbl.data, testcat.tbl.id]") + } + } + + test("Describe column for v2 catalog should work with qualified columns") { + val t = "testcat.ns.tbl" + withTable(t) { + sql(s"CREATE TABLE $t (id bigint) USING foo") + Seq("testcat.ns.tbl.id", "ns.tbl.id", "tbl.id", "id").foreach { col => + val df = sql(s"DESCRIBE $t $col") + assert(df.schema.map(field => (field.name, field.dataType)) + === Seq(("info_name", StringType), ("info_value", StringType))) + assert(df.collect === Seq( + Row("col_name", "id"), + Row("data_type", "bigint"), + Row("comment", "NULL"))) + } + } + } + test("Describing nested column for v2 catalog is not supported") { + val t = "testcat.tbl" + withTable(t) { + sql(s"CREATE TABLE 
$t (d struct<a: INT, b: INT>) USING foo") + assertAnalysisError( + s"describe $t d.a", + "DESC TABLE COLUMN does not support nested column") + } + } + + test("SPARK-33004: Describe column should resolve to a temporary view first") { + withTable("testcat.ns.t") { + withTempView("t") { + sql("CREATE TABLE testcat.ns.t (id bigint) USING foo") + sql("CREATE TEMPORARY VIEW t AS SELECT 2 as i") + sql("USE testcat.ns") + checkAnswer( + sql("DESCRIBE t i"), + Seq(Row("col_name", "i"), + Row("data_type", "int"), + Row("comment", "NULL"))) + } + } } test("CreateTable: use v2 plan and session catalog when provider is v2") { @@ -259,25 +308,26 @@ class DataSourceV2SQLSuite checkAnswer(spark.internalCreateDataFrame(rdd, table.schema), Seq.empty) } - // TODO: ignored by SPARK-31707, restore the test after create table syntax unification - ignore("CreateTable: without USING clause") { - // unset this config to use the default v2 session catalog. - spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key) - val testCatalog = catalog("testcat").asTableCatalog + test("CreateTable: without USING clause") { + withSQLConf(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT.key -> "false") { + // unset this config to use the default v2 session catalog. + spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key) + val testCatalog = catalog("testcat").asTableCatalog - sql("CREATE TABLE testcat.t1 (id int)") - val t1 = testCatalog.loadTable(Identifier.of(Array(), "t1")) - // Spark shouldn't set the default provider for catalog plugins. - assert(!t1.properties.containsKey(TableCatalog.PROP_PROVIDER)) + sql("CREATE TABLE testcat.t1 (id int)") + val t1 = testCatalog.loadTable(Identifier.of(Array(), "t1")) + // Spark shouldn't set the default provider for catalog plugins. + assert(!t1.properties.containsKey(TableCatalog.PROP_PROVIDER)) - sql("CREATE TABLE t2 (id int)") - val t2 = spark.sessionState.catalogManager.v2SessionCatalog.asTableCatalog - .loadTable(Identifier.of(Array("default"), "t2")).asInstanceOf[V1Table] - // Spark should set the default provider as DEFAULT_DATA_SOURCE_NAME for the session catalog. - assert(t2.v1Table.provider == Some(conf.defaultDataSourceName)) + sql("CREATE TABLE t2 (id int)") + val t2 = spark.sessionState.catalogManager.v2SessionCatalog.asTableCatalog + .loadTable(Identifier.of(Array("default"), "t2")).asInstanceOf[V1Table] + // Spark should set the default provider as DEFAULT_DATA_SOURCE_NAME for the session catalog. 
+ assert(t2.v1Table.provider == Some(conf.defaultDataSourceName)) + } } - test("CreateTable/RepalceTable: invalid schema if has interval type") { + test("CreateTable/ReplaceTable: invalid schema if has interval type") { Seq("CREATE", "REPLACE").foreach { action => val e1 = intercept[AnalysisException]( sql(s"$action TABLE table_name (id int, value interval) USING $v2Format")) @@ -426,7 +476,7 @@ class DataSourceV2SQLSuite intercept[Exception] { spark.sql("REPLACE TABLE testcat.table_name" + - s" USING foo OPTIONS (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}`=true)" + + s" USING foo TBLPROPERTIES (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}`=true)" + s" AS SELECT id FROM source") } @@ -459,7 +509,7 @@ class DataSourceV2SQLSuite intercept[Exception] { spark.sql("REPLACE TABLE testcat_atomic.table_name" + - s" USING foo OPTIONS (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}=true)" + + s" USING foo TBLPROPERTIES (`${InMemoryTable.SIMULATE_FAILED_WRITE_OPTION}=true)" + s" AS SELECT id FROM source") } @@ -702,28 +752,117 @@ class DataSourceV2SQLSuite assert(t2.v1Table.provider == Some(conf.defaultDataSourceName)) } - test("DropTable: basic") { - val tableName = "testcat.ns1.ns2.tbl" - val ident = Identifier.of(Array("ns1", "ns2"), "tbl") - sql(s"CREATE TABLE $tableName USING foo AS SELECT id, data FROM source") - assert(catalog("testcat").asTableCatalog.tableExists(ident) === true) - sql(s"DROP TABLE $tableName") - assert(catalog("testcat").asTableCatalog.tableExists(ident) === false) + test("SPARK-34039: ReplaceTable (atomic or non-atomic) should invalidate cache") { + Seq("testcat.ns.t", "testcat_atomic.ns.t").foreach { t => + val view = "view" + withTable(t) { + withTempView(view) { + sql(s"CREATE TABLE $t USING foo AS SELECT id, data FROM source") + sql(s"CACHE TABLE $view AS SELECT id FROM $t") + checkAnswer(sql(s"SELECT * FROM $t"), spark.table("source")) + checkAnswer(sql(s"SELECT * FROM $view"), spark.table("source").select("id")) + + sql(s"REPLACE TABLE $t (a bigint) USING foo") + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(view)).isEmpty) + } + } + } } - test("DropTable: table qualified with the session catalog name") { - val ident = Identifier.of(Array("default"), "tbl") - sql("CREATE TABLE tbl USING json AS SELECT 1 AS i") - assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === true) - sql("DROP TABLE spark_catalog.default.tbl") - assert(catalog("spark_catalog").asTableCatalog.tableExists(ident) === false) + test("SPARK-33492: ReplaceTableAsSelect (atomic or non-atomic) should invalidate cache") { + Seq("testcat.ns.t", "testcat_atomic.ns.t").foreach { t => + val view = "view" + withTable(t) { + withTempView(view) { + sql(s"CREATE TABLE $t USING foo AS SELECT id, data FROM source") + sql(s"CACHE TABLE $view AS SELECT id FROM $t") + checkAnswer(sql(s"SELECT * FROM $t"), spark.table("source")) + checkAnswer(sql(s"SELECT * FROM $view"), spark.table("source").select("id")) + + sql(s"REPLACE TABLE $t USING foo AS SELECT id FROM source") + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(view)).isEmpty) + } + } + } } - test("DropTable: if exists") { - intercept[NoSuchTableException] { - sql(s"DROP TABLE testcat.db.notbl") + test("SPARK-33492: AppendData should refresh cache") { + import testImplicits._ + + val t = "testcat.ns.t" + val view = "view" + withTable(t) { + withTempView(view) { + Seq((1, "a")).toDF("i", "j").write.saveAsTable(t) + sql(s"CACHE TABLE $view AS SELECT i FROM $t") + checkAnswer(sql(s"SELECT * FROM $t"), 
Row(1, "a") :: Nil) + checkAnswer(sql(s"SELECT * FROM $view"), Row(1) :: Nil) + + Seq((2, "b")).toDF("i", "j").write.mode(SaveMode.Append).saveAsTable(t) + + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(view)).isDefined) + checkAnswer(sql(s"SELECT * FROM $t"), Row(1, "a") :: Row(2, "b") :: Nil) + checkAnswer(sql(s"SELECT * FROM $view"), Row(1) :: Row(2) :: Nil) + } + } + } + + test("SPARK-33492: OverwriteByExpression should refresh cache") { + val t = "testcat.ns.t" + val view = "view" + withTable(t) { + withTempView(view) { + sql(s"CREATE TABLE $t USING foo AS SELECT id, data FROM source") + sql(s"CACHE TABLE $view AS SELECT id FROM $t") + checkAnswer(sql(s"SELECT * FROM $t"), spark.table("source")) + checkAnswer(sql(s"SELECT * FROM $view"), spark.table("source").select("id")) + + sql(s"INSERT OVERWRITE TABLE $t VALUES (1, 'a')") + + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(view)).isDefined) + checkAnswer(sql(s"SELECT * FROM $t"), Row(1, "a") :: Nil) + checkAnswer(sql(s"SELECT * FROM $view"), Row(1) :: Nil) + } + } + } + + test("SPARK-33492: OverwritePartitionsDynamic should refresh cache") { + import testImplicits._ + + val t = "testcat.ns.t" + val view = "view" + withTable(t) { + withTempView(view) { + Seq((1, "a", 1)).toDF("i", "j", "k").write.partitionBy("k") saveAsTable(t) + sql(s"CACHE TABLE $view AS SELECT i FROM $t") + checkAnswer(sql(s"SELECT * FROM $t"), Row(1, "a", 1) :: Nil) + checkAnswer(sql(s"SELECT * FROM $view"), Row(1) :: Nil) + + Seq((2, "b", 1)).toDF("i", "j", "k").writeTo(t).overwritePartitions() + + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(view)).isDefined) + checkAnswer(sql(s"SELECT * FROM $t"), Row(2, "b", 1) :: Nil) + checkAnswer(sql(s"SELECT * FROM $view"), Row(2) :: Nil) + } + } + } + + test("SPARK-33829: Renaming a table should recreate a cache while retaining the old cache info") { + withTable("testcat.ns.old", "testcat.ns.new") { + def getStorageLevel(tableName: String): StorageLevel = { + val table = spark.table(tableName) + val optCachedData = spark.sharedState.cacheManager.lookupCachedData(table) + assert(optCachedData.isDefined) + optCachedData.get.cachedRepresentation.cacheBuilder.storageLevel + } + sql("CREATE TABLE testcat.ns.old USING foo AS SELECT id, data FROM source") + sql("CACHE TABLE testcat.ns.old OPTIONS('storageLevel' 'MEMORY_ONLY')") + val oldStorageLevel = getStorageLevel("testcat.ns.old") + + sql("ALTER TABLE testcat.ns.old RENAME TO ns.new") + val newStorageLevel = getStorageLevel("testcat.ns.new") + assert(oldStorageLevel === newStorageLevel) } - sql(s"DROP TABLE IF EXISTS testcat.db.notbl") } test("Relation: basic") { @@ -758,8 +897,9 @@ class DataSourceV2SQLSuite test("Relation: view text") { val t1 = "testcat.ns1.ns2.tbl" + val v1 = "view1" withTable(t1) { - withView("view1") { v1: String => + withView(v1) { sql(s"CREATE TABLE $t1 USING foo AS SELECT id, data FROM source") sql(s"CREATE VIEW $v1 AS SELECT * from $t1") checkAnswer(sql(s"TABLE $v1"), spark.table("source")) @@ -850,74 +990,13 @@ class DataSourceV2SQLSuite } } - test("ShowTables: using v2 catalog") { - spark.sql("CREATE TABLE testcat.db.table_name (id bigint, data string) USING foo") - spark.sql("CREATE TABLE testcat.n1.n2.db.table_name (id bigint, data string) USING foo") - - runShowTablesSql("SHOW TABLES FROM testcat.db", Seq(Row("db", "table_name"))) - - runShowTablesSql( - "SHOW TABLES FROM testcat.n1.n2.db", - Seq(Row("n1.n2.db", "table_name"))) - } - - test("ShowTables: using v2 catalog with a 
pattern") { - spark.sql("CREATE TABLE testcat.db.table (id bigint, data string) USING foo") - spark.sql("CREATE TABLE testcat.db.table_name_1 (id bigint, data string) USING foo") - spark.sql("CREATE TABLE testcat.db.table_name_2 (id bigint, data string) USING foo") - spark.sql("CREATE TABLE testcat.db2.table_name_2 (id bigint, data string) USING foo") - - runShowTablesSql( - "SHOW TABLES FROM testcat.db", - Seq( - Row("db", "table"), - Row("db", "table_name_1"), - Row("db", "table_name_2"))) - - runShowTablesSql( - "SHOW TABLES FROM testcat.db LIKE '*name*'", - Seq(Row("db", "table_name_1"), Row("db", "table_name_2"))) - - runShowTablesSql( - "SHOW TABLES FROM testcat.db LIKE '*2'", - Seq(Row("db", "table_name_2"))) - } - - test("ShowTables: using v2 catalog, namespace doesn't exist") { - runShowTablesSql("SHOW TABLES FROM testcat.unknown", Seq()) - } - - test("ShowTables: using v1 catalog") { - runShowTablesSql( - "SHOW TABLES FROM default", - Seq(Row("", "source", true), Row("", "source2", true)), - expectV2Catalog = false) - } - - test("ShowTables: using v1 catalog, db doesn't exist ") { - // 'db' below resolves to a database name for v1 catalog because there is no catalog named - // 'db' and there is no default catalog set. - val exception = intercept[NoSuchDatabaseException] { - runShowTablesSql("SHOW TABLES FROM db", Seq(), expectV2Catalog = false) - } - - assert(exception.getMessage.contains("Database 'db' not found")) - } - - test("ShowTables: using v1 catalog, db name with multipartIdentifier ('a.b') is not allowed.") { - val exception = intercept[AnalysisException] { - runShowTablesSql("SHOW TABLES FROM a.b", Seq(), expectV2Catalog = false) - } - - assert(exception.getMessage.contains("The database name is not valid: a.b")) - } - test("ShowViews: using v1 catalog, db name with multipartIdentifier ('a.b') is not allowed.") { val exception = intercept[AnalysisException] { - sql("SHOW TABLES FROM a.b") + sql("SHOW VIEWS FROM a.b") } - assert(exception.getMessage.contains("The database name is not valid: a.b")) + assert(exception.getMessage.contains( + "Nested databases are not supported by v1 session catalog: a.b")) } test("ShowViews: using v2 catalog, command not supported.") { @@ -929,112 +1008,6 @@ class DataSourceV2SQLSuite " only SessionCatalog supports this command.")) } - test("ShowTables: using v2 catalog with empty namespace") { - spark.sql("CREATE TABLE testcat.table (id bigint, data string) USING foo") - runShowTablesSql("SHOW TABLES FROM testcat", Seq(Row("", "table"))) - } - - test("ShowTables: namespace is not specified and default v2 catalog is set") { - spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat") - spark.sql("CREATE TABLE testcat.table (id bigint, data string) USING foo") - - // v2 catalog is used where default namespace is empty for TestInMemoryTableCatalog. - runShowTablesSql("SHOW TABLES", Seq(Row("", "table"))) - } - - test("ShowTables: namespace not specified and default v2 catalog not set - fallback to v1") { - runShowTablesSql( - "SHOW TABLES", - Seq(Row("", "source", true), Row("", "source2", true)), - expectV2Catalog = false) - - runShowTablesSql( - "SHOW TABLES LIKE '*2'", - Seq(Row("", "source2", true)), - expectV2Catalog = false) - } - - test("ShowTables: change current catalog and namespace with USE statements") { - sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo") - - // Initially, the v2 session catalog (current catalog) is used. 
- runShowTablesSql( - "SHOW TABLES", Seq(Row("", "source", true), Row("", "source2", true)), - expectV2Catalog = false) - - // Update the current catalog, and no table is matched since the current namespace is Array(). - sql("USE testcat") - runShowTablesSql("SHOW TABLES", Seq()) - - // Update the current namespace to match ns1.ns2.table. - sql("USE testcat.ns1.ns2") - runShowTablesSql("SHOW TABLES", Seq(Row("ns1.ns2", "table"))) - } - - private def runShowTablesSql( - sqlText: String, - expected: Seq[Row], - expectV2Catalog: Boolean = true): Unit = { - val schema = if (expectV2Catalog) { - new StructType() - .add("namespace", StringType, nullable = false) - .add("tableName", StringType, nullable = false) - } else { - new StructType() - .add("database", StringType, nullable = false) - .add("tableName", StringType, nullable = false) - .add("isTemporary", BooleanType, nullable = false) - } - - val df = spark.sql(sqlText) - assert(df.schema === schema) - assert(expected === df.collect()) - } - - test("SHOW TABLE EXTENDED not valid v1 database") { - def testV1CommandNamespace(sqlCommand: String, namespace: String): Unit = { - val e = intercept[AnalysisException] { - sql(sqlCommand) - } - assert(e.message.contains(s"The database name is not valid: ${namespace}")) - } - - val namespace = "testcat.ns1.ns2" - val table = "tbl" - withTable(s"$namespace.$table") { - sql(s"CREATE TABLE $namespace.$table (id bigint, data string) " + - s"USING foo PARTITIONED BY (id)") - - testV1CommandNamespace(s"SHOW TABLE EXTENDED FROM $namespace LIKE 'tb*'", - namespace) - testV1CommandNamespace(s"SHOW TABLE EXTENDED IN $namespace LIKE 'tb*'", - namespace) - testV1CommandNamespace("SHOW TABLE EXTENDED " + - s"FROM $namespace LIKE 'tb*' PARTITION(id=1)", - namespace) - testV1CommandNamespace("SHOW TABLE EXTENDED " + - s"IN $namespace LIKE 'tb*' PARTITION(id=1)", - namespace) - } - } - - test("SHOW TABLE EXTENDED valid v1") { - val expected = Seq(Row("", "source", true), Row("", "source2", true)) - val schema = new StructType() - .add("database", StringType, nullable = false) - .add("tableName", StringType, nullable = false) - .add("isTemporary", BooleanType, nullable = false) - .add("information", StringType, nullable = false) - - val df = sql("SHOW TABLE EXTENDED FROM default LIKE '*source*'") - val result = df.collect() - val resultWithoutInfo = result.map{ case Row(db, table, temp, _) => Row(db, table, temp)} - - assert(df.schema === schema) - assert(resultWithoutInfo === expected) - result.foreach{ case Row(_, _, _, info: String) => assert(info.nonEmpty)} - } - test("CreateNameSpace: basic tests") { // Session catalog is used. 
withNamespace("ns") { @@ -1329,95 +1302,6 @@ class DataSourceV2SQLSuite } } - test("ShowNamespaces: show root namespaces with default v2 catalog") { - spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat") - - testShowNamespaces("SHOW NAMESPACES", Seq()) - - spark.sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo") - spark.sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo") - spark.sql("CREATE TABLE testcat.ns2.table (id bigint) USING foo") - - testShowNamespaces("SHOW NAMESPACES", Seq("ns1", "ns2")) - testShowNamespaces("SHOW NAMESPACES LIKE '*1*'", Seq("ns1")) - } - - test("ShowNamespaces: show namespaces with v2 catalog") { - spark.sql("CREATE TABLE testcat.ns1.table (id bigint) USING foo") - spark.sql("CREATE TABLE testcat.ns1.ns1_1.table (id bigint) USING foo") - spark.sql("CREATE TABLE testcat.ns1.ns1_2.table (id bigint) USING foo") - spark.sql("CREATE TABLE testcat.ns2.table (id bigint) USING foo") - spark.sql("CREATE TABLE testcat.ns2.ns2_1.table (id bigint) USING foo") - - // Look up only with catalog name, which should list root namespaces. - testShowNamespaces("SHOW NAMESPACES IN testcat", Seq("ns1", "ns2")) - - // Look up sub-namespaces. - testShowNamespaces("SHOW NAMESPACES IN testcat.ns1", Seq("ns1.ns1_1", "ns1.ns1_2")) - testShowNamespaces("SHOW NAMESPACES IN testcat.ns1 LIKE '*2*'", Seq("ns1.ns1_2")) - testShowNamespaces("SHOW NAMESPACES IN testcat.ns2", Seq("ns2.ns2_1")) - - // Try to look up namespaces that do not exist. - testShowNamespaces("SHOW NAMESPACES IN testcat.ns3", Seq()) - testShowNamespaces("SHOW NAMESPACES IN testcat.ns1.ns3", Seq()) - } - - test("ShowNamespaces: default v2 catalog is not set") { - spark.sql("CREATE TABLE testcat.ns.table (id bigint) USING foo") - - // The current catalog is resolved to a v2 session catalog. - testShowNamespaces("SHOW NAMESPACES", Seq("default")) - } - - test("ShowNamespaces: default v2 catalog doesn't support namespace") { - spark.conf.set( - "spark.sql.catalog.testcat_no_namspace", - classOf[BasicInMemoryTableCatalog].getName) - spark.conf.set(SQLConf.DEFAULT_CATALOG.key, "testcat_no_namspace") - - val exception = intercept[AnalysisException] { - sql("SHOW NAMESPACES") - } - - assert(exception.getMessage.contains("does not support namespaces")) - } - - test("ShowNamespaces: v2 catalog doesn't support namespace") { - spark.conf.set( - "spark.sql.catalog.testcat_no_namspace", - classOf[BasicInMemoryTableCatalog].getName) - - val exception = intercept[AnalysisException] { - sql("SHOW NAMESPACES in testcat_no_namspace") - } - - assert(exception.getMessage.contains("does not support namespaces")) - } - - test("ShowNamespaces: session catalog is used and namespace doesn't exist") { - val exception = intercept[AnalysisException] { - sql("SHOW NAMESPACES in dummy") - } - - assert(exception.getMessage.contains("Namespace 'dummy' not found")) - } - - test("ShowNamespaces: change catalog and namespace with USE statements") { - sql("CREATE TABLE testcat.ns1.ns2.table (id bigint) USING foo") - - // Initially, the current catalog is a v2 session catalog. - testShowNamespaces("SHOW NAMESPACES", Seq("default")) - - // Update the current catalog to 'testcat'. - sql("USE testcat") - testShowNamespaces("SHOW NAMESPACES", Seq("ns1")) - - // Update the current namespace to 'ns1'. - sql("USE ns1") - // 'SHOW NAMESPACES' is not affected by the current namespace and lists root namespaces. 
- testShowNamespaces("SHOW NAMESPACES", Seq("ns1")) - } - private def testShowNamespaces( sqlText: String, expected: Seq[String]): Unit = { @@ -1728,6 +1612,56 @@ class DataSourceV2SQLSuite } } + test("SPARK-32990: REFRESH TABLE should resolve to a temporary view first") { + withTable("testcat.ns.t") { + withTempView("t") { + sql("CREATE TABLE testcat.ns.t (id bigint) USING foo") + sql("CREATE TEMPORARY VIEW t AS SELECT 2") + sql("USE testcat.ns") + + val testCatalog = catalog("testcat").asTableCatalog.asInstanceOf[InMemoryTableCatalog] + val identifier = Identifier.of(Array("ns"), "t") + + assert(!testCatalog.isTableInvalidated(identifier)) + sql("REFRESH TABLE t") + assert(!testCatalog.isTableInvalidated(identifier)) + } + } + } + + test("SPARK-33435: REFRESH TABLE should invalidate all caches referencing the table") { + val tblName = "testcat.ns.t" + withTable(tblName) { + withTempView("t") { + sql(s"CREATE TABLE $tblName (id bigint) USING foo") + sql(s"CACHE TABLE t AS SELECT id FROM $tblName") + + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table("t")).isDefined) + sql(s"REFRESH TABLE $tblName") + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table("t")).isEmpty) + } + } + } + + test("SPARK-33653: REFRESH TABLE should recache the target table itself") { + val tblName = "testcat.ns.t" + withTable(tblName) { + sql(s"CREATE TABLE $tblName (id bigint) USING foo") + + // if the table is not cached, refreshing it should not recache it + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(tblName)).isEmpty) + sql(s"REFRESH TABLE $tblName") + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(tblName)).isEmpty) + + sql(s"CACHE TABLE $tblName") + + // after caching & refreshing the table should be recached + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(tblName)).isDefined) + sql(s"REFRESH TABLE $tblName") + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(tblName)).isDefined) + } + } + test("REPLACE TABLE: v1 table") { val e = intercept[AnalysisException] { sql(s"CREATE OR REPLACE TABLE tbl (a int) USING ${classOf[SimpleScanSource].getName}") @@ -1792,6 +1726,20 @@ class DataSourceV2SQLSuite } } + test("DeleteFrom: delete with unsupported predicates") { + val t = "testcat.ns1.ns2.tbl" + withTable(t) { + sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo") + sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)") + val exc = intercept[AnalysisException] { + sql(s"DELETE FROM $t WHERE id > 3 AND p > 3") + } + + assert(spark.table(t).count === 3) + assert(exc.getMessage.contains(s"Cannot delete from table $t")) + } + } + test("DeleteFrom: DELETE is only supported with v2 tables") { // unset this config to use the default v2 session catalog. 
spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key) @@ -1807,6 +1755,22 @@ class DataSourceV2SQLSuite } } + test("SPARK-33652: DeleteFrom should refresh caches referencing the table") { + val t = "testcat.ns1.ns2.tbl" + val view = "view" + withTable(t) { + withTempView(view) { + sql(s"CREATE TABLE $t (id bigint, data string, p int) USING foo PARTITIONED BY (id, p)") + sql(s"INSERT INTO $t VALUES (2L, 'a', 2), (2L, 'b', 3), (3L, 'c', 3)") + sql(s"CACHE TABLE view AS SELECT id FROM $t") + assert(spark.table(view).count() == 3) + + sql(s"DELETE FROM $t WHERE id = 2") + assert(spark.table(view).count() == 1) + } + } + } + test("UPDATE TABLE") { val t = "testcat.ns1.ns2.tbl" withTable(t) { @@ -1925,10 +1889,16 @@ class DataSourceV2SQLSuite test("AlterTable: rename table basic test") { withTable("testcat.ns1.new") { - sql(s"CREATE TABLE testcat.ns1.ns2.old USING foo AS SELECT id, data FROM source") + sql("CREATE TABLE testcat.ns1.ns2.old USING foo AS SELECT id, data FROM source") checkAnswer(sql("SHOW TABLES FROM testcat.ns1.ns2"), Seq(Row("ns1.ns2", "old"))) - sql(s"ALTER TABLE testcat.ns1.ns2.old RENAME TO ns1.new") + val e = intercept[AnalysisException] { + sql("ALTER VIEW testcat.ns1.ns2.old RENAME TO ns1.new") + } + assert(e.getMessage.contains( + "Cannot rename a table with ALTER VIEW. Please use ALTER TABLE instead")) + + sql("ALTER TABLE testcat.ns1.ns2.old RENAME TO ns1.new") checkAnswer(sql("SHOW TABLES FROM testcat.ns1.ns2"), Seq.empty) checkAnswer(sql("SHOW TABLES FROM testcat.ns1"), Seq(Row("ns1", "new"))) } @@ -1938,15 +1908,16 @@ class DataSourceV2SQLSuite val e = intercept[AnalysisException] { sql(s"ALTER VIEW testcat.ns.tbl RENAME TO ns.view") } - assert(e.getMessage.contains("Renaming view is not supported in v2 catalogs")) + assert(e.getMessage.contains( + "Table or view not found: testcat.ns.tbl")) } test("ANALYZE TABLE") { val t = "testcat.ns1.ns2.tbl" withTable(t) { spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - testV1Command("ANALYZE TABLE", s"$t COMPUTE STATISTICS") - testV1CommandSupportingTempView("ANALYZE TABLE", s"$t COMPUTE STATISTICS FOR ALL COLUMNS") + testNotSupportedV2Command("ANALYZE TABLE", s"$t COMPUTE STATISTICS") + testNotSupportedV2Command("ANALYZE TABLE", s"$t COMPUTE STATISTICS FOR ALL COLUMNS") } } @@ -1954,7 +1925,7 @@ class DataSourceV2SQLSuite val t = "testcat.ns1.ns2.tbl" withTable(t) { spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - testV1Command("MSCK REPAIR TABLE", t) + testNotSupportedV2Command("MSCK REPAIR TABLE", t) } } @@ -1968,23 +1939,8 @@ class DataSourceV2SQLSuite |PARTITIONED BY (id) """.stripMargin) - testV1Command("TRUNCATE TABLE", t) - testV1Command("TRUNCATE TABLE", s"$t PARTITION(id='1')") - } - } - - test("SHOW PARTITIONS") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { - sql( - s""" - |CREATE TABLE $t (id bigint, data string) - |USING foo - |PARTITIONED BY (id) - """.stripMargin) - - testV1Command("SHOW PARTITIONS", t) - testV1Command("SHOW PARTITIONS", s"$t PARTITION(id='1')") + testNotSupportedV2Command("TRUNCATE TABLE", t) + testNotSupportedV2Command("TRUNCATE TABLE", s"$t PARTITION(id='1')") } } @@ -1998,10 +1954,10 @@ class DataSourceV2SQLSuite |PARTITIONED BY (id) """.stripMargin) - testV1Command("LOAD DATA", s"INPATH 'filepath' INTO TABLE $t") - testV1Command("LOAD DATA", s"LOCAL INPATH 'filepath' INTO TABLE $t") - testV1Command("LOAD DATA", s"LOCAL INPATH 'filepath' OVERWRITE INTO TABLE $t") - testV1Command("LOAD DATA", + testNotSupportedV2Command("LOAD DATA", s"INPATH 
'filepath' INTO TABLE $t") + testNotSupportedV2Command("LOAD DATA", s"LOCAL INPATH 'filepath' INTO TABLE $t") + testNotSupportedV2Command("LOAD DATA", s"LOCAL INPATH 'filepath' OVERWRITE INTO TABLE $t") + testNotSupportedV2Command("LOAD DATA", s"LOCAL INPATH 'filepath' OVERWRITE INTO TABLE $t PARTITION(id=1)") } } @@ -2010,91 +1966,44 @@ class DataSourceV2SQLSuite val t = "testcat.ns1.ns2.tbl" withTable(t) { spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - testV1CommandSupportingTempView("SHOW CREATE TABLE", t) + testNotSupportedV2Command("SHOW CREATE TABLE", t) + testNotSupportedV2Command("SHOW CREATE TABLE", s"$t AS SERDE") } } - test("CACHE TABLE") { + test("CACHE/UNCACHE TABLE") { val t = "testcat.ns1.ns2.tbl" withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - - testV1CommandSupportingTempView("CACHE TABLE", t) - - val e = intercept[AnalysisException] { - sql(s"CACHE LAZY TABLE $t") + def isCached(table: String): Boolean = { + spark.table(table).queryExecution.withCachedData.isInstanceOf[InMemoryRelation] } - assert(e.message.contains("CACHE TABLE is only supported with temp views or v1 tables")) - } - } - - test("UNCACHE TABLE") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { - sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - testV1CommandSupportingTempView("UNCACHE TABLE", t) - testV1CommandSupportingTempView("UNCACHE TABLE", s"IF EXISTS $t") - } - } - - test("SHOW COLUMNS") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") + sql(s"CACHE TABLE $t") + assert(isCached(t)) - testV1CommandSupportingTempView("SHOW COLUMNS", s"FROM $t") - testV1CommandSupportingTempView("SHOW COLUMNS", s"IN $t") - - val e3 = intercept[AnalysisException] { - sql(s"SHOW COLUMNS FROM tbl IN testcat.ns1.ns2") - } - assert(e3.message.contains("Namespace name should have " + - "only one part if specified: testcat.ns1.ns2")) + sql(s"UNCACHE TABLE $t") + assert(!isCached(t)) } - } - test("ALTER TABLE RECOVER PARTITIONS") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - val e = intercept[AnalysisException] { - sql(s"ALTER TABLE $t RECOVER PARTITIONS") - } - assert(e.message.contains("ALTER TABLE RECOVER PARTITIONS is only supported with v1 tables")) + // Test a scenario where a table does not exist. + val e = intercept[AnalysisException] { + sql(s"UNCACHE TABLE $t") } - } + assert(e.message.contains("Table or view not found: testcat.ns1.ns2.tbl")) - test("ALTER TABLE ADD PARTITION") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)") - val e = intercept[AnalysisException] { - sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") - } - assert(e.message.contains("ALTER TABLE ADD PARTITION is only supported with v1 tables")) - } + // If "IF EXISTS" is set, UNCACHE TABLE will not throw an exception. 
+ sql(s"UNCACHE TABLE IF EXISTS $t") } - test("ALTER TABLE RENAME PARTITION") { + test("SHOW COLUMNS") { val t = "testcat.ns1.ns2.tbl" withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)") - val e = intercept[AnalysisException] { - sql(s"ALTER TABLE $t PARTITION (id=1) RENAME TO PARTITION (id=2)") - } - assert(e.message.contains("ALTER TABLE RENAME PARTITION is only supported with v1 tables")) - } - } + spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - test("ALTER TABLE DROP PARTITIONS") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo PARTITIONED BY (id)") - val e = intercept[AnalysisException] { - sql(s"ALTER TABLE $t DROP PARTITION (id=1)") - } - assert(e.message.contains("ALTER TABLE DROP PARTITION is only supported with v1 tables")) + testNotSupportedV2Command("SHOW COLUMNS", s"FROM $t") + testNotSupportedV2Command("SHOW COLUMNS", s"IN $t") + testNotSupportedV2Command("SHOW COLUMNS", "FROM tbl IN testcat.ns1.ns2") } } @@ -2105,18 +2014,11 @@ class DataSourceV2SQLSuite val e = intercept[AnalysisException] { sql(s"ALTER TABLE $t SET SERDEPROPERTIES ('columns'='foo,bar', 'field.delim' = ',')") } - assert(e.message.contains("ALTER TABLE SerDe Properties is only supported with v1 tables")) + assert(e.message.contains( + "ALTER TABLE ... SET [SERDE|SERDEPROPERTIES] is not supported for v2 tables")) } } - test("ALTER VIEW AS QUERY") { - val v = "testcat.ns1.ns2.v" - val e = intercept[AnalysisException] { - sql(s"ALTER VIEW $v AS SELECT 1") - } - assert(e.message.contains("ALTER VIEW QUERY is only supported with temp views or v1 tables")) - } - test("CREATE VIEW") { val v = "testcat.ns1.ns2.v" val e = intercept[AnalysisException] { @@ -2273,7 +2175,7 @@ class DataSourceV2SQLSuite val e = intercept[AnalysisException] { // Since the following multi-part name starts with `globalTempDB`, it is resolved to - // the session catalog, not the `gloabl_temp` v2 catalog. + // the session catalog, not the `global_temp` v2 catalog. sql(s"CREATE TABLE $globalTempDB.ns1.ns2.tbl (id bigint, data string) USING json") } assert(e.message.contains( @@ -2327,7 +2229,6 @@ class DataSourceV2SQLSuite verify(s"CACHE TABLE $t") verify(s"UNCACHE TABLE $t") verify(s"TRUNCATE TABLE $t") - verify(s"SHOW PARTITIONS $t") verify(s"SHOW COLUMNS FROM $t") } } @@ -2454,7 +2355,8 @@ class DataSourceV2SQLSuite withTempView("v") { sql("create global temp view v as select 1") val e = intercept[AnalysisException](sql("COMMENT ON TABLE global_temp.v IS NULL")) - assert(e.getMessage.contains("global_temp.v is a temp view not table.")) + assert(e.getMessage.contains( + "global_temp.v is a temp view. 
'COMMENT ON TABLE' expects a table")) } } @@ -2541,18 +2443,120 @@ class DataSourceV2SQLSuite } } - private def testV1Command(sqlCommand: String, sqlParams: String): Unit = { - val e = intercept[AnalysisException] { - sql(s"$sqlCommand $sqlParams") + test("SPARK-31255: Project a metadata column") { + val t1 = s"${catalogAndNamespace}table" + withTable(t1) { + sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format " + + "PARTITIONED BY (bucket(4, id), id)") + sql(s"INSERT INTO $t1 VALUES (1, 'a'), (2, 'b'), (3, 'c')") + + checkAnswer( + spark.sql(s"SELECT id, data, _partition FROM $t1"), + Seq(Row(1, "a", "3/1"), Row(2, "b", "0/2"), Row(3, "c", "1/3"))) + } + } + + test("SPARK-31255: Projects data column when metadata column has the same name") { + val t1 = s"${catalogAndNamespace}table" + withTable(t1) { + sql(s"CREATE TABLE $t1 (index bigint, data string) USING $v2Format " + + "PARTITIONED BY (bucket(4, index), index)") + sql(s"INSERT INTO $t1 VALUES (3, 'c'), (2, 'b'), (1, 'a')") + + checkAnswer( + spark.sql(s"SELECT index, data, _partition FROM $t1"), + Seq(Row(3, "c", "1/3"), Row(2, "b", "0/2"), Row(1, "a", "3/1"))) + } + } + + test("SPARK-31255: * expansion does not include metadata columns") { + val t1 = s"${catalogAndNamespace}table" + withTable(t1) { + sql(s"CREATE TABLE $t1 (id bigint, data string) USING $v2Format " + + "PARTITIONED BY (bucket(4, id), id)") + sql(s"INSERT INTO $t1 VALUES (3, 'c'), (2, 'b'), (1, 'a')") + + checkAnswer( + spark.sql(s"SELECT * FROM $t1"), + Seq(Row(3, "c"), Row(2, "b"), Row(1, "a"))) + } + } + + test("SPARK-33505: insert into partitioned table") { + val t = "testpart.ns1.ns2.tbl" + withTable(t) { + sql(s""" + |CREATE TABLE $t (id bigint, city string, data string) + |USING foo + |PARTITIONED BY (id, city)""".stripMargin) + val partTable = catalog("testpart").asTableCatalog + .loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")).asInstanceOf[InMemoryPartitionTable] + val expectedPartitionIdent = InternalRow.fromSeq(Seq(1, UTF8String.fromString("NY"))) + assert(!partTable.partitionExists(expectedPartitionIdent)) + sql(s"INSERT INTO $t PARTITION(id = 1, city = 'NY') SELECT 'abc'") + assert(partTable.partitionExists(expectedPartitionIdent)) + // Insert into the existing partition must not fail + sql(s"INSERT INTO $t PARTITION(id = 1, city = 'NY') SELECT 'def'") + assert(partTable.partitionExists(expectedPartitionIdent)) + } + } + + test("View commands are not supported in v2 catalogs") { + def validateViewCommand( + sql: String, + catalogName: String, + viewName: String, + cmdName: String): Unit = { + assertAnalysisError( + sql, + s"Cannot specify catalog `$catalogName` for view $viewName because view support " + + s"in v2 catalog has not been implemented yet. $cmdName expects a view.") + } + + validateViewCommand("DROP VIEW testcat.v", "testcat", "v", "DROP VIEW") + validateViewCommand( + "ALTER VIEW testcat.v SET TBLPROPERTIES ('key' = 'val')", + "testcat", + "v", + "ALTER VIEW ... SET TBLPROPERTIES") + validateViewCommand( + "ALTER VIEW testcat.v UNSET TBLPROPERTIES ('key')", + "testcat", + "v", + "ALTER VIEW ... UNSET TBLPROPERTIES") + validateViewCommand( + "ALTER VIEW testcat.v AS SELECT 1", + "testcat", + "v", + "ALTER VIEW ... AS") + } + + test("SPARK-33924: INSERT INTO .. 
PARTITION preserves the partition location") { + val t = "testpart.ns1.ns2.tbl" + withTable(t) { + sql(s""" + |CREATE TABLE $t (id bigint, city string, data string) + |USING foo + |PARTITIONED BY (id, city)""".stripMargin) + val partTable = catalog("testpart").asTableCatalog + .loadTable(Identifier.of(Array("ns1", "ns2"), "tbl")).asInstanceOf[InMemoryPartitionTable] + + val loc = "partition_location" + sql(s"ALTER TABLE $t ADD PARTITION (id = 1, city = 'NY') LOCATION '$loc'") + + val ident = InternalRow.fromSeq(Seq(1, UTF8String.fromString("NY"))) + assert(partTable.loadPartitionMetadata(ident).get("location") === loc) + + sql(s"INSERT INTO $t PARTITION(id = 1, city = 'NY') SELECT 'abc'") + assert(partTable.loadPartitionMetadata(ident).get("location") === loc) } - assert(e.message.contains(s"$sqlCommand is only supported with v1 tables")) } - private def testV1CommandSupportingTempView(sqlCommand: String, sqlParams: String): Unit = { + private def testNotSupportedV2Command(sqlCommand: String, sqlParams: String): Unit = { val e = intercept[AnalysisException] { sql(s"$sqlCommand $sqlParams") } - assert(e.message.contains(s"$sqlCommand is only supported with temp views or v1 tables")) + assert(e.message.contains(s"$sqlCommand is not supported for v2 tables")) } private def assertAnalysisError(sqlStatement: String, expectedError: String): Unit = { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2Suite.scala index a9c521eb46499..28cb448c400c7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2Suite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2Suite.scala @@ -157,6 +157,19 @@ class DataSourceV2Suite extends QueryTest with SharedSparkSession with AdaptiveS } } + test("SPARK-33369: Skip schema inference in DataframeWriter.save() if table provider " + + "supports external metadata") { + withTempDir { dir => + val cls = classOf[SupportsExternalMetadataWritableDataSource].getName + spark.range(10).select('id as 'i, -'id as 'j).write.format(cls) + .option("path", dir.getCanonicalPath).mode("append").save() + val schema = new StructType().add("i", "long").add("j", "long") + checkAnswer( + spark.read.format(cls).option("path", dir.getCanonicalPath).schema(schema).load(), + spark.range(10).select('id, -'id)) + } + } + test("partitioning reporting") { import org.apache.spark.sql.functions.{count, sum} Seq(classOf[PartitionAwareDataSource], classOf[JavaPartitionAwareDataSource]).foreach { cls => @@ -268,7 +281,7 @@ class DataSourceV2Suite extends QueryTest with SharedSparkSession with AdaptiveS } } // this input data will fail to read middle way. 
- val input = spark.range(10).select(failingUdf('id).as('i)).select('i, -'i as 'j) + val input = spark.range(15).select(failingUdf('id).as('i)).select('i, -'i as 'j) val e3 = intercept[SparkException] { input.write.format(cls.getName).option("path", path).mode("overwrite").save() } @@ -413,6 +426,16 @@ class DataSourceV2Suite extends QueryTest with SharedSparkSession with AdaptiveS } } } + + test("SPARK-33267: push down with condition 'in (..., null)' should not throw NPE") { + Seq(classOf[AdvancedDataSourceV2], classOf[JavaAdvancedDataSourceV2]).foreach { cls => + withClue(cls.getName) { + val df = spark.read.format(cls.getName).load() + // Before SPARK-33267, the query below simply threw an NPE. + df.select('i).where("i in (1, null)").collect() + } + } + } } @@ -761,6 +784,16 @@ class SimpleWriteOnlyDataSource extends SimpleWritableDataSource { } } +class SupportsExternalMetadataWritableDataSource extends SimpleWritableDataSource { + override def supportsExternalMetadata(): Boolean = true + + override def inferSchema(options: CaseInsensitiveStringMap): StructType = { + throw new IllegalArgumentException( + "Dataframe writer should not require inferring table schema when the data source" + + " supports external metadata.") + } +} + class ReportStatisticsDataSource extends SimpleWritableDataSource { class MyScanBuilder extends SimpleScanBuilder diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DatasourceV2SQLBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DatasourceV2SQLBase.scala new file mode 100644 index 0000000000000..8922eea8e0ae6 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DatasourceV2SQLBase.scala @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.spark.sql.connector + +import org.scalatest.BeforeAndAfter + +import org.apache.spark.sql.QueryTest +import org.apache.spark.sql.connector.catalog.CatalogPlugin +import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION +import org.apache.spark.sql.test.SharedSparkSession + +trait DatasourceV2SQLBase + extends QueryTest with SharedSparkSession with BeforeAndAfter { + + protected def catalog(name: String): CatalogPlugin = { + spark.sessionState.catalogManager.catalog(name) + } + + before { + spark.conf.set("spark.sql.catalog.testcat", classOf[InMemoryTableCatalog].getName) + spark.conf.set("spark.sql.catalog.testpart", classOf[InMemoryPartitionTableCatalog].getName) + spark.conf.set( + "spark.sql.catalog.testcat_atomic", classOf[StagingInMemoryTableCatalog].getName) + spark.conf.set("spark.sql.catalog.testcat2", classOf[InMemoryTableCatalog].getName) + spark.conf.set( + V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[InMemoryTableSessionCatalog].getName) + + val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data") + df.createOrReplaceTempView("source") + val df2 = spark.createDataFrame(Seq((4L, "d"), (5L, "e"), (6L, "f"))).toDF("id", "data") + df2.createOrReplaceTempView("source2") + } + + after { + spark.sessionState.catalog.reset() + spark.sessionState.catalogManager.reset() + spark.sessionState.conf.clear() + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/SupportsCatalogOptionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/SupportsCatalogOptionsSuite.scala index 550bec7505422..3aad644655aa6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/SupportsCatalogOptionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/SupportsCatalogOptionsSuite.scala @@ -22,12 +22,13 @@ import scala.util.Try import org.scalatest.BeforeAndAfter +import org.apache.spark.SparkException import org.apache.spark.sql.{DataFrame, QueryTest, SaveMode} import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.catalyst.plans.logical.{AppendData, LogicalPlan, OverwriteByExpression} import org.apache.spark.sql.connector.catalog.{Identifier, SupportsCatalogOptions, TableCatalog} import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME -import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform} +import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform} import org.apache.spark.sql.execution.QueryExecution import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION @@ -254,6 +255,22 @@ class SupportsCatalogOptionsSuite extends QueryTest with SharedSparkSession with } } + test("SPARK-33240: fail the query when instantiation on session catalog fails") { + try { + spark.sessionState.catalogManager.reset() + spark.conf.set( + V2_SESSION_CATALOG_IMPLEMENTATION.key, "InvalidCatalogClass") + val e = intercept[SparkException] { + sql(s"create table t1 (id bigint) using $format") + } + + assert(e.getMessage.contains("Cannot find catalog plugin class")) + assert(e.getMessage.contains("InvalidCatalogClass")) + } finally { + spark.sessionState.catalogManager.reset() + } + } + private def checkV2Identifiers( plan: LogicalPlan, identifier: String = "t1", diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/TableCapabilityCheckSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/connector/TableCapabilityCheckSuite.scala index 1d016496df2de..bad21aac41712 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/TableCapabilityCheckSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/TableCapabilityCheckSuite.scala @@ -26,7 +26,7 @@ import org.apache.spark.sql.catalyst.analysis.{AnalysisSuite, NamedRelation} import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Literal} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.streaming.StreamingRelationV2 -import org.apache.spark.sql.connector.catalog.{CatalogPlugin, Identifier, Table, TableCapability, TableProvider} +import org.apache.spark.sql.connector.catalog.{Table, TableCapability} import org.apache.spark.sql.connector.catalog.TableCapability._ import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, TableCapabilityCheck} @@ -46,6 +46,8 @@ class TableCapabilityCheckSuite extends AnalysisSuite with SharedSparkSession { table, CaseInsensitiveStringMap.empty(), TableCapabilityCheckSuite.schema.toAttributes, + None, + None, v1Relation) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/TestV2SessionCatalogBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/TestV2SessionCatalogBase.scala index 637cf2fd16515..bf2749d1afc53 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/TestV2SessionCatalogBase.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/TestV2SessionCatalogBase.scala @@ -19,11 +19,12 @@ package org.apache.spark.sql.connector import java.util import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.atomic.AtomicBoolean import scala.collection.JavaConverters._ -import org.apache.spark.sql.catalyst.analysis.NoSuchTableException -import org.apache.spark.sql.connector.catalog.{DelegatingCatalogExtension, Identifier, Table} +import org.apache.spark.sql.catalyst.catalog.CatalogTableType +import org.apache.spark.sql.connector.catalog.{DelegatingCatalogExtension, Identifier, Table, TableCatalog, V1Table} import org.apache.spark.sql.connector.expressions.Transform import org.apache.spark.sql.types.StructType @@ -36,6 +37,13 @@ private[connector] trait TestV2SessionCatalogBase[T <: Table] extends Delegating protected val tables: util.Map[Identifier, T] = new ConcurrentHashMap[Identifier, T]() + private val tableCreated: AtomicBoolean = new AtomicBoolean(false) + + private def addTable(ident: Identifier, table: T): Unit = { + tableCreated.set(true) + tables.put(ident, table) + } + protected def newTable( name: String, schema: StructType, @@ -47,10 +55,13 @@ private[connector] trait TestV2SessionCatalogBase[T <: Table] extends Delegating tables.get(ident) } else { // Table was created through the built-in catalog - val t = super.loadTable(ident) - val table = newTable(t.name(), t.schema(), t.partitioning(), t.properties()) - tables.put(ident, table) - table + super.loadTable(ident) match { + case v1Table: V1Table if v1Table.v1Table.tableType == CatalogTableType.VIEW => v1Table + case t => + val table = newTable(t.name(), t.schema(), t.partitioning(), t.properties()) + addTable(ident, table) + table + } } } @@ -59,9 +70,23 @@ private[connector] trait TestV2SessionCatalogBase[T <: Table] extends Delegating schema: StructType, partitions: Array[Transform], properties: util.Map[String, String]): Table = { - val created = 
super.createTable(ident, schema, partitions, properties) - val t = newTable(created.name(), schema, partitions, properties) - tables.put(ident, t) + val key = TestV2SessionCatalogBase.SIMULATE_ALLOW_EXTERNAL_PROPERTY + val propsWithLocation = if (properties.containsKey(key)) { + // Always set a location so that CREATE EXTERNAL TABLE won't fail with LOCATION not specified. + if (!properties.containsKey(TableCatalog.PROP_LOCATION)) { + val newProps = new util.HashMap[String, String]() + newProps.putAll(properties) + newProps.put(TableCatalog.PROP_LOCATION, "file:/abc") + newProps + } else { + properties + } + } else { + properties + } + val created = super.createTable(ident, schema, partitions, propsWithLocation) + val t = newTable(created.name(), schema, partitions, propsWithLocation) + addTable(ident, t) t } @@ -71,8 +96,15 @@ private[connector] trait TestV2SessionCatalogBase[T <: Table] extends Delegating } def clearTables(): Unit = { - assert(!tables.isEmpty, "Tables were empty, maybe didn't use the session catalog code path?") + assert( + tableCreated.get, + "Tables are not created, maybe didn't use the session catalog code path?") tables.keySet().asScala.foreach(super.dropTable) tables.clear() + tableCreated.set(false) } } + +object TestV2SessionCatalogBase { + val SIMULATE_ALLOW_EXTERNAL_PROPERTY = "spark.sql.test.simulateAllowExternal" +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/V1ReadFallbackSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/V1ReadFallbackSuite.scala index 74f2ca14234d2..9beef690cba32 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/V1ReadFallbackSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/V1ReadFallbackSuite.scala @@ -23,7 +23,7 @@ import scala.collection.JavaConverters._ import org.apache.spark.rdd.RDD import org.apache.spark.sql.{DataFrame, QueryTest, Row, SparkSession, SQLContext} -import org.apache.spark.sql.connector.catalog.{Identifier, SupportsRead, Table, TableCapability, TableProvider} +import org.apache.spark.sql.connector.catalog.{Identifier, SupportsRead, Table, TableCapability} import org.apache.spark.sql.connector.expressions.Transform import org.apache.spark.sql.connector.read.{Scan, ScanBuilder, SupportsPushDownFilters, SupportsPushDownRequiredColumns, V1Scan} import org.apache.spark.sql.execution.RowDataSourceScanExec diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/V1WriteFallbackSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/V1WriteFallbackSuite.scala index 4b52a4cbf4116..45ddc6a6fcfc6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/connector/V1WriteFallbackSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/V1WriteFallbackSuite.scala @@ -24,14 +24,17 @@ import scala.collection.mutable import org.scalatest.BeforeAndAfter +import org.apache.spark.rdd.RDD import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row, SaveMode, SparkSession, SQLContext} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.catalyst.trees.TreeNodeTag -import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table, TableCapability} +import org.apache.spark.sql.connector.catalog.{Identifier, SupportsRead, SupportsWrite, Table, TableCapability} import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform} -import org.apache.spark.sql.connector.write.{LogicalWriteInfo, 
LogicalWriteInfoImpl, SupportsOverwrite, SupportsTruncate, V1WriteBuilder, WriteBuilder} +import org.apache.spark.sql.connector.read.{Scan, ScanBuilder, V1Scan} +import org.apache.spark.sql.connector.write.{LogicalWriteInfo, LogicalWriteInfoImpl, SupportsOverwrite, SupportsTruncate, V1Write, WriteBuilder} import org.apache.spark.sql.execution.datasources.DataSourceUtils +import org.apache.spark.sql.functions.lit import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION import org.apache.spark.sql.internal.connector.SimpleTableProvider import org.apache.spark.sql.sources._ @@ -145,6 +148,52 @@ class V1WriteFallbackSuite extends QueryTest with SharedSparkSession with Before SparkSession.setDefaultSession(spark) } } + + test("SPARK-33492: append fallback should refresh cache") { + SparkSession.clearActiveSession() + SparkSession.clearDefaultSession() + try { + val session = SparkSession.builder() + .master("local[1]") + .config(V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[V1FallbackTableCatalog].getName) + .getOrCreate() + val df = session.createDataFrame(Seq((1, "x"))) + df.write.mode("append").option("name", "t1").format(v2Format).saveAsTable("test") + session.catalog.cacheTable("test") + checkAnswer(session.read.table("test"), Row(1, "x") :: Nil) + + val df2 = session.createDataFrame(Seq((2, "y"))) + df2.writeTo("test").append() + checkAnswer(session.read.table("test"), Row(1, "x") :: Row(2, "y") :: Nil) + + } finally { + SparkSession.setActiveSession(spark) + SparkSession.setDefaultSession(spark) + } + } + + test("SPARK-33492: overwrite fallback should refresh cache") { + SparkSession.clearActiveSession() + SparkSession.clearDefaultSession() + try { + val session = SparkSession.builder() + .master("local[1]") + .config(V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[V1FallbackTableCatalog].getName) + .getOrCreate() + val df = session.createDataFrame(Seq((1, "x"))) + df.write.mode("append").option("name", "t1").format(v2Format).saveAsTable("test") + session.catalog.cacheTable("test") + checkAnswer(session.read.table("test"), Row(1, "x") :: Nil) + + val df2 = session.createDataFrame(Seq((2, "y"))) + df2.writeTo("test").overwrite(lit(true)) + checkAnswer(session.read.table("test"), Row(2, "y") :: Nil) + + } finally { + SparkSession.setActiveSession(spark) + SparkSession.setDefaultSession(spark) + } + } } class V1WriteFallbackSessionCatalogSuite @@ -177,6 +226,7 @@ class V1FallbackTableCatalog extends TestV2SessionCatalogBase[InMemoryTableWithV properties: util.Map[String, String]): InMemoryTableWithV1Fallback = { val t = new InMemoryTableWithV1Fallback(name, schema, partitions, properties) InMemoryV1Provider.tables.put(name, t) + tables.put(Identifier.of(Array("default"), name), t) t } } @@ -261,7 +311,8 @@ class InMemoryV1Provider if (mode == SaveMode.Overwrite) { writer.asInstanceOf[SupportsTruncate].truncate() } - writer.asInstanceOf[V1WriteBuilder].buildForV1Write().insert(data, overwrite = false) + val write = writer.build() + write.asInstanceOf[V1Write].toInsertableRelation.insert(data, overwrite = false) getRelation } } @@ -272,7 +323,7 @@ class InMemoryTableWithV1Fallback( override val partitioning: Array[Transform], override val properties: util.Map[String, String]) extends Table - with SupportsWrite { + with SupportsWrite with SupportsRead { partitioning.foreach { t => if (!t.isInstanceOf[IdentityTransform]) { @@ -281,6 +332,7 @@ class InMemoryTableWithV1Fallback( } override def capabilities: util.Set[TableCapability] = Set( + TableCapability.BATCH_READ, 
TableCapability.V1_BATCH_WRITE, TableCapability.OVERWRITE_BY_FILTER, TableCapability.TRUNCATE).asJava @@ -297,7 +349,6 @@ class InMemoryTableWithV1Fallback( private class FallbackWriteBuilder(options: CaseInsensitiveStringMap) extends WriteBuilder - with V1WriteBuilder with SupportsTruncate with SupportsOverwrite { @@ -320,9 +371,9 @@ class InMemoryTableWithV1Fallback( partIndexes.map(row.get) } - override def buildForV1Write(): InsertableRelation = { - new InsertableRelation { - override def insert(data: DataFrame, overwrite: Boolean): Unit = { + override def build(): V1Write = new V1Write { + override def toInsertableRelation: InsertableRelation = { + (data: DataFrame, overwrite: Boolean) => { assert(!overwrite, "V1 write fallbacks cannot be called with overwrite=true") val rows = data.collect() rows.groupBy(getPartitionValues).foreach { case (partition, elements) => @@ -338,6 +389,30 @@ class InMemoryTableWithV1Fallback( } } } + + override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = + new V1ReadFallbackScanBuilder(schema) + + private class V1ReadFallbackScanBuilder(schema: StructType) extends ScanBuilder { + override def build(): Scan = new V1ReadFallbackScan(schema) + } + + private class V1ReadFallbackScan(schema: StructType) extends V1Scan { + override def readSchema(): StructType = schema + override def toV1TableScan[T <: BaseRelation with TableScan](context: SQLContext): T = + new V1TableScan(context, schema).asInstanceOf[T] + } + + private class V1TableScan( + context: SQLContext, + requiredSchema: StructType) extends BaseRelation with TableScan { + override def sqlContext: SQLContext = context + override def schema: StructType = requiredSchema + override def buildScan(): RDD[Row] = { + val data = InMemoryV1Provider.getTableData(context.sparkSession, name).collect() + context.sparkContext.makeRDD(data) + } + } } /** A rule that fails if a query plan is analyzed twice. */ diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/AlreadyOptimizedSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/AlreadyOptimizedSuite.scala deleted file mode 100644 index c266aa92f01cc..0000000000000 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/AlreadyOptimizedSuite.scala +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.sql.execution - -import org.apache.spark.sql.QueryTest -import org.apache.spark.sql.execution.adaptive.EnableAdaptiveExecutionSuite -import org.apache.spark.sql.test.SharedSparkSession - -class AlreadyOptimizedSuite extends QueryTest with SharedSparkSession { - - import testImplicits._ - - test("simple execution") { - val df = spark.range(10) - val planned = AlreadyOptimized.dataFrame(spark, df.queryExecution.optimizedPlan) - - checkAnswer(planned, df.toDF().collect()) - } - - test("planning on top works - projection") { - val df = spark.range(10) - val planned = AlreadyOptimized.dataFrame(spark, df.queryExecution.optimizedPlan) - - checkAnswer( - planned.withColumn("data", 'id + 1), - df.withColumn("data", 'id + 1).collect()) - } - - test("planning on top works - filter") { - val df = spark.range(10) - val planned = AlreadyOptimized.dataFrame(spark, df.queryExecution.optimizedPlan) - - checkAnswer(planned.where('id < 5), df.where('id < 5).toDF().collect()) - } - - test("planning on top works - aggregate") { - val df = spark.range(10) - val planned = AlreadyOptimized.dataFrame(spark, df.queryExecution.optimizedPlan) - - checkAnswer(planned.groupBy('id).count(), df.groupBy('id).count().collect()) - } - - test("planning on top works - joins") { - val df = spark.range(10) - val planned = AlreadyOptimized.dataFrame(spark, df.queryExecution.optimizedPlan) - - val plannedLeft = planned.alias("l") - val dfLeft = df.alias("l") - val plannedRight = planned.alias("r") - val dfRight = df.alias("r") - - checkAnswer( - plannedLeft.where('id < 3).join(plannedRight, Seq("id")), - dfLeft.where('id < 3).join(dfRight, Seq("id")).collect()) - - checkAnswer( - plannedLeft.where('id < 3).join(plannedRight, plannedLeft("id") === plannedRight("id")), - dfLeft.where('id < 3).join(dfRight, dfLeft("id") === dfRight("id")).collect()) - - checkAnswer( - plannedLeft.join(plannedRight, Seq("id")).where('id < 3), - dfLeft.join(dfRight, Seq("id")).where('id < 3).collect()) - - checkAnswer( - plannedLeft.join(plannedRight, plannedLeft("id") === plannedRight("id")).where($"l.id" < 3), - dfLeft.join(dfRight, dfLeft("id") === dfRight("id")).where($"l.id" < 3).collect()) - } -} - -class AlreadyOptimizedAQESuite extends AlreadyOptimizedSuite with EnableAdaptiveExecutionSuite diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/BaseScriptTransformationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/BaseScriptTransformationSuite.scala index 02f447bd14339..cef870b249985 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/BaseScriptTransformationSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/BaseScriptTransformationSuite.scala @@ -28,7 +28,7 @@ import org.scalatest.exceptions.TestFailedException import org.apache.spark.{SparkException, TaskContext, TestUtils} import org.apache.spark.rdd.RDD -import org.apache.spark.sql.Column +import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression, GenericInternalRow} import org.apache.spark.sql.catalyst.plans.physical.Partitioning @@ -63,16 +63,6 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestU uncaughtExceptionHandler.cleanStatus() } - def isHive23OrSpark: Boolean - - // In Hive 1.2, the string representation of a decimal omits trailing zeroes. - // But in Hive 2.3, it is always padded to 18 digits with trailing zeroes if necessary. 
- val decimalToString: Column => Column = if (isHive23OrSpark) { - c => c.cast("string") - } else { - c => c.cast("decimal(1, 0)").cast("string") - } - def createScriptTransformationExec( input: Seq[Expression], script: String, @@ -134,7 +124,11 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestU s""" |SELECT |TRANSFORM(a, b, c, d, e) - |USING 'python $scriptFilePath' AS (a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + | USING 'python $scriptFilePath' AS (a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' |FROM v """.stripMargin) @@ -142,15 +136,12 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestU 'a.cast("string"), 'b.cast("string"), 'c.cast("string"), - decimalToString('d), + 'd.cast("string"), 'e.cast("string")).collect()) } } - test("SPARK-25990: TRANSFORM should handle schema less correctly (no serde)") { - assume(TestUtils.testCommandAvailable("python")) - val scriptFilePath = copyAndGetResourceFile("test_script.py", ".py").getAbsoluteFile - + test("SPARK-32388: TRANSFORM should handle schema less correctly (no serde)") { withTempView("v") { val df = Seq( (1, "1", 1.0, BigDecimal(1.0), new Timestamp(1)), @@ -167,7 +158,24 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestU df.col("c").expr, df.col("d").expr, df.col("e").expr), - script = s"python $scriptFilePath", + script = "cat", + output = Seq( + AttributeReference("key", StringType)(), + AttributeReference("value", StringType)()), + child = child, + ioschema = defaultIOSchema.copy(schemaLess = true) + ), + df.select( + 'a.cast("string").as("key"), + 'b.cast("string").as("value")).collect()) + + checkAnswer( + df, + (child: SparkPlan) => createScriptTransformationExec( + input = Seq( + df.col("a").expr, + df.col("b").expr), + script = "cat", output = Seq( AttributeReference("key", StringType)(), AttributeReference("value", StringType)()), @@ -177,6 +185,22 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestU df.select( 'a.cast("string").as("key"), 'b.cast("string").as("value")).collect()) + + checkAnswer( + df, + (child: SparkPlan) => createScriptTransformationExec( + input = Seq( + df.col("a").expr), + script = "cat", + output = Seq( + AttributeReference("key", StringType)(), + AttributeReference("value", StringType)()), + child = child, + ioschema = defaultIOSchema.copy(schemaLess = true) + ), + df.select( + 'a.cast("string").as("key"), + lit(null)).collect()) } } @@ -401,6 +425,167 @@ abstract class BaseScriptTransformationSuite extends SparkPlanTest with SQLTestU 'b.cast("string").as("b"), lit(null), lit(null)).collect()) } + + test("SPARK-32106: TRANSFORM with non-existent command/file") { + Seq( + s""" + |SELECT TRANSFORM(a) + |USING 'some_non_existent_command' AS (a) + |FROM VALUES (1) t(a) + """.stripMargin, + s""" + |SELECT TRANSFORM(a) + |USING 'python some_non_existent_file' AS (a) + |FROM VALUES (1) t(a) + """.stripMargin).foreach { query => + intercept[SparkException] { + // Since an error message is shell-dependent, this test just checks + // if the expected exception will be thrown. 
+ sql(query).collect() + } + } + } + + test("SPARK-33930: Script Transform default FIELD DELIMIT should be \u0001 (no serde)") { + withTempView("v") { + val df = Seq( + (1, 2, 3), + (2, 3, 4), + (3, 4, 5) + ).toDF("a", "b", "c") + df.createTempView("v") + + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a, b, c) + | ROW FORMAT DELIMITED + | USING 'cat' AS (a) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '&' + |FROM v + """.stripMargin), identity, + Row("1\u00012\u00013") :: + Row("2\u00013\u00014") :: + Row("3\u00014\u00015") :: Nil) + } + } + + test("SPARK-33934: Add SparkFile's root dir to env property PATH") { + assume(TestUtils.testCommandAvailable("python")) + val scriptFilePath = copyAndGetResourceFile("test_script.py", ".py").getAbsoluteFile + withTempView("v") { + val df = Seq( + (1, "1", 1.0, BigDecimal(1.0), new Timestamp(1)), + (2, "2", 2.0, BigDecimal(2.0), new Timestamp(2)), + (3, "3", 3.0, BigDecimal(3.0), new Timestamp(3)) + ).toDF("a", "b", "c", "d", "e") // Note column d's data type is Decimal(38, 18) + df.createTempView("v") + + // test 'python /path/to/script.py' with local file + checkAnswer( + sql( + s""" + |SELECT + |TRANSFORM(a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + | USING 'python $scriptFilePath' AS (a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + |FROM v + """.stripMargin), identity, df.select( + 'a.cast("string"), + 'b.cast("string"), + 'c.cast("string"), + 'd.cast("string"), + 'e.cast("string")).collect()) + + // test '/path/to/script.py' with script not executable + val e1 = intercept[TestFailedException] { + checkAnswer( + sql( + s""" + |SELECT + |TRANSFORM(a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + | USING '$scriptFilePath' AS (a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + |FROM v + """.stripMargin), identity, df.select( + 'a.cast("string"), + 'b.cast("string"), + 'c.cast("string"), + 'd.cast("string"), + 'e.cast("string")).collect()) + }.getMessage + // Check the exit status code, since in GA tests the detailed root cause may be missing from the error message. + // Different root causes produce different exit codes.
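+ // Exit status 126 is the common shell convention for a command that was found but + // could not be executed (e.g. because of a permission problem).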
+ // In this test, root cause is `Permission denied` + assert(e1.contains("Subprocess exited with status 126")) + + // test `/path/to/script.py' with script executable + scriptFilePath.setExecutable(true) + checkAnswer( + sql( + s""" + |SELECT + |TRANSFORM(a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + | USING '$scriptFilePath' AS (a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + |FROM v + """.stripMargin), identity, df.select( + 'a.cast("string"), + 'b.cast("string"), + 'c.cast("string"), + 'd.cast("string"), + 'e.cast("string")).collect()) + + scriptFilePath.setExecutable(false) + sql(s"ADD FILE ${scriptFilePath.getAbsolutePath}") + + // test `script.py` when file added + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + | USING '${scriptFilePath.getName}' AS (a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + |FROM v + """.stripMargin), identity, df.select( + 'a.cast("string"), + 'b.cast("string"), + 'c.cast("string"), + 'd.cast("string"), + 'e.cast("string")).collect()) + + // test `python script.py` when file added + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + | USING 'python ${scriptFilePath.getName}' AS (a, b, c, d, e) + | ROW FORMAT DELIMITED + | FIELDS TERMINATED BY '\t' + |FROM v + """.stripMargin), identity, df.select( + 'a.cast("string"), + 'b.cast("string"), + 'c.cast("string"), + 'd.cast("string"), + 'e.cast("string")).collect()) + } + } } case class ExceptionInjectingOperator(child: SparkPlan) extends UnaryExecNode { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ColumnarRulesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ColumnarRulesSuite.scala index d5d534eb5f878..dd2790040b9e8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ColumnarRulesSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ColumnarRulesSuite.scala @@ -27,7 +27,7 @@ class ColumnarRulesSuite extends PlanTest with SharedSparkSession { test("Idempotency of columnar rules - RowToColumnar/ColumnarToRow") { val rules = ApplyColumnarRulesAndInsertTransitions( - spark.sessionState.conf, spark.sessionState.columnarRules) + spark.sessionState.columnarRules) val plan = UnaryOp(UnaryOp(LeafOp(false), true), false) val expected = @@ -40,7 +40,7 @@ class ColumnarRulesSuite extends PlanTest with SharedSparkSession { test("Idempotency of columnar rules - ColumnarToRow/RowToColumnar") { val rules = ApplyColumnarRulesAndInsertTransitions( - spark.sessionState.conf, spark.sessionState.columnarRules) + spark.sessionState.columnarRules) val plan = UnaryOp(UnaryOp(LeafOp(true), false), true) val expected = ColumnarToRowExec( diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala index d428b7ebc0e91..8924d2ef6eab8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala @@ -22,7 +22,7 @@ import org.apache.spark.sql.{execution, DataFrame, Row} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans._ -import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Range, Repartition, Sort, Union} +import 
org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Range, Repartition, Union} import org.apache.spark.sql.catalyst.plans.physical._ import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecution} import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, ObjectHashAggregateExec, SortAggregateExec} @@ -234,19 +234,6 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { } } - test("SPARK-23375: Cached sorted data doesn't need to be re-sorted") { - val query = testData.select('key, 'value).sort('key.desc).cache() - assert(query.queryExecution.optimizedPlan.isInstanceOf[InMemoryRelation]) - val resorted = query.sort('key.desc) - assert(resorted.queryExecution.optimizedPlan.collect { case s: Sort => s}.isEmpty) - assert(resorted.select('key).collect().map(_.getInt(0)).toSeq == - (1 to 100).reverse) - // with a different order, the sort is needed - val sortedAsc = query.sort('key) - assert(sortedAsc.queryExecution.optimizedPlan.collect { case s: Sort => s}.size == 1) - assert(sortedAsc.select('key).collect().map(_.getInt(0)).toSeq == (1 to 100)) - } - test("PartitioningCollection") { withTempView("normal", "small", "tiny") { testData.createOrReplaceTempView("normal") @@ -342,7 +329,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) } @@ -360,7 +347,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.isEmpty) { fail(s"Exchange should have been added:\n$outputPlan") @@ -380,7 +367,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(Seq.empty, Seq.empty) ) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) { fail(s"Exchange should not have been added:\n$outputPlan") @@ -403,7 +390,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { requiredChildDistribution = Seq(distribution, distribution), requiredChildOrdering = Seq(outputOrdering, outputOrdering) ) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) { fail(s"No Exchanges should have been added:\n$outputPlan") @@ -418,7 +405,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { val inputPlan = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning)) - val outputPlan = 
EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 2) { fail(s"Topmost Exchange should have been eliminated:\n$outputPlan") @@ -433,7 +420,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { val inputPlan = ShuffleExchangeExec( partitioning, DummySparkPlan(outputPartitioning = partitioning)) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 1) { fail(s"Topmost Exchange should not have been eliminated:\n$outputPlan") @@ -451,7 +438,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { requiredChildDistribution = Seq(distribution), requiredChildOrdering = Seq(Seq.empty)) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) val shuffle = outputPlan.collect { case e: ShuffleExchangeExec => e } assert(shuffle.size === 1) assert(shuffle.head.outputPartitioning === finalPartitioning) @@ -476,7 +463,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { shuffle, shuffle) - val outputPlan = ReuseExchange(spark.sessionState.conf).apply(inputPlan) + val outputPlan = ReuseExchange.apply(inputPlan) if (outputPlan.collect { case e: ReusedExchangeExec => true }.size != 1) { fail(s"Should re-use the shuffle:\n$outputPlan") } @@ -493,7 +480,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { ShuffleExchangeExec(finalPartitioning, inputPlan), ShuffleExchangeExec(finalPartitioning, inputPlan)) - val outputPlan2 = ReuseExchange(spark.sessionState.conf).apply(inputPlan2) + val outputPlan2 = ReuseExchange.apply(inputPlan2) if (outputPlan2.collect { case e: ReusedExchangeExec => true }.size != 2) { fail(s"Should re-use the two shuffles:\n$outputPlan2") } @@ -530,7 +517,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { requiredChildOrdering = Seq(requiredOrdering), requiredChildDistribution = Seq(UnspecifiedDistribution) ) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(inputPlan) + val outputPlan = EnsureRequirements.apply(inputPlan) assertDistributionRequirementsAreSatisfied(outputPlan) if (shouldHaveSort) { if (outputPlan.collect { case s: SortExec => true }.isEmpty) { @@ -691,7 +678,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { val smjExec = SortMergeJoinExec( exprA :: exprA :: Nil, exprB :: exprC :: Nil, Inner, None, plan1, plan2) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(smjExec) + val outputPlan = EnsureRequirements.apply(smjExec) outputPlan match { case SortMergeJoinExec(leftKeys, rightKeys, _, _, _, _, _) => assert(leftKeys == Seq(exprA, exprA)) @@ -711,7 +698,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { condition = None, left = plan1, right = plan2) - val outputPlan = EnsureRequirements(spark.sessionState.conf).apply(smjExec) + val outputPlan = EnsureRequirements.apply(smjExec) outputPlan match { case SortMergeJoinExec(leftKeys, rightKeys, _, _, SortExec(_, _, @@ -902,12 +889,229 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { | 
(SELECT key AS k from df2) t2 |ON t1.k = t2.k """.stripMargin).queryExecution.executedPlan - val exchanges = planned.collect { case s: ShuffleExchangeExec => s } + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 2) } } } + test("SPARK-33399: aliases should be handled properly in PartitioningCollection output" + + " partitioning") { + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { + withTempView("t1", "t2", "t3") { + spark.range(10).repartition($"id").createTempView("t1") + spark.range(20).repartition($"id").createTempView("t2") + spark.range(30).repartition($"id").createTempView("t3") + val planned = sql( + """ + |SELECT t3.id as t3id + |FROM ( + | SELECT t1.id as t1id, t2.id as t2id + | FROM t1, t2 + | WHERE t1.id = t2.id + |) t12, t3 + |WHERE t1id = t3.id + """.stripMargin).queryExecution.executedPlan + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } + assert(exchanges.size == 3) + + val projects = collect(planned) { case p: ProjectExec => p } + assert(projects.exists(_.outputPartitioning match { + case HashPartitioning(Seq(k1: AttributeReference), _) if k1.name == "t1id" => + true + case _ => + false + })) + } + } + } + + test("SPARK-33399: aliases should be handled properly in HashPartitioning") { + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { + withTempView("t1", "t2", "t3") { + spark.range(10).repartition($"id").createTempView("t1") + spark.range(20).repartition($"id").createTempView("t2") + spark.range(30).repartition($"id").createTempView("t3") + val planned = sql( + """ + |SELECT t1id, t3.id as t3id + |FROM ( + | SELECT t1.id as t1id + | FROM t1 LEFT SEMI JOIN t2 + | ON t1.id = t2.id + |) t12 INNER JOIN t3 + |WHERE t1id = t3.id + """.stripMargin).queryExecution.executedPlan + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } + assert(exchanges.size == 3) + + val projects = collect(planned) { case p: ProjectExec => p } + assert(projects.exists(_.outputPartitioning match { + case HashPartitioning(Seq(a: AttributeReference), _) => a.name == "t1id" + case _ => false + })) + } + } + } + + test("SPARK-33399: alias handling should happen properly for RangePartitioning") { + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { + val df = spark.range(1, 100) + .select(col("id").as("id1")).groupBy("id1").count() + // Plan for this will be Range -> ProjectWithAlias -> HashAggregate -> HashAggregate + // if Project normalizes alias in its Range outputPartitioning, then no Exchange should come + // in between HashAggregates + val planned = df.queryExecution.executedPlan + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } + assert(exchanges.isEmpty) + + val projects = collect(planned) { case p: ProjectExec => p } + assert(projects.exists(_.outputPartitioning match { + case RangePartitioning(Seq(SortOrder(ar: AttributeReference, _, _, _)), _) => + ar.name == "id1" + case _ => false + })) + } + } + + test("SPARK-33399: aliased should be handled properly " + + "for partitioning and sortorder involving complex expressions") { + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { + withTempView("t1", "t2", "t3") { + spark.range(10).select(col("id").as("id1")).createTempView("t1") + spark.range(20).select(col("id").as("id2")).createTempView("t2") + spark.range(30).select(col("id").as("id3")).createTempView("t3") + val planned = sql( + """ + |SELECT t3.id3 as t3id + |FROM ( + | SELECT t1.id1 as t1id, t2.id2 as t2id + | FROM t1, t2 + | 
WHERE t1.id1 * 10 = t2.id2 * 10 + |) t12, t3 + |WHERE t1id * 10 = t3.id3 * 10 + """.stripMargin).queryExecution.executedPlan + val sortNodes = collect(planned) { case s: SortExec => s } + assert(sortNodes.size == 3) + val exchangeNodes = collect(planned) { case e: ShuffleExchangeExec => e } + assert(exchangeNodes.size == 3) + + val projects = collect(planned) { case p: ProjectExec => p } + assert(projects.exists(_.outputPartitioning match { + case HashPartitioning(Seq(Multiply(ar1: AttributeReference, _, _)), _) => + ar1.name == "t1id" + case _ => + false + })) + } + } + } + + test("SPARK-33399: alias handling should happen properly for SinglePartition") { + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { + val df = spark.range(1, 100, 1, 1) + .select(col("id").as("id1")).groupBy("id1").count() + val planned = df.queryExecution.executedPlan + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } + assert(exchanges.isEmpty) + + val projects = collect(planned) { case p: ProjectExec => p } + assert(projects.exists(_.outputPartitioning match { + case SinglePartition => true + case _ => false + })) + } + } + + test("SPARK-33399: No extra exchanges in case of" + + " [Inner Join -> Project with aliases -> HashAggregate]") { + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { + withTempView("t1", "t2") { + spark.range(10).repartition($"id").createTempView("t1") + spark.range(20).repartition($"id").createTempView("t2") + val planned = sql( + """ + |SELECT t1id, t2id + |FROM ( + | SELECT t1.id as t1id, t2.id as t2id + | FROM t1 INNER JOIN t2 + | WHERE t1.id = t2.id + |) t12 + |GROUP BY t1id, t2id + """.stripMargin).queryExecution.executedPlan + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } + assert(exchanges.size == 2) + + val projects = collect(planned) { case p: ProjectExec => p } + assert(projects.exists(_.outputPartitioning match { + case PartitioningCollection(Seq(HashPartitioning(Seq(k1: AttributeReference), _), + HashPartitioning(Seq(k2: AttributeReference), _))) => + k1.name == "t1id" && k2.name == "t2id" + case _ => false + })) + } + } + } + + test("SPARK-33400: Normalization of sortOrder should take care of sameOrderExprs") { + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { + withTempView("t1", "t2", "t3") { + spark.range(10).repartition($"id").createTempView("t1") + spark.range(20).repartition($"id").createTempView("t2") + spark.range(30).repartition($"id").createTempView("t3") + val planned = sql( + """ + |SELECT t2id, t3.id as t3id + |FROM ( + | SELECT t1.id as t1id, t2.id as t2id + | FROM t1, t2 + | WHERE t1.id = t2.id + |) t12, t3 + |WHERE t2id = t3.id + """.stripMargin).queryExecution.executedPlan + + val sortNodes = collect(planned) { case s: SortExec => s } + assert(sortNodes.size == 3) + + val projects = collect(planned) { case p: ProjectExec => p } + assert(projects.exists(_.outputOrdering match { + case Seq(SortOrder(_, Ascending, NullsFirst, sameOrderExprs)) => + sameOrderExprs.size == 1 && sameOrderExprs.head.isInstanceOf[AttributeReference] && + sameOrderExprs.head.asInstanceOf[AttributeReference].name == "t2id" + case _ => false + })) + } + } + } + + test("sort order doesn't have repeated expressions") { + withSQLConf( + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", + SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + withTempView("t1", "t2") { + spark.range(10).repartition($"id").createTempView("t1") + spark.range(20).repartition($"id").createTempView("t2") + val planned = sql( + 
""" + | SELECT t12.id, t1.id + | FROM (SELECT t1.id FROM t1, t2 WHERE t1.id * 2 = t2.id) t12, t1 + | where 2 * t12.id = t1.id + """.stripMargin).queryExecution.executedPlan + + // t12 is already sorted on `t1.id * 2`. and we need to sort it on `2 * t12.id` + // for 2nd join. So sorting on t12 can be avoided + val sortNodes = planned.collect { case s: SortExec => s } + assert(sortNodes.size == 3) + val outputOrdering = planned.outputOrdering + assert(outputOrdering.size == 1) + // Sort order should have 3 childrens, not 4. This is because t1.id*2 and 2*t1.id are same + assert(outputOrdering.head.children.size == 3) + assert(outputOrdering.head.children.count(_.isInstanceOf[AttributeReference]) == 2) + assert(outputOrdering.head.children.count(_.isInstanceOf[Multiply]) == 1) + } + } + } + test("aliases to expressions should not be replaced") { withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { withTempView("df1", "df2") { @@ -921,7 +1125,7 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { | (SELECT key + 1 AS k2 from df2) t2 |ON t1.k1 = t2.k2 |""".stripMargin).queryExecution.executedPlan - val exchanges = planned.collect { case s: ShuffleExchangeExec => s } + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } // Make sure aliases to an expression (key + 1) are not replaced. Seq("k1", "k2").foreach { alias => @@ -944,9 +1148,9 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { val planned = agg1.join(agg2, $"k1" === $"k3").queryExecution.executedPlan - assert(planned.collect { case h: HashAggregateExec => h }.nonEmpty) + assert(collect(planned) { case h: HashAggregateExec => h }.nonEmpty) - val exchanges = planned.collect { case s: ShuffleExchangeExec => s } + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 2) } } @@ -964,12 +1168,12 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { val planned = agg1.join(agg2, $"k1" === $"k3").queryExecution.executedPlan if (useObjectHashAgg) { - assert(planned.collect { case o: ObjectHashAggregateExec => o }.nonEmpty) + assert(collect(planned) { case o: ObjectHashAggregateExec => o }.nonEmpty) } else { - assert(planned.collect { case s: SortAggregateExec => s }.nonEmpty) + assert(collect(planned) { case s: SortAggregateExec => s }.nonEmpty) } - val exchanges = planned.collect { case s: ShuffleExchangeExec => s } + val exchanges = collect(planned) { case s: ShuffleExchangeExec => s } assert(exchanges.size == 2) } } @@ -977,20 +1181,61 @@ class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper { } test("aliases in the sort aggregate expressions should not introduce extra sort") { - withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") { - withSQLConf(SQLConf.USE_OBJECT_HASH_AGG.key -> "false") { - val t1 = spark.range(10).selectExpr("floor(id/4) as k1") - val t2 = spark.range(20).selectExpr("floor(id/4) as k2") + withSQLConf( + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", + SQLConf.USE_OBJECT_HASH_AGG.key -> "false") { + val t1 = spark.range(10).selectExpr("floor(id/4) as k1") + val t2 = spark.range(20).selectExpr("floor(id/4) as k2") + + val agg1 = t1.groupBy("k1").agg(collect_list("k1")).withColumnRenamed("k1", "k3") + val agg2 = t2.groupBy("k2").agg(collect_list("k2")) + + val planned = agg1.join(agg2, $"k3" === $"k2").queryExecution.executedPlan + assert(collect(planned) { case s: SortAggregateExec => s }.nonEmpty) + + // We expect two SortExec nodes on each 
side of join. + val sorts = collect(planned) { case s: SortExec => s } + assert(sorts.size == 4) + } + } - val agg1 = t1.groupBy("k1").agg(collect_list("k1")).withColumnRenamed("k1", "k3") - val agg2 = t2.groupBy("k2").agg(collect_list("k2")) + testWithWholeStageCodegenOnAndOff("Change the number of partitions to zero " + + "when a range is empty") { _ => + val range = spark.range(1, 1, 1, 1000) + val numPartitions = range.rdd.getNumPartitions + assert(numPartitions == 0) + } - val planned = agg1.join(agg2, $"k3" === $"k2").queryExecution.executedPlan - assert(planned.collect { case s: SortAggregateExec => s }.nonEmpty) + test("SPARK-33758: Prune unnecessary output partitioning") { + withSQLConf( + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", + SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + withTempView("t1", "t2") { + spark.range(10).repartition($"id").createTempView("t1") + spark.range(20).repartition($"id").createTempView("t2") + val planned = sql( + """ + | SELECT t1.id as t1id, t2.id as t2id + | FROM t1, t2 + | WHERE t1.id = t2.id + """.stripMargin).queryExecution.executedPlan + + assert(planned.outputPartitioning match { + case PartitioningCollection(Seq(HashPartitioning(Seq(k1: AttributeReference), _), + HashPartitioning(Seq(k2: AttributeReference), _))) => + k1.name == "t1id" && k2.name == "t2id" + }) - // We expect two SortExec nodes on each side of join. - val sorts = planned.collect { case s: SortExec => s } - assert(sorts.size == 4) + val planned2 = sql( + """ + | SELECT t1.id as t1id + | FROM t1, t2 + | WHERE t1.id = t2.id + """.stripMargin).queryExecution.executedPlan + assert(planned2.outputPartitioning match { + case HashPartitioning(Seq(k1: AttributeReference), _) if k1.name == "t1id" => + true + }) } } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala index 83c80b4f3eb08..585ce4e40471d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/QueryExecutionSuite.scala @@ -219,7 +219,7 @@ class QueryExecutionSuite extends SharedSparkSession { spark.range(1).groupBy("id").count().queryExecution.executedPlan } } - Seq("=== Applying Rule org.apache.spark.sql.execution.CollapseCodegenStages ===", + Seq("=== Applying Rule org.apache.spark.sql.execution", "=== Result of Batch Preparations ===").foreach { expectedMsg => assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg))) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala index bc24436c5806a..040c5189abcb6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala @@ -18,17 +18,21 @@ package org.apache.spark.sql.execution import org.apache.spark.sql.{DataFrame, QueryTest, Row} +import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite} import org.apache.spark.sql.internal.SQLConf -import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} +import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.StructType import org.apache.spark.util.Utils -class 
RemoveRedundantProjectsSuite extends QueryTest with SharedSparkSession with SQLTestUtils { +abstract class RemoveRedundantProjectsSuiteBase + extends QueryTest + with SharedSparkSession + with AdaptiveSparkPlanHelper { private def assertProjectExecCount(df: DataFrame, expected: Int): Unit = { withClue(df.queryExecution) { val plan = df.queryExecution.executedPlan - val actual = plan.collectWithSubqueries { case p: ProjectExec => p }.size + val actual = collectWithSubqueries(plan) { case p: ProjectExec => p }.size assert(actual == expected) } } @@ -115,9 +119,41 @@ class RemoveRedundantProjectsSuite extends QueryTest with SharedSparkSession wit assertProjectExec(query, 1, 2) } - test("generate") { - val query = "select a, key, explode(d) from testView where a > 10" - assertProjectExec(query, 0, 1) + test("generate should require column ordering") { + withTempView("testData") { + spark.range(0, 10, 1) + .selectExpr("id as key", "id * 2 as a", "id * 3 as b") + .createOrReplaceTempView("testData") + + val data = sql("select key, a, b, count(*) from testData group by key, a, b limit 2") + val df = data.selectExpr("a", "b", "key", "explode(array(key, a, b)) as d").filter("d > 0") + df.collect() + val plan = df.queryExecution.executedPlan + val numProjects = collectWithSubqueries(plan) { case p: ProjectExec => p }.length + + // Create a new plan that reverses the GenerateExec output and adds a new ProjectExec between + // GenerateExec and its child. This verifies that, if the ProjectExec were removed, the output + // of the query would be incorrect. + val newPlan = stripAQEPlan(plan) transform { + case g @ GenerateExec(_, requiredChildOutput, _, _, child) => + g.copy(requiredChildOutput = requiredChildOutput.reverse, + child = ProjectExec(requiredChildOutput.reverse, child)) + } + + // Re-apply the RemoveRedundantProjects rule. + val rule = RemoveRedundantProjects + val newExecutedPlan = rule.apply(newPlan) + // The manually added ProjectExec node shouldn't be removed. + assert(collectWithSubqueries(newExecutedPlan) { + case p: ProjectExec => p + }.size == numProjects + 1) + + // Check that the original plan's output and the new plan's output are the same.
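+ // If the rule had wrongly dropped the hand-inserted ProjectExec, the reversed child output + // would surface as swapped columns, so the positional row-by-row comparison below catches it.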
+ val expectedRows = plan.executeCollect() + val actualRows = newExecutedPlan.executeCollect() + assert(expectedRows.length == actualRows.length) + expectedRows.zip(actualRows).foreach { case (expected, actual) => assert(expected == actual) } + } } test("subquery") { @@ -130,4 +166,57 @@ class RemoveRedundantProjectsSuite extends QueryTest with SharedSparkSession wit assertProjectExec(query, 0, 1) } } + + test("SPARK-33697: UnionExec should require column ordering") { + withTable("t1", "t2") { + spark.range(-10, 20) + .selectExpr( + "id", + "date_add(date '1950-01-01', cast(id as int)) as datecol", + "cast(id as string) strcol") + .write.mode("overwrite").format("parquet").saveAsTable("t1") + spark.range(-10, 20) + .selectExpr( + "cast(id as string) strcol", + "id", + "date_add(date '1950-01-01', cast(id as int)) as datecol") + .write.mode("overwrite").format("parquet").saveAsTable("t2") + + val queryTemplate = + """ + |SELECT DISTINCT datecol, strcol FROM + |( + |(SELECT datecol, id, strcol from t1) + | %s + |(SELECT datecol, id, strcol from t2) + |) + |""".stripMargin + + Seq(("UNION", 2, 2), ("UNION ALL", 1, 2)).foreach { case (setOperation, enabled, disabled) => + val query = queryTemplate.format(setOperation) + assertProjectExec(query, enabled = enabled, disabled = disabled) + } + } + } + + test("SPARK-33697: remove redundant projects under expand") { + val query = + """ + |SELECT t1.key, t2.key, sum(t1.a) AS s1, sum(t2.b) AS s2 FROM + |(SELECT a, key FROM testView) t1 + |JOIN + |(SELECT b, key FROM testView) t2 + |ON t1.key = t2.key + |GROUP BY t1.key, t2.key GROUPING SETS(t1.key, t2.key) + |ORDER BY t1.key, t2.key, s1, s2 + |LIMIT 10 + |""".stripMargin + assertProjectExec(query, 0, 3) + } } + +class RemoveRedundantProjectsSuite extends RemoveRedundantProjectsSuiteBase + with DisableAdaptiveExecutionSuite + +class RemoveRedundantProjectsSuiteAE extends RemoveRedundantProjectsSuiteBase + with EnableAdaptiveExecutionSuite diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantSortsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantSortsSuite.scala new file mode 100644 index 0000000000000..751078d08fda9 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantSortsSuite.scala @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution + +import org.apache.spark.sql.{DataFrame, QueryTest} +import org.apache.spark.sql.catalyst.plans.physical.{RangePartitioning, UnknownPartitioning} +import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite} +import org.apache.spark.sql.execution.joins.ShuffledJoin +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.test.SharedSparkSession + + +abstract class RemoveRedundantSortsSuiteBase + extends QueryTest + with SharedSparkSession + with AdaptiveSparkPlanHelper { + import testImplicits._ + + private def checkNumSorts(df: DataFrame, count: Int): Unit = { + val plan = df.queryExecution.executedPlan + assert(collectWithSubqueries(plan) { case s: SortExec => s }.length == count) + } + + private def checkSorts(query: String, enabledCount: Int, disabledCount: Int): Unit = { + withSQLConf(SQLConf.REMOVE_REDUNDANT_SORTS_ENABLED.key -> "true") { + val df = sql(query) + checkNumSorts(df, enabledCount) + val result = df.collect() + withSQLConf(SQLConf.REMOVE_REDUNDANT_SORTS_ENABLED.key -> "false") { + val df = sql(query) + checkNumSorts(df, disabledCount) + checkAnswer(df, result) + } + } + } + + test("remove redundant sorts with limit") { + withTempView("t") { + spark.range(100).select('id as "key").createOrReplaceTempView("t") + val query = + """ + |SELECT key FROM + | (SELECT key FROM t WHERE key > 10 ORDER BY key DESC LIMIT 10) + |ORDER BY key DESC + |""".stripMargin + checkSorts(query, 0, 1) + } + } + + test("remove redundant sorts with broadcast hash join") { + withTempView("t1", "t2") { + spark.range(1000).select('id as "key").createOrReplaceTempView("t1") + spark.range(1000).select('id as "key").createOrReplaceTempView("t2") + + val queryTemplate = """ + |SELECT /*+ BROADCAST(%s) */ t1.key FROM + | (SELECT key FROM t1 WHERE key > 10 ORDER BY key DESC LIMIT 10) t1 + |JOIN + | (SELECT key FROM t2 WHERE key > 50 ORDER BY key DESC LIMIT 100) t2 + |ON t1.key = t2.key + |ORDER BY %s + """.stripMargin + + // No sort should be removed since the stream side (t2) order DESC + // does not satisfy the required sort order ASC. + val buildLeftOrderByRightAsc = queryTemplate.format("t1", "t2.key ASC") + checkSorts(buildLeftOrderByRightAsc, 1, 1) + + // The top sort node should be removed since the stream side (t2) order DESC already + // satisfies the required sort order DESC. + val buildLeftOrderByRightDesc = queryTemplate.format("t1", "t2.key DESC") + checkSorts(buildLeftOrderByRightDesc, 0, 1) + + // No sort should be removed since the sort ordering from broadcast-hash join is based + // on the stream side (t2) and the required sort order is from t1. + val buildLeftOrderByLeftDesc = queryTemplate.format("t1", "t1.key DESC") + checkSorts(buildLeftOrderByLeftDesc, 1, 1) + + // The top sort node should be removed since the stream side (t1) order DESC already + // satisfies the required sort order DESC. 
+ val buildRightOrderByLeftDesc = queryTemplate.format("t2", "t1.key DESC") + checkSorts(buildRightOrderByLeftDesc, 0, 1) + } + } + + test("remove redundant sorts with sort merge join") { + withTempView("t1", "t2") { + spark.range(1000).select('id as "key").createOrReplaceTempView("t1") + spark.range(1000).select('id as "key").createOrReplaceTempView("t2") + val query = """ + |SELECT /*+ MERGE(t1) */ t1.key FROM + | (SELECT key FROM t1 WHERE key > 10 ORDER BY key DESC LIMIT 10) t1 + |JOIN + | (SELECT key FROM t2 WHERE key > 50 ORDER BY key DESC LIMIT 100) t2 + |ON t1.key = t2.key + |ORDER BY t1.key + """.stripMargin + + val queryAsc = query + " ASC" + checkSorts(queryAsc, 2, 3) + + // The top level sort should not be removed since the child output ordering is ASC and + // the required ordering is DESC. + val queryDesc = query + " DESC" + checkSorts(queryDesc, 3, 3) + } + } + + test("cached sorted data doesn't need to be re-sorted") { + withSQLConf(SQLConf.REMOVE_REDUNDANT_SORTS_ENABLED.key -> "true") { + val df = spark.range(1000).select('id as "key").sort('key.desc).cache() + val resorted = df.sort('key.desc) + val sortedAsc = df.sort('key.asc) + checkNumSorts(df, 0) + checkNumSorts(resorted, 0) + checkNumSorts(sortedAsc, 1) + val result = resorted.collect() + withSQLConf(SQLConf.REMOVE_REDUNDANT_SORTS_ENABLED.key -> "false") { + val resorted = df.sort('key.desc) + checkNumSorts(resorted, 1) + checkAnswer(resorted, result) + } + } + } + + test("SPARK-33472: shuffled join with different left and right side partition numbers") { + withTempView("t1", "t2") { + spark.range(0, 100, 1, 2).select('id as "key").createOrReplaceTempView("t1") + (0 to 100).toDF("key").createOrReplaceTempView("t2") + + val queryTemplate = """ + |SELECT /*+ %s(t1) */ t1.key + |FROM t1 JOIN t2 ON t1.key = t2.key + |WHERE t1.key > 10 AND t2.key < 50 + |ORDER BY t1.key ASC + """.stripMargin + + Seq(("MERGE", 3), ("SHUFFLE_HASH", 1)).foreach { case (hint, count) => + val query = queryTemplate.format(hint) + val df = sql(query) + val sparkPlan = df.queryExecution.sparkPlan + val join = sparkPlan.collect { case j: ShuffledJoin => j }.head + val leftPartitioning = join.left.outputPartitioning + assert(leftPartitioning.isInstanceOf[RangePartitioning]) + assert(leftPartitioning.numPartitions == 2) + assert(join.right.outputPartitioning == UnknownPartitioning(0)) + checkSorts(query, count, count) + } + } + } +} + +class RemoveRedundantSortsSuite extends RemoveRedundantSortsSuiteBase + with DisableAdaptiveExecutionSuite + +class RemoveRedundantSortsSuiteAE extends RemoveRedundantSortsSuiteBase + with EnableAdaptiveExecutionSuite diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala index f3cae24527d60..9b84e0fe4bcb7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewSuite.scala @@ -17,11 +17,12 @@ package org.apache.spark.sql.execution +import org.apache.spark.SparkException import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.NoSuchTableException import org.apache.spark.sql.catalyst.parser.ParseException -import org.apache.spark.sql.internal.SQLConf.MAX_NESTED_VIEW_DEPTH +import org.apache.spark.sql.internal.SQLConf._ import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} class SimpleSQLViewSuite extends SQLViewSuite with 
SharedSparkSession @@ -110,7 +111,7 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { e = intercept[AnalysisException] { sql("ALTER VIEW tab1 AS SELECT * FROM jt") }.getMessage - assert(e.contains("`tab1` is not a view")) + assert(e.contains("tab1 is a table. 'ALTER VIEW ... AS' expects a view.")) } } @@ -126,8 +127,12 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { val viewName = "testView" withTempView(viewName) { spark.range(10).createTempView(viewName) - assertNoSuchTable(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')") - assertNoSuchTable(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')") + assertAnalysisError( + s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')", + "testView is a temp view. 'ALTER VIEW ... SET TBLPROPERTIES' expects a permanent view.") + assertAnalysisError( + s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')", + "testView is a temp view. 'ALTER VIEW ... UNSET TBLPROPERTIES' expects a permanent view.") } } @@ -135,18 +140,32 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { val viewName = "testView" withTempView(viewName) { spark.range(10).createTempView(viewName) - assertNoSuchTable(s"ALTER TABLE $viewName SET SERDE 'whatever'") - assertNoSuchTable(s"ALTER TABLE $viewName PARTITION (a=1, b=2) SET SERDE 'whatever'") - assertNoSuchTable(s"ALTER TABLE $viewName SET SERDEPROPERTIES ('p' = 'an')") - assertNoSuchTable(s"ALTER TABLE $viewName ADD IF NOT EXISTS PARTITION (a='4', b='8')") - assertNoSuchTable(s"ALTER TABLE $viewName DROP PARTITION (a='4', b='8')") - assertNoSuchTable(s"ALTER TABLE $viewName PARTITION (a='4') RENAME TO PARTITION (a='5')") - assertNoSuchTable(s"ALTER TABLE $viewName RECOVER PARTITIONS") + assertAnalysisError( + s"ALTER TABLE $viewName SET SERDE 'whatever'", + s"$viewName is a temp view. 'ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]' expects a table") + assertAnalysisError( + s"ALTER TABLE $viewName PARTITION (a=1, b=2) SET SERDE 'whatever'", + s"$viewName is a temp view. 'ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]' expects a table") + assertAnalysisError( + s"ALTER TABLE $viewName SET SERDEPROPERTIES ('p' = 'an')", + s"$viewName is a temp view. 'ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]' expects a table") + assertAnalysisError( + s"ALTER TABLE $viewName PARTITION (a='4') RENAME TO PARTITION (a='5')", + s"$viewName is a temp view. 'ALTER TABLE ... RENAME TO PARTITION' expects a table") + assertAnalysisError( + s"ALTER TABLE $viewName RECOVER PARTITIONS", + s"$viewName is a temp view. 'ALTER TABLE ... RECOVER PARTITIONS' expects a table") // For v2 ALTER TABLE statements, we have better error message saying view is not supported. assertAnalysisError( s"ALTER TABLE $viewName SET LOCATION '/path/to/your/lovely/heart'", s"'$viewName' is a view not a table") + assertAnalysisError( + s"ALTER TABLE $viewName ADD IF NOT EXISTS PARTITION (a='4', b='8')", + s"$viewName is a temp view. 'ALTER TABLE ... ADD PARTITION ...' expects a table") + assertAnalysisError( + s"ALTER TABLE $viewName DROP PARTITION (a='4', b='8')", + s"$viewName is a temp view. 'ALTER TABLE ... DROP PARTITION ...' expects a table") // For the following v2 ALERT TABLE statements, unsupported operations are checked first // before resolving the relations. 
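The assertions above all follow one pattern: run the statement, intercept the AnalysisException, and check that its message contains the expected fragment. A minimal sketch of the kind of helper these tests assume, written as a suite member so that sql, intercept, and AnalysisException are in scope (the suite's actual assertAnalysisError may be defined elsewhere and differ in detail):

  private def assertAnalysisError(sqlText: String, expectedFragment: String): Unit = {
    // Expect analysis to fail for the given statement and verify the message fragment.
    val errorMessage = intercept[AnalysisException] {
      sql(sqlText)
    }.getMessage
    assert(errorMessage.contains(expectedFragment))
  }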
@@ -168,15 +187,28 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { val dataFilePath = Thread.currentThread().getContextClassLoader.getResource("data/files/employee.dat") - assertNoSuchTable(s"""LOAD DATA LOCAL INPATH "$dataFilePath" INTO TABLE $viewName""") - assertNoSuchTable(s"TRUNCATE TABLE $viewName") val e2 = intercept[AnalysisException] { + sql(s"""LOAD DATA LOCAL INPATH "$dataFilePath" INTO TABLE $viewName""") + }.getMessage + assert(e2.contains(s"$viewName is a temp view. 'LOAD DATA' expects a table")) + val e3 = intercept[AnalysisException] { + sql(s"TRUNCATE TABLE $viewName") + }.getMessage + assert(e3.contains(s"$viewName is a temp view. 'TRUNCATE TABLE' expects a table")) + val e4 = intercept[AnalysisException] { sql(s"SHOW CREATE TABLE $viewName") }.getMessage - assert(e2.contains("SHOW CREATE TABLE is not supported on a temporary view")) - assertNoSuchTable(s"SHOW PARTITIONS $viewName") - assertNoSuchTable(s"ANALYZE TABLE $viewName COMPUTE STATISTICS") - assertNoSuchTable(s"ANALYZE TABLE $viewName COMPUTE STATISTICS FOR COLUMNS id") + assert(e4.contains( + s"$viewName is a temp view. 'SHOW CREATE TABLE' expects a table or permanent view.")) + val e5 = intercept[AnalysisException] { + sql(s"ANALYZE TABLE $viewName COMPUTE STATISTICS") + }.getMessage + assert(e5.contains( + s"$viewName is a temp view. 'ANALYZE TABLE' expects a table or permanent view.")) + val e6 = intercept[AnalysisException] { + sql(s"ANALYZE TABLE $viewName COMPUTE STATISTICS FOR COLUMNS id") + }.getMessage + assert(e6.contains(s"Temporary view `$viewName` is not cached for analyzing columns.")) } } @@ -205,12 +237,12 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { e = intercept[AnalysisException] { sql(s"""LOAD DATA LOCAL INPATH "$dataFilePath" INTO TABLE $viewName""") }.getMessage - assert(e.contains(s"Target table in LOAD DATA cannot be a view: `default`.`testview`")) + assert(e.contains("default.testView is a view. 'LOAD DATA' expects a table")) e = intercept[AnalysisException] { sql(s"TRUNCATE TABLE $viewName") }.getMessage - assert(e.contains(s"Operation not allowed: TRUNCATE TABLE on views: `default`.`testview`")) + assert(e.contains("default.testView is a view. 'TRUNCATE TABLE' expects a table")) } } @@ -426,8 +458,13 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { } test("should not allow ALTER VIEW AS when the view does not exist") { - assertNoSuchTable("ALTER VIEW testView AS SELECT 1, 2") - assertNoSuchTable("ALTER VIEW default.testView AS SELECT 1, 2") + assertAnalysisError( + "ALTER VIEW testView AS SELECT 1, 2", + "View not found: testView") + + assertAnalysisError( + "ALTER VIEW default.testView AS SELECT 1, 2", + "View not found: default.testView") } test("ALTER VIEW AS should try to alter temp view first if view name has no database part") { @@ -686,31 +723,6 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { } } - test("restrict the nested level of a view") { - val viewNames = Array.range(0, 11).map(idx => s"view$idx") - withView(viewNames: _*) { - sql("CREATE VIEW view0 AS SELECT * FROM jt") - Array.range(0, 10).foreach { idx => - sql(s"CREATE VIEW view${idx + 1} AS SELECT * FROM view$idx") - } - - withSQLConf(MAX_NESTED_VIEW_DEPTH.key -> "10") { - val e = intercept[AnalysisException] { - sql("SELECT * FROM view10") - }.getMessage - assert(e.contains("The depth of view `default`.`view0` exceeds the maximum view " + - "resolution depth (10). Analysis is aborted to avoid errors. 
Increase the value " + - s"of ${MAX_NESTED_VIEW_DEPTH.key} to work around this.")) - } - - val e = intercept[IllegalArgumentException] { - withSQLConf(MAX_NESTED_VIEW_DEPTH.key -> "0") {} - }.getMessage - assert(e.contains("The maximum depth of a view reference in a nested view must be " + - "positive.")) - } - } - test("permanent view should be case-preserving") { withView("v") { sql("CREATE VIEW v AS SELECT 1 as aBc") @@ -745,4 +757,146 @@ abstract class SQLViewSuite extends QueryTest with SQLTestUtils { } } } + + test("temporary view should ignore useCurrentSQLConfigsForView config") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + withTempView("v1") { + sql("CREATE TEMPORARY VIEW v1 AS SELECT 1/0") + withSQLConf( + USE_CURRENT_SQL_CONFIGS_FOR_VIEW.key -> "true", + ANSI_ENABLED.key -> "true") { + checkAnswer(sql("SELECT * FROM v1"), Seq(Row(null))) + } + } + } + } + + test("alter temporary view should follow current storeAnalyzedPlanForView config") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + withView("v1") { + withSQLConf(STORE_ANALYZED_PLAN_FOR_VIEW.key -> "true") { + sql("CREATE TEMPORARY VIEW v1 AS SELECT * FROM t") + Seq(4, 6, 5).toDF("c1").write.mode("overwrite").format("parquet").saveAsTable("t") + val e = intercept[SparkException] { + sql("SELECT * FROM v1").collect() + }.getMessage + assert(e.contains("does not exist")) + } + + withSQLConf(STORE_ANALYZED_PLAN_FOR_VIEW.key -> "false") { + // alter view from legacy to non-legacy config + sql("ALTER VIEW v1 AS SELECT * FROM t") + Seq(1, 3, 5).toDF("c1").write.mode("overwrite").format("parquet").saveAsTable("t") + checkAnswer(sql("SELECT * FROM v1"), Seq(Row(1), Row(3), Row(5))) + } + + withSQLConf(STORE_ANALYZED_PLAN_FOR_VIEW.key -> "true") { + // alter view from non-legacy to legacy config + sql("ALTER VIEW v1 AS SELECT * FROM t") + Seq(2, 4, 6).toDF("c1").write.mode("overwrite").format("parquet").saveAsTable("t") + val e = intercept[SparkException] { + sql("SELECT * FROM v1").collect() + }.getMessage + assert(e.contains("does not exist")) + } + } + } + } + + test("local temp view refers global temp view") { + withGlobalTempView("v1") { + withTempView("v2") { + val globalTempDB = spark.sharedState.globalTempViewManager.database + sql("CREATE GLOBAL TEMPORARY VIEW v1 AS SELECT 1") + sql(s"CREATE TEMPORARY VIEW v2 AS SELECT * FROM ${globalTempDB}.v1") + checkAnswer(sql("SELECT * FROM v2"), Seq(Row(1))) + } + } + } + + test("global temp view refers local temp view") { + withTempView("v1") { + withGlobalTempView("v2") { + val globalTempDB = spark.sharedState.globalTempViewManager.database + sql("CREATE TEMPORARY VIEW v1 AS SELECT 1") + sql(s"CREATE GLOBAL TEMPORARY VIEW v2 AS SELECT * FROM v1") + checkAnswer(sql(s"SELECT * FROM ${globalTempDB}.v2"), Seq(Row(1))) + } + } + } + + test("SPARK-33141: view should be parsed and analyzed with configs set when creating") { + withTable("t") { + withView("v1", "v2", "v3", "v4", "v5") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + sql("CREATE VIEW v1 (c1) AS SELECT C1 FROM t") + sql("CREATE VIEW v2 (c1) AS SELECT c1 FROM t ORDER BY 1 ASC, c1 DESC") + sql("CREATE VIEW v3 (c1, count) AS SELECT c1, count(c1) FROM t GROUP BY 1") + sql("CREATE VIEW v4 (a, count) AS SELECT c1 as a, count(c1) FROM t GROUP BY a") + sql("CREATE VIEW v5 (c1) AS SELECT 1/0") + + withSQLConf(CASE_SENSITIVE.key -> "true") { + checkAnswer(sql("SELECT * FROM v1"), Seq(Row(2), Row(3), Row(1))) + } + 
withSQLConf(ORDER_BY_ORDINAL.key -> "false") { + checkAnswer(sql("SELECT * FROM v2"), Seq(Row(1), Row(2), Row(3))) + } + withSQLConf(GROUP_BY_ORDINAL.key -> "false") { + checkAnswer(sql("SELECT * FROM v3"), + Seq(Row(1, 1), Row(2, 1), Row(3, 1))) + } + withSQLConf(GROUP_BY_ALIASES.key -> "false") { + checkAnswer(sql("SELECT * FROM v4"), + Seq(Row(1, 1), Row(2, 1), Row(3, 1))) + } + withSQLConf(ANSI_ENABLED.key -> "true") { + checkAnswer(sql("SELECT * FROM v5"), Seq(Row(null))) + } + + withSQLConf(USE_CURRENT_SQL_CONFIGS_FOR_VIEW.key -> "true") { + withSQLConf(CASE_SENSITIVE.key -> "true") { + val e = intercept[AnalysisException] { + sql("SELECT * FROM v1") + }.getMessage + assert(e.contains("cannot resolve '`C1`' given input columns: " + + "[spark_catalog.default.t.c1]")) + } + withSQLConf(ORDER_BY_ORDINAL.key -> "false") { + checkAnswer(sql("SELECT * FROM v2"), Seq(Row(3), Row(2), Row(1))) + } + withSQLConf(GROUP_BY_ORDINAL.key -> "false") { + val e = intercept[AnalysisException] { + sql("SELECT * FROM v3") + }.getMessage + assert(e.contains("expression 'spark_catalog.default.t.`c1`' is neither present " + + "in the group by, nor is it an aggregate function. Add to group by or wrap in " + + "first() (or first_value) if you don't care which value you get.")) + } + withSQLConf(GROUP_BY_ALIASES.key -> "false") { + val e = intercept[AnalysisException] { + sql("SELECT * FROM v4") + }.getMessage + assert(e.contains("cannot resolve '`a`' given input columns: " + + "[spark_catalog.default.t.c1]")) + } + withSQLConf(ANSI_ENABLED.key -> "true") { + val e = intercept[ArithmeticException] { + sql("SELECT * FROM v5").collect() + }.getMessage + assert(e.contains("divide by zero")) + } + } + + withSQLConf(ANSI_ENABLED.key -> "true") { + sql("ALTER VIEW v1 AS SELECT 1/0") + } + val e = intercept[ArithmeticException] { + sql("SELECT * FROM v1").collect() + }.getMessage + assert(e.contains("divide by zero")) + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewTestSuite.scala new file mode 100644 index 0000000000000..8c3d92358a975 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewTestSuite.scala @@ -0,0 +1,270 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution + +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} +import org.apache.spark.sql.internal.SQLConf._ +import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} + +/** + * A base suite contains a set of view related test cases for different kind of views + * Currently, the test cases in this suite should have same behavior across all kind of views + * TODO: Combine this with [[SQLViewSuite]] + */ +abstract class SQLViewTestSuite extends QueryTest with SQLTestUtils { + import testImplicits._ + + protected def viewTypeString: String + protected def formattedViewName(viewName: String): String + + def createView( + viewName: String, + sqlText: String, + columnNames: Seq[String] = Seq.empty, + replace: Boolean = false): String = { + val replaceString = if (replace) "OR REPLACE" else "" + val columnString = if (columnNames.nonEmpty) columnNames.mkString("(", ",", ")") else "" + sql(s"CREATE $replaceString $viewTypeString $viewName $columnString AS $sqlText") + formattedViewName(viewName) + } + + def checkViewOutput(viewName: String, expectedAnswer: Seq[Row]): Unit = { + checkAnswer(sql(s"SELECT * FROM $viewName"), expectedAnswer) + } + + test("change SQLConf should not change view behavior - caseSensitiveAnalysis") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName = createView("v1", "SELECT c1 FROM t", Seq("C1")) + withView(viewName) { + Seq("true", "false").foreach { flag => + withSQLConf(CASE_SENSITIVE.key -> flag) { + checkViewOutput(viewName, Seq(Row(2), Row(3), Row(1))) + } + } + } + } + } + + test("change SQLConf should not change view behavior - orderByOrdinal") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName = createView("v1", "SELECT c1 FROM t ORDER BY 1 ASC, c1 DESC", Seq("c1")) + withView(viewName) { + Seq("true", "false").foreach { flag => + withSQLConf(ORDER_BY_ORDINAL.key -> flag) { + checkViewOutput(viewName, Seq(Row(1), Row(2), Row(3))) + } + } + } + } + } + + test("change SQLConf should not change view behavior - groupByOrdinal") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName = createView("v1", "SELECT c1, count(c1) FROM t GROUP BY 1", Seq("c1", "count")) + withView(viewName) { + Seq("true", "false").foreach { flag => + withSQLConf(GROUP_BY_ORDINAL.key -> flag) { + checkViewOutput(viewName, Seq(Row(1, 1), Row(2, 1), Row(3, 1))) + } + } + } + } + } + + test("change SQLConf should not change view behavior - groupByAliases") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName = createView( + "v1", "SELECT c1 as a, count(c1) FROM t GROUP BY a", Seq("a", "count")) + withView(viewName) { + Seq("true", "false").foreach { flag => + withSQLConf(GROUP_BY_ALIASES.key -> flag) { + checkViewOutput(viewName, Seq(Row(1, 1), Row(2, 1), Row(3, 1))) + } + } + } + } + } + + test("change SQLConf should not change view behavior - ansiEnabled") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName = createView("v1", "SELECT 1/0", Seq("c1")) + withView(viewName) { + Seq("true", "false").foreach { flag => + withSQLConf(ANSI_ENABLED.key -> flag) { + checkViewOutput(viewName, Seq(Row(null))) + } + } + } + } + } + + test("change current database should not change view behavior") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val 
viewName = createView("v1", "SELECT * FROM t") + withView(viewName) { + withTempDatabase { db => + sql(s"USE $db") + Seq(4, 5, 6).toDF("c1").write.format("parquet").saveAsTable("t") + checkViewOutput(viewName, Seq(Row(2), Row(3), Row(1))) + } + } + } + } + + test("view should read the new data if table is updated") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName = createView("v1", "SELECT c1 FROM t", Seq("c1")) + withView(viewName) { + Seq(9, 7, 8).toDF("c1").write.mode("overwrite").format("parquet").saveAsTable("t") + checkViewOutput(viewName, Seq(Row(9), Row(7), Row(8))) + } + } + } + + test("add column for table should not affect view output") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName = createView("v1", "SELECT * FROM t") + withView(viewName) { + sql("ALTER TABLE t ADD COLUMN (c2 INT)") + checkViewOutput(viewName, Seq(Row(2), Row(3), Row(1))) + } + } + } + + test("check cyclic view reference on CREATE OR REPLACE VIEW") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName1 = createView("v1", "SELECT * FROM t") + val viewName2 = createView("v2", s"SELECT * FROM $viewName1") + withView(viewName2, viewName1) { + val e = intercept[AnalysisException] { + createView("v1", s"SELECT * FROM $viewName2", replace = true) + }.getMessage + assert(e.contains("Recursive view")) + } + } + } + + test("check cyclic view reference on ALTER VIEW") { + withTable("t") { + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + val viewName1 = createView("v1", "SELECT * FROM t") + val viewName2 = createView("v2", s"SELECT * FROM $viewName1") + withView(viewName2, viewName1) { + val e = intercept[AnalysisException] { + sql(s"ALTER VIEW $viewName1 AS SELECT * FROM $viewName2") + }.getMessage + assert(e.contains("Recursive view")) + } + } + } + + test("restrict the nested level of a view") { + val viewNames = scala.collection.mutable.ArrayBuffer.empty[String] + val view0 = createView("view0", "SELECT 1") + viewNames += view0 + for (i <- 1 to 10) { + viewNames += createView(s"view$i", s"SELECT * FROM ${viewNames.last}") + } + withView(viewNames.reverse.toSeq: _*) { + withSQLConf(MAX_NESTED_VIEW_DEPTH.key -> "10") { + val e = intercept[AnalysisException] { + sql(s"SELECT * FROM ${viewNames.last}") + }.getMessage + assert(e.contains("exceeds the maximum view resolution depth (10)")) + assert(e.contains(s"Increase the value of ${MAX_NESTED_VIEW_DEPTH.key}")) + } + } + } + + test("view should use captured catalog and namespace to resolve relation") { + withTempDatabase { dbName => + withTable("default.t", s"$dbName.t") { + withTempView("t") { + // create a table in default database + sql("USE DEFAULT") + Seq(2, 3, 1).toDF("c1").write.format("parquet").saveAsTable("t") + // create a view refer the created table in default database + val viewName = createView("v1", "SELECT * FROM t") + // using another database to create a table with same name + sql(s"USE $dbName") + Seq(4, 5, 6).toDF("c1").write.format("parquet").saveAsTable("t") + // create a temporary view with the same name + sql("CREATE TEMPORARY VIEW t AS SELECT 1") + withView(viewName) { + // view v1 should still refer the table defined in `default` database + checkViewOutput(viewName, Seq(Row(2), Row(3), Row(1))) + } + } + } + } + } + + test("SPARK-33692: view should use captured catalog and namespace to lookup function") { + val avgFuncClass = "test.org.apache.spark.sql.MyDoubleAvg" + 
val sumFuncClass = "test.org.apache.spark.sql.MyDoubleSum" + val functionName = "test_udf" + withTempDatabase { dbName => + withUserDefinedFunction( + s"default.$functionName" -> false, + s"$dbName.$functionName" -> false, + functionName -> true) { + // create a function in default database + sql("USE DEFAULT") + sql(s"CREATE FUNCTION $functionName AS '$avgFuncClass'") + // create a view using a function in 'default' database + val viewName = createView("v1", s"SELECT $functionName(col1) FROM VALUES (1), (2), (3)") + // create function in another database with the same function name + sql(s"USE $dbName") + sql(s"CREATE FUNCTION $functionName AS '$sumFuncClass'") + // create temporary function with the same function name + sql(s"CREATE TEMPORARY FUNCTION $functionName AS '$sumFuncClass'") + withView(viewName) { + // view v1 should still using function defined in `default` database + checkViewOutput(viewName, Seq(Row(102.0))) + } + } + } + } +} + +class LocalTempViewTestSuite extends SQLViewTestSuite with SharedSparkSession { + override protected def viewTypeString: String = "TEMPORARY VIEW" + override protected def formattedViewName(viewName: String): String = viewName +} + +class GlobalTempViewTestSuite extends SQLViewTestSuite with SharedSparkSession { + override protected def viewTypeString: String = "GLOBAL TEMPORARY VIEW" + override protected def formattedViewName(viewName: String): String = { + val globalTempDB = spark.sharedState.globalTempViewManager.database + s"$globalTempDB.$viewName" + } +} + +class PersistedViewTestSuite extends SQLViewTestSuite with SharedSparkSession { + override protected def viewTypeString: String = "VIEW" + override protected def formattedViewName(viewName: String): String = s"default.$viewName" +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala index 67ec1028f1998..eec396b2e3998 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLWindowFunctionSuite.scala @@ -372,7 +372,7 @@ class SQLWindowFunctionSuite extends QueryTest with SharedSparkSession { spark.catalog.dropTempView("nums") } - test("window function: mutiple window expressions specified by range in a single expression") { + test("window function: multiple window expressions specified by range in a single expression") { val nums = sparkContext.parallelize(1 to 10).map(x => (x, x % 2)).toDF("x", "y") nums.createOrReplaceTempView("nums") withTempView("nums") { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SameResultSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SameResultSuite.scala index ddaa2687eaf1a..18d36670306b8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SameResultSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SameResultSuite.scala @@ -20,7 +20,7 @@ package org.apache.spark.sql.execution import org.apache.spark.sql.{DataFrame, QueryTest} import org.apache.spark.sql.catalyst.expressions.AttributeReference import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Project} -import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, FileScan} +import org.apache.spark.sql.execution.datasources.v2.BatchScanExec import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession diff 
--git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala index 7654a9d982059..6a4f3f62641f8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala @@ -97,6 +97,19 @@ class SortSuite extends SparkPlanTest with SharedSparkSession { } } + test("SPARK-33260: sort order is a Stream") { + val input = Seq( + ("Hello", 4, 2.0), + ("Hello", 1, 1.0), + ("World", 8, 3.0) + ) + checkAnswer( + input.toDF("a", "b", "c"), + (child: SparkPlan) => SortExec(Stream('a.asc, 'b.asc, 'c.asc), global = true, child = child), + input.sortBy(t => (t._1, t._2, t._3)).map(Row.fromTuple), + sortAnswers = false) + } + // Test sorting on different data types for ( dataType <- DataTypeTestUtils.atomicTypes ++ Set(NullType); diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala index 56fff1107ae39..dfec6bccb0c58 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanSuite.scala @@ -88,4 +88,13 @@ class SparkPlanSuite extends QueryTest with SharedSparkSession { test("SPARK-30780 empty LocalTableScan should use RDD without partitions") { assert(LocalTableScanExec(Nil, Nil).execute().getNumPartitions == 0) } + + test("SPARK-33617: change default parallelism of LocalTableScan") { + Seq(1, 4).foreach { minPartitionNum => + withSQLConf(SQLConf.LEAF_NODE_DEFAULT_PARALLELISM.key -> minPartitionNum.toString) { + val df = spark.sql("SELECT * FROM VALUES (1), (2), (3), (4), (5), (6), (7), (8)") + assert(df.rdd.partitions.length === minPartitionNum) + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanTest.scala index 7ddf9d87a6aca..f1fcf3bc5125e 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkPlanTest.scala @@ -17,7 +17,6 @@ package org.apache.spark.sql.execution -import scala.language.implicitConversions import scala.util.control.NonFatal import org.apache.spark.SparkFunSuite diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkScriptTransformationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkScriptTransformationSuite.scala new file mode 100644 index 0000000000000..6ff7c5d6d2f3a --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkScriptTransformationSuite.scala @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution + +import org.apache.spark.{SparkException, TestUtils} +import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression} +import org.apache.spark.sql.catalyst.parser.ParseException +import org.apache.spark.sql.test.SharedSparkSession + +class SparkScriptTransformationSuite extends BaseScriptTransformationSuite with SharedSparkSession { + import testImplicits._ + + override def createScriptTransformationExec( + input: Seq[Expression], + script: String, + output: Seq[Attribute], + child: SparkPlan, + ioschema: ScriptTransformationIOSchema): BaseScriptTransformationExec = { + SparkScriptTransformationExec( + input = input, + script = script, + output = output, + child = child, + ioschema = ioschema + ) + } + + test("SPARK-32106: TRANSFORM with serde without hive should throw exception") { + assume(TestUtils.testCommandAvailable("/bin/bash")) + withTempView("v") { + val df = Seq("a", "b", "c").map(Tuple1.apply).toDF("a") + df.createTempView("v") + + val e = intercept[ParseException] { + sql( + """ + |SELECT TRANSFORM (a) + |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + |USING 'cat' AS (a) + |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + |FROM v + """.stripMargin) + }.getMessage + assert(e.contains("TRANSFORM with serde is only supported in hive mode")) + } + } + + test("SPARK-32106: TRANSFORM doesn't support ArrayType/MapType/StructType " + + "as output data type (no serde)") { + assume(TestUtils.testCommandAvailable("/bin/bash")) + // check for ArrayType + val e1 = intercept[SparkException] { + sql( + """ + |SELECT TRANSFORM(a) + |USING 'cat' AS (a array) + |FROM VALUES (array(1, 1), map('1', 1), struct(1, 'a')) t(a, b, c) + """.stripMargin).collect() + }.getMessage + assert(e1.contains("SparkScriptTransformation without serde does not support" + + " ArrayType as output data type")) + + // check for MapType + val e2 = intercept[SparkException] { + sql( + """ + |SELECT TRANSFORM(b) + |USING 'cat' AS (b map) + |FROM VALUES (array(1, 1), map('1', 1), struct(1, 'a')) t(a, b, c) + """.stripMargin).collect() + }.getMessage + assert(e2.contains("SparkScriptTransformation without serde does not support" + + " MapType as output data type")) + + // check for StructType + val e3 = intercept[SparkException] { + sql( + """ + |SELECT TRANSFORM(c) + |USING 'cat' AS (c struct) + |FROM VALUES (array(1, 1), map('1', 1), struct(1, 'a')) t(a, b, c) + """.stripMargin).collect() + }.getMessage + assert(e3.contains("SparkScriptTransformation without serde does not support" + + " StructType as output data type")) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala index af9088003f3b0..f1788e9c31af8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala @@ -20,16 +20,14 @@ package org.apache.spark.sql.execution import scala.collection.JavaConverters._ import org.apache.spark.internal.config.ConfigEntry -import org.apache.spark.sql.SaveMode import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedAlias, UnresolvedAttribute, UnresolvedRelation, UnresolvedStar} -import 
org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.catalyst.expressions.{Ascending, AttributeReference, Concat, SortOrder} import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.execution.command._ -import org.apache.spark.sql.execution.datasources.{CreateTable, RefreshResource} -import org.apache.spark.sql.internal.{HiveSerDe, SQLConf, StaticSQLConf} -import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType} +import org.apache.spark.sql.execution.datasources.{CreateTempViewUsing, RefreshResource} +import org.apache.spark.sql.internal.StaticSQLConf +import org.apache.spark.sql.types.StringType /** * Parser test cases for rules defined in [[SparkSqlParser]]. @@ -40,26 +38,10 @@ import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType class SparkSqlParserSuite extends AnalysisTest { import org.apache.spark.sql.catalyst.dsl.expressions._ - val newConf = new SQLConf - private lazy val parser = new SparkSqlParser(newConf) - - /** - * Normalizes plans: - * - CreateTable the createTime in tableDesc will replaced by -1L. - */ - override def normalizePlan(plan: LogicalPlan): LogicalPlan = { - plan match { - case CreateTable(tableDesc, mode, query) => - val newTableDesc = tableDesc.copy(createTime = -1L) - CreateTable(newTableDesc, mode, query) - case _ => plan // Don't transform - } - } + private lazy val parser = new SparkSqlParser() private def assertEqual(sqlCommand: String, plan: LogicalPlan): Unit = { - val normalized1 = normalizePlan(parser.parsePlan(sqlCommand)) - val normalized2 = normalizePlan(plan) - comparePlans(normalized1, normalized2) + comparePlans(parser.parsePlan(sqlCommand), plan) } private def intercept(sqlCommand: String, messages: String*): Unit = @@ -70,9 +52,21 @@ class SparkSqlParserSuite extends AnalysisTest { StaticSQLConf ConfigEntry.knownConfigs.values.asScala.foreach { config => assertEqual(s"SET ${config.key}", SetCommand(Some(config.key -> None))) - if (config.defaultValue.isDefined && config.defaultValueString != null) { - assertEqual(s"SET ${config.key}=${config.defaultValueString}", - SetCommand(Some(config.key -> Some(config.defaultValueString)))) + assertEqual(s"SET `${config.key}`", SetCommand(Some(config.key -> None))) + + val defaultValueStr = config.defaultValueString + if (config.defaultValue.isDefined && defaultValueStr != null) { + assertEqual(s"SET ${config.key}=`$defaultValueStr`", + SetCommand(Some(config.key -> Some(defaultValueStr)))) + assertEqual(s"SET `${config.key}`=`$defaultValueStr`", + SetCommand(Some(config.key -> Some(defaultValueStr)))) + + if (!defaultValueStr.contains(";")) { + assertEqual(s"SET ${config.key}=$defaultValueStr", + SetCommand(Some(config.key -> Some(defaultValueStr)))) + assertEqual(s"SET `${config.key}`=$defaultValueStr", + SetCommand(Some(config.key -> Some(defaultValueStr)))) + } } assertEqual(s"RESET ${config.key}", ResetCommand(Some(config.key))) } @@ -101,10 +95,11 @@ class SparkSqlParserSuite extends AnalysisTest { SetCommand(Some("spark.sql. key" -> Some("v a lu e")))) assertEqual("SET `spark.sql. key`= -1", SetCommand(Some("spark.sql. key" -> Some("-1")))) + assertEqual("SET key=", SetCommand(Some("key" -> Some("")))) val expectedErrMsg = "Expected format is 'SET', 'SET key', or " + - "'SET key=value'. If you want to include special characters in key, " + - "please use quotes, e.g., SET `ke y`=value." + "'SET key=value'. 
If you want to include special characters in key, or include semicolon " + + "in value, please use quotes, e.g., SET `ke y`=`v;alue`." intercept("SET spark.sql.key value", expectedErrMsg) intercept("SET spark.sql.key 'value'", expectedErrMsg) intercept("SET spark.sql.key \"value\" ", expectedErrMsg) @@ -115,6 +110,8 @@ class SparkSqlParserSuite extends AnalysisTest { intercept("SET spark.sql. key=value", expectedErrMsg) intercept("SET spark.sql :key=value", expectedErrMsg) intercept("SET spark.sql . key=value", expectedErrMsg) + intercept("SET =", expectedErrMsg) + intercept("SET =value", expectedErrMsg) } test("Report Error for invalid usage of RESET command") { @@ -141,6 +138,33 @@ class SparkSqlParserSuite extends AnalysisTest { intercept("RESET spark.sql : key", expectedErrMsg) } + test("SPARK-33419: Semicolon handling in SET command") { + assertEqual("SET a=1;", SetCommand(Some("a" -> Some("1")))) + assertEqual("SET a=1;;", SetCommand(Some("a" -> Some("1")))) + + assertEqual("SET a=`1`;", SetCommand(Some("a" -> Some("1")))) + assertEqual("SET a=`1;`", SetCommand(Some("a" -> Some("1;")))) + assertEqual("SET a=`1;`;", SetCommand(Some("a" -> Some("1;")))) + + assertEqual("SET `a`=1;;", SetCommand(Some("a" -> Some("1")))) + assertEqual("SET `a`=`1;`", SetCommand(Some("a" -> Some("1;")))) + assertEqual("SET `a`=`1;`;", SetCommand(Some("a" -> Some("1;")))) + + val expectedErrMsg = "Expected format is 'SET', 'SET key', or " + + "'SET key=value'. If you want to include special characters in key, or include semicolon " + + "in value, please use quotes, e.g., SET `ke y`=`v;alue`." + + intercept("SET a=1; SELECT 1", expectedErrMsg) + intercept("SET a=1;2;;", expectedErrMsg) + + intercept("SET a b=`1;;`", + "'a b' is an invalid property key, please use quotes, e.g. SET `a b`=`1;;`") + + intercept("SET `a`=1;2;;", + "'1;2;;' is an invalid property value, please use quotes, e.g." 
+ + " SET `a`=`1;2;;`") + } + test("refresh resource") { assertEqual("REFRESH prefix_path", RefreshResource("prefix_path")) assertEqual("REFRESH /", RefreshResource("/")) @@ -160,108 +184,13 @@ class SparkSqlParserSuite extends AnalysisTest { intercept("REFRESH", "Resource paths cannot be empty in REFRESH statements") } - private def createTableUsing( - table: String, - database: Option[String] = None, - tableType: CatalogTableType = CatalogTableType.MANAGED, - storage: CatalogStorageFormat = CatalogStorageFormat.empty, - schema: StructType = new StructType, - provider: Option[String] = Some("parquet"), - partitionColumnNames: Seq[String] = Seq.empty, - bucketSpec: Option[BucketSpec] = None, - mode: SaveMode = SaveMode.ErrorIfExists, - query: Option[LogicalPlan] = None): CreateTable = { - CreateTable( - CatalogTable( - identifier = TableIdentifier(table, database), - tableType = tableType, - storage = storage, - schema = schema, - provider = provider, - partitionColumnNames = partitionColumnNames, - bucketSpec = bucketSpec - ), mode, query - ) - } - - private def createTable( - table: String, - database: Option[String] = None, - tableType: CatalogTableType = CatalogTableType.MANAGED, - storage: CatalogStorageFormat = CatalogStorageFormat.empty.copy( - inputFormat = HiveSerDe.sourceToSerDe("textfile").get.inputFormat, - outputFormat = HiveSerDe.sourceToSerDe("textfile").get.outputFormat, - serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")), - schema: StructType = new StructType, - provider: Option[String] = Some("hive"), - partitionColumnNames: Seq[String] = Seq.empty, - comment: Option[String] = None, - mode: SaveMode = SaveMode.ErrorIfExists, - query: Option[LogicalPlan] = None): CreateTable = { - CreateTable( - CatalogTable( - identifier = TableIdentifier(table, database), - tableType = tableType, - storage = storage, - schema = schema, - provider = provider, - partitionColumnNames = partitionColumnNames, - comment = comment - ), mode, query - ) - } - - test("create table - schema") { - assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) STORED AS textfile", - createTable( - table = "my_tab", - schema = (new StructType) - .add("a", IntegerType, nullable = true, "test") - .add("b", StringType) - ) - ) - assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) " + - "PARTITIONED BY (c INT, d STRING COMMENT 'test2')", - createTable( - table = "my_tab", - schema = (new StructType) - .add("a", IntegerType, nullable = true, "test") - .add("b", StringType) - .add("c", IntegerType) - .add("d", StringType, nullable = true, "test2"), - partitionColumnNames = Seq("c", "d") - ) - ) - assertEqual("CREATE TABLE my_tab(id BIGINT, nested STRUCT) " + - "STORED AS textfile", - createTable( - table = "my_tab", - schema = (new StructType) - .add("id", LongType) - .add("nested", (new StructType) - .add("col1", StringType) - .add("col2", IntegerType) - ) - ) - ) - // Partitioned by a StructType should be accepted by `SparkSqlParser` but will fail an analyze - // rule in `AnalyzeCreateTable`. 
- assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) " + - "PARTITIONED BY (nested STRUCT)", - createTable( - table = "my_tab", - schema = (new StructType) - .add("a", IntegerType, nullable = true, "test") - .add("b", StringType) - .add("nested", (new StructType) - .add("col1", StringType) - .add("col2", IntegerType) - ), - partitionColumnNames = Seq("nested") - ) - ) - intercept("CREATE TABLE my_tab(a: INT COMMENT 'test', b: STRING)", - "no viable alternative at input") + test("SPARK-33118 CREATE TEMPORARY TABLE with LOCATION") { + assertEqual("CREATE TEMPORARY TABLE t USING parquet OPTIONS (path '/data/tmp/testspark1')", + CreateTempViewUsing(TableIdentifier("t", None), None, false, false, "parquet", + Map("path" -> "/data/tmp/testspark1"))) + assertEqual("CREATE TEMPORARY TABLE t USING parquet LOCATION '/data/tmp/testspark1'", + CreateTempViewUsing(TableIdentifier("t", None), None, false, false, "parquet", + Map("path" -> "/data/tmp/testspark1"))) } test("describe query") { @@ -408,5 +337,9 @@ class SparkSqlParserSuite extends AnalysisTest { |FROM v """.stripMargin, "LINES TERMINATED BY only supports newline '\\n' right now") - } + } + + test("CLEAR CACHE") { + assertEqual("CLEAR CACHE", ClearCacheCommand) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SubExprEliminationBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SubExprEliminationBenchmark.scala new file mode 100644 index 0000000000000..0ed0126add7a2 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SubExprEliminationBenchmark.scala @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.spark.sql.execution + +import org.apache.spark.benchmark.Benchmark +import org.apache.spark.sql.Column +import org.apache.spark.sql.catalyst.expressions.{Expression, Literal, Or} +import org.apache.spark.sql.execution.benchmark.SqlBasedBenchmark +import org.apache.spark.sql.functions._ +import org.apache.spark.sql.internal.SQLConf + +/** + * The benchmarks aims to measure performance of the queries where there are subexpression + * elimination or not. + * To run this benchmark: + * {{{ + * 1. without sbt: + * bin/spark-submit --class --jars , + * + * 2. build/sbt "sql/test:runMain " + * 3. generate result: + * SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain " + * Results will be written to "benchmarks/SubExprEliminationBenchmark-results.txt". 
+ * }}} + */ +object SubExprEliminationBenchmark extends SqlBasedBenchmark { + import spark.implicits._ + + def withFromJson(rowsNum: Int, numIters: Int): Unit = { + val benchmark = new Benchmark("from_json as subExpr in Project", rowsNum, output = output) + + withTempPath { path => + prepareDataInfo(benchmark) + val numCols = 1000 + val schema = writeWideRow(path.getAbsolutePath, rowsNum, numCols) + + val cols = (0 until numCols).map { idx => + from_json('value, schema).getField(s"col$idx") + } + + Seq( + ("false", "true", "CODEGEN_ONLY"), + ("false", "false", "NO_CODEGEN"), + ("true", "true", "CODEGEN_ONLY"), + ("true", "false", "NO_CODEGEN") + ).foreach { case (subExprEliminationEnabled, codegenEnabled, codegenFactory) => + // We only benchmark subexpression performance under codegen/non-codegen, so disabling + // json optimization. + val caseName = s"subExprElimination $subExprEliminationEnabled, codegen: $codegenEnabled" + benchmark.addCase(caseName, numIters) { _ => + withSQLConf( + SQLConf.SUBEXPRESSION_ELIMINATION_ENABLED.key -> subExprEliminationEnabled, + SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled, + SQLConf.CODEGEN_FACTORY_MODE.key -> codegenFactory, + SQLConf.JSON_EXPRESSION_OPTIMIZATION.key -> "false") { + val df = spark.read + .text(path.getAbsolutePath) + .select(cols: _*) + df.write.mode("overwrite").format("noop").save() + } + } + } + + benchmark.run() + } + } + + def withFilter(rowsNum: Int, numIters: Int): Unit = { + val benchmark = new Benchmark("from_json as subExpr in Filter", rowsNum, output = output) + + withTempPath { path => + prepareDataInfo(benchmark) + val numCols = 1000 + val schema = writeWideRow(path.getAbsolutePath, rowsNum, numCols) + + val predicate = (0 until numCols).map { idx => + (from_json('value, schema).getField(s"col$idx") >= Literal(100000)).expr + }.asInstanceOf[Seq[Expression]].reduce(Or) + + Seq( + ("false", "true", "CODEGEN_ONLY"), + ("false", "false", "NO_CODEGEN"), + ("true", "true", "CODEGEN_ONLY"), + ("true", "false", "NO_CODEGEN") + ).foreach { case (subExprEliminationEnabled, codegenEnabled, codegenFactory) => + // We only benchmark subexpression performance under codegen/non-codegen, so disabling + // json optimization. 
+ val caseName = s"subExprElimination $subExprEliminationEnabled, codegen: $codegenEnabled" + benchmark.addCase(caseName, numIters) { _ => + withSQLConf( + SQLConf.SUBEXPRESSION_ELIMINATION_ENABLED.key -> subExprEliminationEnabled, + SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled, + SQLConf.CODEGEN_FACTORY_MODE.key -> codegenFactory, + SQLConf.JSON_EXPRESSION_OPTIMIZATION.key -> "false") { + val df = spark.read + .text(path.getAbsolutePath) + .where(Column(predicate)) + df.write.mode("overwrite").format("noop").save() + } + } + } + + benchmark.run() + } + } + + override def runBenchmarkSuite(mainArgs: Array[String]): Unit = { + val numIters = 3 + runBenchmark("Benchmark for performance of subexpression elimination") { + withFromJson(100, numIters) + withFilter(100, numIters) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala index fe40d7dce344d..71eaed269e6c2 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/WholeStageCodegenSuite.scala @@ -71,28 +71,25 @@ class WholeStageCodegenSuite extends QueryTest with SharedSparkSession } test("ShuffledHashJoin should be included in WholeStageCodegen") { - withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "30", - SQLConf.SHUFFLE_PARTITIONS.key -> "2", - SQLConf.PREFER_SORTMERGEJOIN.key -> "false") { - val df1 = spark.range(5).select($"id".as("k1")) - val df2 = spark.range(15).select($"id".as("k2")) - val df3 = spark.range(6).select($"id".as("k3")) - - // test one shuffled hash join - val oneJoinDF = df1.join(df2, $"k1" === $"k2") - assert(oneJoinDF.queryExecution.executedPlan.collect { - case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true - }.size === 1) - checkAnswer(oneJoinDF, Seq(Row(0, 0), Row(1, 1), Row(2, 2), Row(3, 3), Row(4, 4))) - - // test two shuffled hash joins - val twoJoinsDF = df1.join(df2, $"k1" === $"k2").join(df3, $"k1" === $"k3") - assert(twoJoinsDF.queryExecution.executedPlan.collect { - case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true - }.size === 2) - checkAnswer(twoJoinsDF, - Seq(Row(0, 0, 0), Row(1, 1, 1), Row(2, 2, 2), Row(3, 3, 3), Row(4, 4, 4))) - } + val df1 = spark.range(5).select($"id".as("k1")) + val df2 = spark.range(15).select($"id".as("k2")) + val df3 = spark.range(6).select($"id".as("k3")) + + // test one shuffled hash join + val oneJoinDF = df1.join(df2.hint("SHUFFLE_HASH"), $"k1" === $"k2") + assert(oneJoinDF.queryExecution.executedPlan.collect { + case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true + }.size === 1) + checkAnswer(oneJoinDF, Seq(Row(0, 0), Row(1, 1), Row(2, 2), Row(3, 3), Row(4, 4))) + + // test two shuffled hash joins + val twoJoinsDF = df1.join(df2.hint("SHUFFLE_HASH"), $"k1" === $"k2") + .join(df3.hint("SHUFFLE_HASH"), $"k1" === $"k3") + assert(twoJoinsDF.queryExecution.executedPlan.collect { + case WholeStageCodegenExec(_ : ShuffledHashJoinExec) => true + }.size === 2) + checkAnswer(twoJoinsDF, + Seq(Row(0, 0, 0), Row(1, 1, 1), Row(2, 2, 2), Row(3, 3, 3), Row(4, 4, 4))) } test("Sort should be included in WholeStageCodegen") { @@ -398,8 +395,8 @@ class WholeStageCodegenSuite extends QueryTest with SharedSparkSession // Case2: The parent of a LocalTableScanExec supports WholeStageCodegen. 
// In this case, the LocalTableScanExec should be within a WholeStageCodegen domain // and no more InputAdapter is inserted as the direct parent of the LocalTableScanExec. - val aggedDF = Seq(1, 2, 3).toDF.groupBy("value").sum() - val executedPlan = aggedDF.queryExecution.executedPlan + val aggregatedDF = Seq(1, 2, 3).toDF.groupBy("value").sum() + val executedPlan = aggregatedDF.queryExecution.executedPlan // HashAggregateExec supports WholeStageCodegen and it's the parent of // LocalTableScanExec so LocalTableScanExec should be within a WholeStageCodegen domain. diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala index 7e7248c312e11..75993d49da677 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala @@ -26,15 +26,19 @@ import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListe import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy} import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight} import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan} -import org.apache.spark.sql.execution.{PartialReducerPartitionSpec, ReusedSubqueryExec, ShuffledRowRDD, SparkPlan} +import org.apache.spark.sql.execution.{PartialReducerPartitionSpec, QueryExecution, ReusedSubqueryExec, ShuffledRowRDD, SparkPlan, UnaryExecNode} import org.apache.spark.sql.execution.command.DataWritingCommandExec -import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, Exchange, ReusedExchangeExec, ShuffleExchangeExec} +import org.apache.spark.sql.execution.datasources.noop.NoopDataSource +import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec +import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, Exchange, REPARTITION, REPARTITION_WITH_NUM, ReusedExchangeExec, ShuffleExchangeExec, ShuffleExchangeLike} import org.apache.spark.sql.execution.joins.{BaseJoinExec, BroadcastHashJoinExec, SortMergeJoinExec} import org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.{IntegerType, StructType} +import org.apache.spark.sql.util.QueryExecutionListener import org.apache.spark.util.Utils class AdaptiveQueryExecSuite @@ -751,9 +755,9 @@ class AdaptiveQueryExecSuite Utils.deleteRecursively(tableDir) df1.write.parquet(tableDir.getAbsolutePath) - val agged = spark.table("bucketed_table").groupBy("i").count() + val aggregated = spark.table("bucketed_table").groupBy("i").count() val error = intercept[Exception] { - agged.count() + aggregated.count() } assert(error.getCause().toString contains "Invalid bucket file") assert(error.getSuppressed.size === 0) @@ -842,8 +846,8 @@ class AdaptiveQueryExecSuite withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") { val df = sql("SELECT * FROM testData join testData2 ON key = a where value = '1'") val planBefore = df.queryExecution.executedPlan - assert(planBefore.toString.contains("== Current Plan ==")) - assert(planBefore.toString.contains("== Initial Plan ==")) + assert(!planBefore.toString.contains("== Current Plan ==")) + 
assert(!planBefore.toString.contains("== Initial Plan ==")) df.collect() val planAfter = df.queryExecution.executedPlan assert(planAfter.toString.contains("== Final Plan ==")) @@ -958,9 +962,9 @@ class AdaptiveQueryExecSuite withSQLConf(SQLConf.UI_EXPLAIN_MODE.key -> mode, SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true", SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") { - val dfApdaptive = sql("SELECT * FROM testData JOIN testData2 ON key = a WHERE value = '1'") + val dfAdaptive = sql("SELECT * FROM testData JOIN testData2 ON key = a WHERE value = '1'") try { - checkAnswer(dfApdaptive, Row(1, "1", 1, 1) :: Row(1, "1", 1, 2) :: Nil) + checkAnswer(dfAdaptive, Row(1, "1", 1, 1) :: Row(1, "1", 1, 2) :: Nil) spark.sparkContext.listenerBus.waitUntilEmpty() assert(checkDone) } finally { @@ -1238,4 +1242,217 @@ class AdaptiveQueryExecSuite } } } + + test("Logging plan changes for AQE") { + val testAppender = new LogAppender("plan changes") + withLogAppender(testAppender) { + withSQLConf( + SQLConf.PLAN_CHANGE_LOG_LEVEL.key -> "INFO", + SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true", + SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") { + sql("SELECT * FROM testData JOIN testData2 ON key = a " + + "WHERE value = (SELECT max(a) FROM testData3)").collect() + } + Seq("=== Result of Batch AQE Preparations ===", + "=== Result of Batch AQE Post Stage Creation ===", + "=== Result of Batch AQE Replanning ===", + "=== Result of Batch AQE Query Stage Optimization ===", + "=== Result of Batch AQE Final Query Stage Optimization ===").foreach { expectedMsg => + assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg))) + } + } + } + + test("SPARK-32932: Do not use local shuffle reader at final stage on write command") { + withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.DYNAMIC.toString, + SQLConf.SHUFFLE_PARTITIONS.key -> "5", + SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") { + val data = for ( + i <- 1L to 10L; + j <- 1L to 3L + ) yield (i, j) + + val df = data.toDF("i", "j").repartition($"j") + var noLocalReader: Boolean = false + val listener = new QueryExecutionListener { + override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = { + qe.executedPlan match { + case plan@(_: DataWritingCommandExec | _: V2TableWriteExec) => + assert(plan.asInstanceOf[UnaryExecNode].child.isInstanceOf[AdaptiveSparkPlanExec]) + noLocalReader = collect(plan) { + case exec: CustomShuffleReaderExec if exec.isLocalReader => exec + }.isEmpty + case _ => // ignore other events + } + } + override def onFailure(funcName: String, qe: QueryExecution, + exception: Exception): Unit = {} + } + spark.listenerManager.register(listener) + + withTable("t") { + df.write.partitionBy("j").saveAsTable("t") + sparkContext.listenerBus.waitUntilEmpty() + assert(noLocalReader) + noLocalReader = false + } + + // Test DataSource v2 + val format = classOf[NoopDataSource].getName + df.write.format(format).mode("overwrite").save() + sparkContext.listenerBus.waitUntilEmpty() + assert(noLocalReader) + noLocalReader = false + + spark.listenerManager.unregister(listener) + } + } + + test("SPARK-33494: Do not use local shuffle reader for repartition") { + withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") { + val df = spark.table("testData").repartition('key) + df.collect() + // local shuffle reader breaks partitioning and shouldn't be used for repartition operation + // which is specified by users. 
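The SPARK-33494 test above checks that AQE keeps the shuffle introduced by a user-specified repartition instead of converting it into a local shuffle reader, since the local reader would break the partitioning the user asked for. A hypothetical way to eyeball the same behavior on a plain session (the dataset and object name are illustrative):

import org.apache.spark.sql.SparkSession

object RepartitionUnderAqeSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("repartition-under-aqe")
      .config("spark.sql.adaptive.enabled", "true")
      .getOrCreate()
    import spark.implicits._
    val df = spark.range(0, 1000)
      .withColumn("key", $"id" % 10)
      .repartition($"key")
    df.collect()
    // After execution the adaptive plan is final; the exchange added by repartition should
    // still hash-partition by `key` rather than being read back through a local shuffle reader.
    println(df.queryExecution.executedPlan)
    spark.stop()
  }
}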
+ checkNumLocalShuffleReaders(df.queryExecution.executedPlan, numShufflesWithoutLocalReader = 1) + } + } + + test("SPARK-33551: Do not use custom shuffle reader for repartition") { + def hasRepartitionShuffle(plan: SparkPlan): Boolean = { + find(plan) { + case s: ShuffleExchangeLike => + s.shuffleOrigin == REPARTITION || s.shuffleOrigin == REPARTITION_WITH_NUM + case _ => false + }.isDefined + } + + withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true", + SQLConf.SHUFFLE_PARTITIONS.key -> "5") { + val df = sql( + """ + |SELECT * FROM ( + | SELECT * FROM testData WHERE key = 1 + |) + |RIGHT OUTER JOIN testData2 + |ON value = b + """.stripMargin) + + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") { + // Repartition with no partition num specified. + val dfRepartition = df.repartition('b) + dfRepartition.collect() + val plan = dfRepartition.queryExecution.executedPlan + // The top shuffle from repartition is optimized out. + assert(!hasRepartitionShuffle(plan)) + val bhj = findTopLevelBroadcastHashJoin(plan) + assert(bhj.length == 1) + checkNumLocalShuffleReaders(plan, 1) + // Probe side is coalesced. + val customReader = bhj.head.right.find(_.isInstanceOf[CustomShuffleReaderExec]) + assert(customReader.isDefined) + assert(customReader.get.asInstanceOf[CustomShuffleReaderExec].hasCoalescedPartition) + + // Repartition with partition default num specified. + val dfRepartitionWithNum = df.repartition(5, 'b) + dfRepartitionWithNum.collect() + val planWithNum = dfRepartitionWithNum.queryExecution.executedPlan + // The top shuffle from repartition is optimized out. + assert(!hasRepartitionShuffle(planWithNum)) + val bhjWithNum = findTopLevelBroadcastHashJoin(planWithNum) + assert(bhjWithNum.length == 1) + checkNumLocalShuffleReaders(planWithNum, 1) + // Probe side is not coalesced. + assert(bhjWithNum.head.right.find(_.isInstanceOf[CustomShuffleReaderExec]).isEmpty) + + // Repartition with partition non-default num specified. + val dfRepartitionWithNum2 = df.repartition(3, 'b) + dfRepartitionWithNum2.collect() + val planWithNum2 = dfRepartitionWithNum2.queryExecution.executedPlan + // The top shuffle from repartition is not optimized out, and this is the only shuffle that + // does not have local shuffle reader. + assert(hasRepartitionShuffle(planWithNum2)) + val bhjWithNum2 = findTopLevelBroadcastHashJoin(planWithNum2) + assert(bhjWithNum2.length == 1) + checkNumLocalShuffleReaders(planWithNum2, 1) + val customReader2 = bhjWithNum2.head.right.find(_.isInstanceOf[CustomShuffleReaderExec]) + assert(customReader2.isDefined) + assert(customReader2.get.asInstanceOf[CustomShuffleReaderExec].isLocalReader) + } + + // Force skew join + withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1", + SQLConf.SKEW_JOIN_ENABLED.key -> "true", + SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "1", + SQLConf.SKEW_JOIN_SKEWED_PARTITION_FACTOR.key -> "0", + SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") { + // Repartition with no partition num specified. + val dfRepartition = df.repartition('b) + dfRepartition.collect() + val plan = dfRepartition.queryExecution.executedPlan + // The top shuffle from repartition is optimized out. + assert(!hasRepartitionShuffle(plan)) + val smj = findTopLevelSortMergeJoin(plan) + assert(smj.length == 1) + // No skew join due to the repartition. + assert(!smj.head.isSkewJoin) + // Both sides are coalesced. 
+ val customReaders = collect(smj.head) { + case c: CustomShuffleReaderExec if c.hasCoalescedPartition => c + } + assert(customReaders.length == 2) + + // Repartition with default partition num specified. + val dfRepartitionWithNum = df.repartition(5, 'b) + dfRepartitionWithNum.collect() + val planWithNum = dfRepartitionWithNum.queryExecution.executedPlan + // The top shuffle from repartition is optimized out. + assert(!hasRepartitionShuffle(planWithNum)) + val smjWithNum = findTopLevelSortMergeJoin(planWithNum) + assert(smjWithNum.length == 1) + // No skew join due to the repartition. + assert(!smjWithNum.head.isSkewJoin) + // No coalesce due to the num in repartition. + val customReadersWithNum = collect(smjWithNum.head) { + case c: CustomShuffleReaderExec if c.hasCoalescedPartition => c + } + assert(customReadersWithNum.isEmpty) + + // Repartition with default non-partition num specified. + val dfRepartitionWithNum2 = df.repartition(3, 'b) + dfRepartitionWithNum2.collect() + val planWithNum2 = dfRepartitionWithNum2.queryExecution.executedPlan + // The top shuffle from repartition is not optimized out. + assert(hasRepartitionShuffle(planWithNum2)) + val smjWithNum2 = findTopLevelSortMergeJoin(planWithNum2) + assert(smjWithNum2.length == 1) + // Skew join can apply as the repartition is not optimized out. + assert(smjWithNum2.head.isSkewJoin) + } + } + } + + test("SPARK-33933: AQE broadcast should not timeout with slow map tasks") { + val broadcastTimeoutInSec = 1 + val df = spark.sparkContext.parallelize(Range(0, 100), 100) + .flatMap(x => { + Thread.sleep(20) + for (i <- Range(0, 100)) yield (x % 26, x % 10) + }).toDF("index", "pv") + val dim = Range(0, 26).map(x => (x, ('a' + x).toChar.toString)) + .toDF("index", "name") + val testDf = df.groupBy("index") + .agg(sum($"pv").alias("pv")) + .join(dim, Seq("index")) + withSQLConf(SQLConf.BROADCAST_TIMEOUT.key -> broadcastTimeoutInSec.toString, + SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") { + val startTime = System.currentTimeMillis() + val result = testDf.collect() + val queryTime = System.currentTimeMillis() - startTime + assert(result.length == 26) + // make sure the execution time is large enough + assert(queryTime > (broadcastTimeoutInSec + 1) * 1000) + } + } + } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveTestUtils.scala index 48f85ae76cd8c..ad3ec85e984c8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveTestUtils.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveTestUtils.scala @@ -17,8 +17,6 @@ package org.apache.spark.sql.execution.adaptive -import java.io.{PrintWriter, StringWriter} - import org.scalactic.source.Position import org.scalatest.Tag diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/arrow/ArrowConvertersSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/arrow/ArrowConvertersSuite.scala index 1e6e59456c887..d861bbbf67b1c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/arrow/ArrowConvertersSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/arrow/ArrowConvertersSuite.scala @@ -1210,7 +1210,7 @@ class ArrowConvertersSuite extends SharedSparkSession { testQuietly("interval is unsupported for arrow") { val e = intercept[SparkException] { - calenderIntervalData.toDF().toArrowBatchRdd.collect() + 
calendarIntervalData.toDF().toArrowBatchRdd.collect() } assert(e.getCause.isInstanceOf[UnsupportedOperationException]) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala index 7caaa5376db7f..bc94d1f235800 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DateTimeRebaseBenchmark.scala @@ -165,7 +165,8 @@ object DateTimeRebaseBenchmark extends SqlBasedBenchmark { benchmark.addCase(caseName(modernDates, dateTime, Some(mode)), 1) { _ => withSQLConf( SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key -> getOutputType(dateTime), - SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> mode.toString) { + SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> mode.toString, + SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key -> mode.toString) { genDF(rowsNum, dateTime, modernDates) .write .mode("overwrite") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala index 9ade8b14f59b0..a98ca7f5d8f88 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala @@ -27,7 +27,7 @@ import org.apache.spark.sql.{DataFrame, SparkSession} import org.apache.spark.sql.functions.{monotonically_increasing_id, timestamp_seconds} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf.ParquetOutputTimestampType -import org.apache.spark.sql.types.{ByteType, Decimal, DecimalType, TimestampType} +import org.apache.spark.sql.types.{ByteType, Decimal, DecimalType} /** * Benchmark to measure read performance with Filter pushdown. diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertTableWithDynamicPartitionsBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertTableWithDynamicPartitionsBenchmark.scala new file mode 100644 index 0000000000000..81a29cefd0045 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertTableWithDynamicPartitionsBenchmark.scala @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.benchmark + +import org.apache.spark.benchmark.Benchmark + +/** + * Benchmark to measure insert into table with dynamic partition columns. + * To run this benchmark: + * {{{ + * 1. without sbt: bin/spark-submit --class + * 2. 
build/sbt "sql/test:runMain " + * 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain " + * Results will be written to + * "benchmarks/InsertTableWithDynamicPartitionsBenchmark-results.txt". + * }}} + */ +object InsertTableWithDynamicPartitionsBenchmark extends DataSourceWriteBenchmark { + + def prepareSourceTableAndGetTotalRows(numberRows: Long, sourceTable: String, + part1Step: Int, part2Step: Int, part3Step: Int): Long = { + val dataFrame = spark.range(0, numberRows, 1, 4) + val dataFrame1 = spark.range(0, numberRows, part1Step, 4) + val dataFrame2 = spark.range(0, numberRows, part2Step, 4) + val dataFrame3 = spark.range(0, numberRows, part3Step, 4) + + val data = dataFrame.join(dataFrame1).join(dataFrame2).join(dataFrame3) + .toDF("id", "part1", "part2", "part3") + data.write.saveAsTable(sourceTable) + data.count() + } + + def writeOnePartitionColumnTable(tableName: String, + partitionNumber: Long, benchmark: Benchmark): Unit = { + spark.sql(s"create table $tableName(i bigint, part bigint) " + + "using parquet partitioned by (part)") + benchmark.addCase(s"one partition column, $partitionNumber partitions") { _ => + spark.sql(s"insert overwrite table $tableName partition(part) " + + "select id, part1 as part from sourceTable") + } + } + + def writeTwoPartitionColumnTable(tableName: String, + partitionNumber: Long, benchmark: Benchmark): Unit = { + spark.sql(s"create table $tableName(i bigint, part1 bigint, part2 bigint) " + + "using parquet partitioned by (part1, part2)") + benchmark.addCase(s"two partition columns, $partitionNumber partitions") { _ => + spark.sql(s"insert overwrite table $tableName partition(part1, part2) " + + "select id, part1, part2 from sourceTable") + } + } + + def writeThreePartitionColumnTable(tableName: String, + partitionNumber: Long, benchmark: Benchmark): Unit = { + spark.sql(s"create table $tableName(i bigint, part1 bigint, part2 bigint, part3 bigint) " + + "using parquet partitioned by (part1, part2, part3)") + benchmark.addCase(s"three partition columns, $partitionNumber partitions") { _ => + spark.sql(s"insert overwrite table $tableName partition(part1, part2, part3) " + + "select id, part1, part2, part3 from sourceTable") + } + } + + override def runBenchmarkSuite(mainArgs: Array[String]): Unit = { + val sourceTable = "sourceTable" + val onePartColTable = "onePartColTable" + val twoPartColTable = "twoPartColTable" + val threePartColTable = "threePartColTable" + val numberRows = 100L + val part1Step = 1 + val part2Step = 20 + val part3Step = 25 + val part1Number = numberRows / part1Step + val part2Number = numberRows / part2Step * part1Number + val part3Number = numberRows / part3Step * part2Number + + withTable(sourceTable, onePartColTable, twoPartColTable, threePartColTable) { + val totalRows = + prepareSourceTableAndGetTotalRows(numberRows, sourceTable, part1Step, part2Step, part3Step) + val benchmark = + new Benchmark(s"dynamic insert table benchmark, totalRows = $totalRows", + totalRows, output = output) + writeOnePartitionColumnTable(onePartColTable, part1Number, benchmark) + writeTwoPartitionColumnTable(twoPartColTable, part2Number, benchmark) + writeThreePartitionColumnTable(threePartColTable, part3Number, benchmark) + benchmark.run() + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala index d2bd962b50654..f89fe2e64c778 
100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ParquetNestedPredicatePushDownBenchmark.scala @@ -17,9 +17,8 @@ package org.apache.spark.sql.execution.benchmark -import org.apache.spark.SparkConf import org.apache.spark.benchmark.Benchmark -import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession} +import org.apache.spark.sql.{DataFrame, SaveMode} import org.apache.spark.sql.internal.SQLConf /** diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala index 28387dcef125b..98abe8daac670 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala @@ -22,7 +22,9 @@ import org.apache.spark.internal.config.UI.UI_ENABLED import org.apache.spark.sql.{Dataset, SparkSession} import org.apache.spark.sql.SaveMode.Overwrite import org.apache.spark.sql.catalyst.plans.SQLHelper +import org.apache.spark.sql.functions.lit import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.types._ /** * Common base trait to run benchmark with the Dataset and DataFrame API. @@ -66,4 +68,25 @@ trait SqlBasedBenchmark extends BenchmarkBase with SQLHelper { ds.write.format("noop").mode(Overwrite).save() } } + + protected def prepareDataInfo(benchmark: Benchmark): Unit = { + // scalastyle:off println + benchmark.out.println("Preparing data for benchmarking ...") + // scalastyle:on println + } + + /** + * Prepares a table with wide row for benchmarking. The table will be written into + * the given path. + */ + protected def writeWideRow(path: String, rowsNum: Int, numCols: Int): StructType = { + val fields = Seq.tabulate(numCols)(i => StructField(s"col$i", IntegerType)) + val schema = StructType(fields) + + spark.range(rowsNum) + .select(Seq.tabulate(numCols)(i => lit(i).as(s"col$i")): _*) + .write.json(path) + + schema + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala index ad3d79760adf0..b34eac5df8090 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala @@ -24,21 +24,28 @@ import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.catalog.HiveTableRelation import org.apache.spark.sql.catalyst.plans.logical.SubqueryAlias import org.apache.spark.sql.catalyst.util._ +import org.apache.spark.sql.catalyst.util.DateTimeConstants.NANOS_PER_SECOND import org.apache.spark.sql.execution.datasources.LogicalRelation +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.util.Utils /** * Benchmark to measure TPCDS query performance. * To run this: * {{{ * 1. without sbt: - * bin/spark-submit --class --data-location + * bin/spark-submit --jars , + * --class --data-location * 2. build/sbt "sql/test:runMain --data-location " * 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt * "sql/test:runMain --data-location " * Results will be written to "benchmarks/TPCDSQueryBenchmark-results.txt". 
* }}} */ -object TPCDSQueryBenchmark extends SqlBasedBenchmark { +object TPCDSQueryBenchmark extends SqlBasedBenchmark with Logging { + + private lazy val warehousePath = + Utils.createTempDir(namePrefix = "spark-warehouse").getAbsolutePath override def getSparkSession: SparkSession = { val conf = new SparkConf() @@ -50,6 +57,7 @@ object TPCDSQueryBenchmark extends SqlBasedBenchmark { .set("spark.executor.memory", "3g") .set("spark.sql.autoBroadcastJoinThreshold", (20 * 1024 * 1024).toString) .set("spark.sql.crossJoin.enabled", "true") + .set("spark.sql.warehouse.dir", warehousePath) SparkSession.builder.config(conf).getOrCreate() } @@ -60,9 +68,14 @@ object TPCDSQueryBenchmark extends SqlBasedBenchmark { "web_returns", "web_site", "reason", "call_center", "warehouse", "ship_mode", "income_band", "time_dim", "web_page") - def setupTables(dataLocation: String): Map[String, Long] = { + def setupTables(dataLocation: String, createTempView: Boolean): Map[String, Long] = { tables.map { tableName => - spark.read.parquet(s"$dataLocation/$tableName").createOrReplaceTempView(tableName) + val df = spark.read.parquet(s"$dataLocation/$tableName") + if (createTempView) { + df.createOrReplaceTempView(tableName) + } else { + df.write.saveAsTable(tableName) + } tableName -> spark.table(tableName).count() }.toMap } @@ -97,11 +110,16 @@ object TPCDSQueryBenchmark extends SqlBasedBenchmark { } } - def filterQueries( + private def filterQueries( origQueries: Seq[String], - args: TPCDSQueryBenchmarkArguments): Seq[String] = { - if (args.queryFilter.nonEmpty) { - origQueries.filter(args.queryFilter.contains) + queryFilter: Set[String], + nameSuffix: String = ""): Seq[String] = { + if (queryFilter.nonEmpty) { + if (nameSuffix.nonEmpty) { + origQueries.filter { name => queryFilter.contains(s"$name$nameSuffix") } + } else { + origQueries.filter(queryFilter.contains) + } } else { origQueries } @@ -124,6 +142,7 @@ object TPCDSQueryBenchmark extends SqlBasedBenchmark { "q91", "q92", "q93", "q94", "q95", "q96", "q97", "q98", "q99") // This list only includes TPC-DS v2.7 queries that are different from v1.4 ones + val nameSuffixForQueriesV2_7 = "-v2.7" val tpcdsQueriesV2_7 = Seq( "q5a", "q6", "q10a", "q11", "q12", "q14", "q14a", "q18a", "q20", "q22", "q22a", "q24", "q27a", "q34", "q35", "q35a", "q36a", "q47", "q49", @@ -131,17 +150,36 @@ object TPCDSQueryBenchmark extends SqlBasedBenchmark { "q80a", "q86a", "q98") // If `--query-filter` defined, filters the queries that this option selects - val queriesV1_4ToRun = filterQueries(tpcdsQueries, benchmarkArgs) - val queriesV2_7ToRun = filterQueries(tpcdsQueriesV2_7, benchmarkArgs) + val queriesV1_4ToRun = filterQueries(tpcdsQueries, benchmarkArgs.queryFilter) + val queriesV2_7ToRun = filterQueries(tpcdsQueriesV2_7, benchmarkArgs.queryFilter, + nameSuffix = nameSuffixForQueriesV2_7) if ((queriesV1_4ToRun ++ queriesV2_7ToRun).isEmpty) { throw new RuntimeException( s"Empty queries to run. 
Bad query name filter: ${benchmarkArgs.queryFilter}") } - val tableSizes = setupTables(benchmarkArgs.dataLocation) + val tableSizes = setupTables(benchmarkArgs.dataLocation, + createTempView = !benchmarkArgs.cboEnabled) + if (benchmarkArgs.cboEnabled) { + spark.sql(s"SET ${SQLConf.CBO_ENABLED.key}=true") + spark.sql(s"SET ${SQLConf.PLAN_STATS_ENABLED.key}=true") + spark.sql(s"SET ${SQLConf.JOIN_REORDER_ENABLED.key}=true") + spark.sql(s"SET ${SQLConf.HISTOGRAM_ENABLED.key}=true") + + // Analyze all the tables before running TPCDS queries + val startTime = System.nanoTime() + tables.foreach { tableName => + spark.sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR ALL COLUMNS") + } + logInfo("The elapsed time to analyze all the tables is " + + s"${(System.nanoTime() - startTime) / NANOS_PER_SECOND.toDouble} seconds") + } else { + spark.sql(s"SET ${SQLConf.CBO_ENABLED.key}=false") + } + runTpcdsQueries(queryLocation = "tpcds", queries = queriesV1_4ToRun, tableSizes) runTpcdsQueries(queryLocation = "tpcds-v2.7.0", queries = queriesV2_7ToRun, tableSizes, - nameSuffix = "-v2.7") + nameSuffix = nameSuffixForQueriesV2_7) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmarkArguments.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmarkArguments.scala index 184ffff94298a..80a6bffc61ea4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmarkArguments.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmarkArguments.scala @@ -23,6 +23,7 @@ import java.util.Locale class TPCDSQueryBenchmarkArguments(val args: Array[String]) { var dataLocation: String = null var queryFilter: Set[String] = Set.empty + var cboEnabled: Boolean = false parseArgs(args.toList) validateArguments() @@ -44,6 +45,10 @@ class TPCDSQueryBenchmarkArguments(val args: Array[String]) { queryFilter = value.toLowerCase(Locale.ROOT).split(",").map(_.trim).toSet args = tail + case optName :: tail if optionMatch("--cbo", optName) => + cboEnabled = true + args = tail + case _ => // scalastyle:off println System.err.println("Unknown/unsupported param " + args) @@ -60,6 +65,7 @@ class TPCDSQueryBenchmarkArguments(val args: Array[String]) { |Options: | --data-location Path to TPCDS data | --query-filter Queries to filter, e.g., q3,q5,q13 + | --cbo Whether to enable cost-based optimization | |------------------------------------------------------------------------------------------------------------------ |In order to run this benchmark, please follow the instructions at diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/bucketing/CoalesceBucketsInJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/bucketing/CoalesceBucketsInJoinSuite.scala index 89aee37a4246f..63964665fc81c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/bucketing/CoalesceBucketsInJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/bucketing/CoalesceBucketsInJoinSuite.scala @@ -99,7 +99,7 @@ class CoalesceBucketsInJoinSuite extends SQLTestUtils with SharedSparkSession { s.leftKeys, s.rightKeys, Inner, BuildLeft, None, lScan, rScan) } - val plan = CoalesceBucketsInJoin(spark.sessionState.conf)(join) + val plan = CoalesceBucketsInJoin(join) def verify(expected: Option[Int], subPlan: SparkPlan): Unit = { val coalesced = subPlan.collect { diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnStatsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnStatsSuite.scala index 847e0ec4f3195..0abb3cb6a2ed0 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnStatsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/ColumnStatsSuite.scala @@ -19,7 +19,6 @@ package org.apache.spark.sql.execution.columnar import org.apache.spark.SparkFunSuite import org.apache.spark.sql.types._ -import org.apache.spark.unsafe.types.CalendarInterval class ColumnStatsSuite extends SparkFunSuite { testColumnStats(classOf[BooleanColumnStats], BOOLEAN, Array(true, false, 0)) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableAddPartitionParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableAddPartitionParserSuite.scala new file mode 100644 index 0000000000000..5ebca8f651604 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableAddPartitionParserSuite.scala @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedPartitionSpec, UnresolvedTable} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.plans.logical.AlterTableAddPartition +import org.apache.spark.sql.test.SharedSparkSession + +class AlterTableAddPartitionParserSuite extends AnalysisTest with SharedSparkSession { + test("add partition if not exists") { + val sql = """ + |ALTER TABLE a.b.c ADD IF NOT EXISTS PARTITION + |(dt='2008-08-08', country='us') LOCATION 'location1' PARTITION + |(dt='2009-09-09', country='uk')""".stripMargin + val parsed = parsePlan(sql) + val expected = AlterTableAddPartition( + UnresolvedTable(Seq("a", "b", "c"), "ALTER TABLE ... ADD PARTITION ..."), + Seq( + UnresolvedPartitionSpec(Map("dt" -> "2008-08-08", "country" -> "us"), Some("location1")), + UnresolvedPartitionSpec(Map("dt" -> "2009-09-09", "country" -> "uk"), None)), + ifNotExists = true) + comparePlans(parsed, expected) + } + + test("add partition") { + val sql = "ALTER TABLE a.b.c ADD PARTITION (dt='2008-08-08') LOCATION 'loc'" + val parsed = parsePlan(sql) + val expected = AlterTableAddPartition( + UnresolvedTable(Seq("a", "b", "c"), "ALTER TABLE ... 
ADD PARTITION ..."), + Seq(UnresolvedPartitionSpec(Map("dt" -> "2008-08-08"), Some("loc"))), + ifNotExists = false) + + comparePlans(parsed, expected) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableAddPartitionSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableAddPartitionSuiteBase.scala new file mode 100644 index 0000000000000..2705adb8b3c67 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableAddPartitionSuiteBase.scala @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.{AnalysisException, QueryTest} +import org.apache.spark.sql.catalyst.analysis.PartitionsAlreadyExistException +import org.apache.spark.sql.internal.SQLConf + +/** + * This base suite contains unified tests for the `ALTER TABLE .. ADD PARTITION` command that + * check V1 and V2 table catalogs. The tests that cannot run for all supported catalogs are + * located in more specific test suites: + * + * - V2 table catalog tests: + * `org.apache.spark.sql.execution.command.v2.AlterTableAddPartitionSuite` + * - V1 table catalog tests: + * `org.apache.spark.sql.execution.command.v1.AlterTableAddPartitionSuiteBase` + * - V1 In-Memory catalog: + * `org.apache.spark.sql.execution.command.v1.AlterTableAddPartitionSuite` + * - V1 Hive External catalog: + * `org.apache.spark.sql.hive.execution.command.AlterTableAddPartitionSuite` + */ +trait AlterTableAddPartitionSuiteBase extends QueryTest with DDLCommandTestUtils { + override val command = "ALTER TABLE .. 
ADD PARTITION" + + test("one partition") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + Seq("", "IF NOT EXISTS").foreach { exists => + sql(s"ALTER TABLE $t ADD $exists PARTITION (id=1) LOCATION 'loc'") + + checkPartitions(t, Map("id" -> "1")) + checkLocation(t, Map("id" -> "1"), "loc") + } + } + } + + test("multiple partitions") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + Seq("", "IF NOT EXISTS").foreach { exists => + sql(s""" + |ALTER TABLE $t ADD $exists + |PARTITION (id=1) LOCATION 'loc' + |PARTITION (id=2) LOCATION 'loc1'""".stripMargin) + + checkPartitions(t, Map("id" -> "1"), Map("id" -> "2")) + checkLocation(t, Map("id" -> "1"), "loc") + checkLocation(t, Map("id" -> "2"), "loc1") + } + } + } + + test("multi-part partition") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, a int, b string) $defaultUsing PARTITIONED BY (a, b)") + Seq("", "IF NOT EXISTS").foreach { exists => + sql(s"ALTER TABLE $t ADD $exists PARTITION (a=2, b='abc')") + + checkPartitions(t, Map("a" -> "2", "b" -> "abc")) + } + } + } + + test("table to alter does not exist") { + withNamespaceAndTable("ns", "does_not_exist") { t => + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t ADD IF NOT EXISTS PARTITION (a='4', b='9')") + }.getMessage + assert(errMsg.contains("Table not found")) + } + } + + test("case sensitivity in resolving partition specs") { + withNamespaceAndTable("ns", "tbl") { t => + spark.sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val errMsg = intercept[AnalysisException] { + spark.sql(s"ALTER TABLE $t ADD PARTITION (ID=1) LOCATION 'loc1'") + }.getMessage + assert(errMsg.contains("ID is not a valid partition column")) + } + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + spark.sql(s"ALTER TABLE $t ADD PARTITION (ID=1) LOCATION 'loc1'") + checkPartitions(t, Map("id" -> "1")) + checkLocation(t, Map("id" -> "1"), "loc1") + } + } + } + + test("SPARK-33521: universal type conversions of partition values") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s""" + |CREATE TABLE $t ( + | id int, + | part0 tinyint, + | part1 smallint, + | part2 int, + | part3 bigint, + | part4 float, + | part5 double, + | part6 string, + | part7 boolean, + | part8 date, + | part9 timestamp + |) $defaultUsing + |PARTITIONED BY (part0, part1, part2, part3, part4, part5, part6, part7, part8, part9) + |""".stripMargin) + val partSpec = """ + | part0 = -1, + | part1 = 0, + | part2 = 1, + | part3 = 2, + | part4 = 3.14, + | part5 = 3.14, + | part6 = 'abc', + | part7 = true, + | part8 = '2020-11-23', + | part9 = '2020-11-23 22:13:10.123456' + |""".stripMargin + sql(s"ALTER TABLE $t ADD PARTITION ($partSpec)") + val expected = Map( + "part0" -> "-1", + "part1" -> "0", + "part2" -> "1", + "part3" -> "2", + "part4" -> "3.14", + "part5" -> "3.14", + "part6" -> "abc", + "part7" -> "true", + "part8" -> "2020-11-23", + "part9" -> "2020-11-23 22:13:10.123456") + checkPartitions(t, expected) + sql(s"ALTER TABLE $t DROP PARTITION ($partSpec)") + checkPartitions(t) // no partitions + } + } + + test("SPARK-33676: not fully specified partition spec") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s""" + |CREATE TABLE $t (id bigint, part0 int, part1 string) + |$defaultUsing + |PARTITIONED BY (part0, part1)""".stripMargin) + 
val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t ADD PARTITION (part0 = 1)") + }.getMessage + assert(errMsg.contains("Partition spec is invalid. " + + "The spec (part0) must match the partition spec (part0, part1)")) + } + } + + test("partition already exists") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"ALTER TABLE $t ADD PARTITION (id=2) LOCATION 'loc1'") + + val errMsg = intercept[PartitionsAlreadyExistException] { + sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'" + + " PARTITION (id=2) LOCATION 'loc1'") + }.getMessage + assert(errMsg.contains("The following partitions already exists")) + + sql(s"ALTER TABLE $t ADD IF NOT EXISTS PARTITION (id=1) LOCATION 'loc'" + + " PARTITION (id=2) LOCATION 'loc1'") + checkPartitions(t, Map("id" -> "1"), Map("id" -> "2")) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionParserSuite.scala new file mode 100644 index 0000000000000..53edd5854f289 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionParserSuite.scala @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedPartitionSpec, UnresolvedTable} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.parser.ParseException +import org.apache.spark.sql.catalyst.plans.logical.AlterTableDropPartition +import org.apache.spark.sql.test.SharedSparkSession + +class AlterTableDropPartitionParserSuite extends AnalysisTest with SharedSparkSession { + test("drop partition") { + val sql = """ + |ALTER TABLE table_name DROP PARTITION + |(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', country='uk') + """.stripMargin + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("table_name"), "ALTER TABLE ... DROP PARTITION ..."), + Seq( + UnresolvedPartitionSpec(Map("dt" -> "2008-08-08", "country" -> "us")), + UnresolvedPartitionSpec(Map("dt" -> "2009-09-09", "country" -> "uk"))), + ifExists = false, + purge = false) + + comparePlans(parsePlan(sql), expected) + } + + test("drop partition if exists") { + val sql = """ + |ALTER TABLE table_name DROP IF EXISTS + |PARTITION (dt='2008-08-08', country='us'), + |PARTITION (dt='2009-09-09', country='uk') + """.stripMargin + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("table_name"), "ALTER TABLE ... 
DROP PARTITION ..."), + Seq( + UnresolvedPartitionSpec(Map("dt" -> "2008-08-08", "country" -> "us")), + UnresolvedPartitionSpec(Map("dt" -> "2009-09-09", "country" -> "uk"))), + ifExists = true, + purge = false) + comparePlans(parsePlan(sql), expected) + } + + test("drop partition in a table with multi-part identifier") { + val sql = "ALTER TABLE a.b.c DROP IF EXISTS PARTITION (ds='2017-06-10')" + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("a", "b", "c"), "ALTER TABLE ... DROP PARTITION ..."), + Seq(UnresolvedPartitionSpec(Map("ds" -> "2017-06-10"))), + ifExists = true, + purge = false) + + comparePlans(parsePlan(sql), expected) + } + + test("drop partition with PURGE") { + val sql = "ALTER TABLE table_name DROP PARTITION (p=1) PURGE" + val expected = AlterTableDropPartition( + UnresolvedTable(Seq("table_name"), "ALTER TABLE ... DROP PARTITION ..."), + Seq(UnresolvedPartitionSpec(Map("p" -> "1"))), + ifExists = false, + purge = true) + + comparePlans(parsePlan(sql), expected) + } + + test("drop partition from view") { + val sql = "ALTER VIEW table_name DROP PARTITION (p=1)" + val errMsg = intercept[ParseException] { + parsePlan(sql) + }.getMessage + assert(errMsg.contains("Operation not allowed")) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionSuiteBase.scala new file mode 100644 index 0000000000000..942a3e8635698 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableDropPartitionSuiteBase.scala @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} +import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionsException +import org.apache.spark.sql.internal.SQLConf + +/** + * This base suite contains unified tests for the `ALTER TABLE .. DROP PARTITION` command that + * check V1 and V2 table catalogs. The tests that cannot run for all supported catalogs are + * located in more specific test suites: + * + * - V2 table catalog tests: + * `org.apache.spark.sql.execution.command.v2.AlterTableDropPartitionSuite` + * - V1 table catalog tests: + * `org.apache.spark.sql.execution.command.v1.AlterTableDropPartitionSuiteBase` + * - V1 In-Memory catalog: + * `org.apache.spark.sql.execution.command.v1.AlterTableDropPartitionSuite` + * - V1 Hive External catalog: + * `org.apache.spark.sql.hive.execution.command.AlterTableDropPartitionSuite` + */ +trait AlterTableDropPartitionSuiteBase extends QueryTest with DDLCommandTestUtils { + override val command = "ALTER TABLE .. 
DROP PARTITION" + + protected def notFullPartitionSpecErr: String + protected def nullPartitionValue: String + + protected def checkDropPartition( + t: String, + ifExists: String, + specs: Map[String, Any]*): Unit = { + checkPartitions(t, specs.map(_.mapValues(_.toString).toMap): _*) + val specStr = specs.map( + _.map { + case (k, v: String) => s"$k = '$v'" + case (k, v) => s"$k = $v" + }.mkString("PARTITION (", ", ", ")")) + .mkString(", ") + sql(s"ALTER TABLE $t DROP $ifExists $specStr") + checkPartitions(t) + } + + test("single partition") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + Seq("", "IF EXISTS").foreach { ifExists => + sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") + checkDropPartition(t, ifExists, Map("id" -> 1)) + } + } + } + + test("multiple partitions") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + Seq("", "IF EXISTS").foreach { ifExists => + sql(s""" + |ALTER TABLE $t ADD + |PARTITION (id=1) LOCATION 'loc' + |PARTITION (id=2) LOCATION 'loc1'""".stripMargin) + checkDropPartition(t, ifExists, Map("id" -> 1), Map("id" -> 2)) + } + } + } + + test("multi-part partition") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, a int, b string) $defaultUsing PARTITIONED BY (a, b)") + Seq("", "IF EXISTS").foreach { ifExists => + sql(s"ALTER TABLE $t ADD PARTITION (a = 2, b = 'abc')") + checkDropPartition(t, ifExists, Map("a" -> 2, "b" -> "abc")) + } + } + } + + test("table to alter does not exist") { + withNamespaceAndTable("ns", "does_not_exist") { t => + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (a='4', b='9')") + }.getMessage + assert(errMsg.contains("Table not found")) + } + } + + test("case sensitivity in resolving partition specs") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (ID=1)") + }.getMessage + assert(errMsg.contains("ID is not a valid partition column")) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + Seq("", "IF EXISTS").foreach { ifExists => + sql(s"ALTER TABLE $t ADD PARTITION (ID=1) LOCATION 'loc1'") + checkDropPartition(t, ifExists, Map("id" -> 1)) + } + } + } + } + + test("SPARK-33676: not fully specified partition spec") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s""" + |CREATE TABLE $t (id bigint, part0 int, part1 string) + |$defaultUsing + |PARTITIONED BY (part0, part1)""".stripMargin) + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (part0 = 1)") + }.getMessage + assert(errMsg.contains(notFullPartitionSpecErr)) + } + } + + test("partition not exists") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"ALTER TABLE $t ADD PARTITION (id=1) LOCATION 'loc'") + + val errMsg = intercept[NoSuchPartitionsException] { + sql(s"ALTER TABLE $t DROP PARTITION (id=1), PARTITION (id=2)") + }.getMessage + assert(errMsg.contains("partitions not found in table")) + + checkPartitions(t, Map("id" -> "1")) + sql(s"ALTER TABLE $t DROP IF EXISTS PARTITION (id=1), PARTITION (id=2)") + checkPartitions(t) + } + } + + test("SPARK-33990: don not return data from dropped 
partition") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id int, part int) $defaultUsing PARTITIONED BY (part)") + sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0") + sql(s"INSERT INTO $t PARTITION (part=1) SELECT 1") + QueryTest.checkAnswer(sql(s"SELECT * FROM $t"), Seq(Row(0, 0), Row(1, 1))) + sql(s"ALTER TABLE $t DROP PARTITION (part=0)") + QueryTest.checkAnswer(sql(s"SELECT * FROM $t"), Seq(Row(1, 1))) + } + } + + test("SPARK-33950, SPARK-33987: refresh cache after partition dropping") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id int, part int) $defaultUsing PARTITIONED BY (part)") + sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0") + sql(s"INSERT INTO $t PARTITION (part=1) SELECT 1") + assert(!spark.catalog.isCached(t)) + sql(s"CACHE TABLE $t") + assert(spark.catalog.isCached(t)) + QueryTest.checkAnswer(sql(s"SELECT * FROM $t"), Seq(Row(0, 0), Row(1, 1))) + sql(s"ALTER TABLE $t DROP PARTITION (part=0)") + assert(spark.catalog.isCached(t)) + QueryTest.checkAnswer(sql(s"SELECT * FROM $t"), Seq(Row(1, 1))) + } + } + + test("SPARK-33591: null as a partition value") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)") + sql(s"ALTER TABLE $t ADD PARTITION (p1 = null)") + checkPartitions(t, Map("p1" -> nullPartitionValue)) + sql(s"ALTER TABLE $t DROP PARTITION (p1 = null)") + checkPartitions(t) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableRenamePartitionParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableRenamePartitionParserSuite.scala new file mode 100644 index 0000000000000..c9a6732796729 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableRenamePartitionParserSuite.scala @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedPartitionSpec, UnresolvedTable} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.plans.logical.AlterTableRenamePartition +import org.apache.spark.sql.test.SharedSparkSession + +class AlterTableRenamePartitionParserSuite extends AnalysisTest with SharedSparkSession { + test("rename a partition with single part") { + val sql = """ + |ALTER TABLE a.b.c PARTITION (ds='2017-06-10') + |RENAME TO PARTITION (ds='2018-06-10') + """.stripMargin + val parsed = parsePlan(sql) + val expected = AlterTableRenamePartition( + UnresolvedTable(Seq("a", "b", "c"), "ALTER TABLE ... 
RENAME TO PARTITION"), + UnresolvedPartitionSpec(Map("ds" -> "2017-06-10")), + UnresolvedPartitionSpec(Map("ds" -> "2018-06-10"))) + comparePlans(parsed, expected) + } + + test("rename a partition with multi parts") { + val sql = """ + |ALTER TABLE table_name PARTITION (dt='2008-08-08', country='us') + |RENAME TO PARTITION (dt='2008-09-09', country='uk') + """.stripMargin + val parsed = parsePlan(sql) + val expected = AlterTableRenamePartition( + UnresolvedTable(Seq("table_name"), "ALTER TABLE ... RENAME TO PARTITION"), + UnresolvedPartitionSpec(Map("dt" -> "2008-08-08", "country" -> "us")), + UnresolvedPartitionSpec(Map("dt" -> "2008-09-09", "country" -> "uk"))) + comparePlans(parsed, expected) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableRenamePartitionSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableRenamePartitionSuiteBase.scala new file mode 100644 index 0000000000000..7f66e282499d4 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/AlterTableRenamePartitionSuiteBase.scala @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} +import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, PartitionAlreadyExistsException} +import org.apache.spark.sql.internal.SQLConf + +/** + * This base suite contains unified tests for the `ALTER TABLE .. RENAME PARTITION` command that + * check V1 and V2 table catalogs. The tests that cannot run for all supported catalogs are + * located in more specific test suites: + * + * - V2 table catalog tests: + * `org.apache.spark.sql.execution.command.v2.AlterTableRenamePartitionSuite` + * - V1 table catalog tests: + * `org.apache.spark.sql.execution.command.v1.AlterTableRenamePartitionSuiteBase` + * - V1 In-Memory catalog: + * `org.apache.spark.sql.execution.command.v1.AlterTableRenamePartitionSuite` + * - V1 Hive External catalog: + * `org.apache.spark.sql.hive.execution.command.AlterTableRenamePartitionSuite` + */ +trait AlterTableRenamePartitionSuiteBase extends QueryTest with DDLCommandTestUtils { + override val command = "ALTER TABLE .. 
RENAME PARTITION" + + protected def createSinglePartTable(t: String): Unit = { + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"INSERT INTO $t PARTITION (id = 1) SELECT 'abc'") + } + + test("rename without explicitly specifying database") { + withSQLConf(SQLConf.DEFAULT_CATALOG.key -> catalog) { + createSinglePartTable("t") + checkPartitions("t", Map("id" -> "1")) + + sql(s"ALTER TABLE t PARTITION (id = 1) RENAME TO PARTITION (id = 2)") + checkPartitions("t", Map("id" -> "2")) + checkAnswer(sql(s"SELECT id, data FROM t"), Row(2, "abc")) + } + } + + test("table to alter does not exist") { + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $catalog.ns.no_tbl PARTITION (id=1) RENAME TO PARTITION (id=2)") + }.getMessage + assert(errMsg.contains("Table not found")) + } + } + + test("partition to rename does not exist") { + withNamespaceAndTable("ns", "tbl") { t => + createSinglePartTable(t) + checkPartitions(t, Map("id" -> "1")) + val errMsg = intercept[NoSuchPartitionException] { + sql(s"ALTER TABLE $t PARTITION (id = 3) RENAME TO PARTITION (id = 2)") + }.getMessage + assert(errMsg.contains("Partition not found in table")) + } + } + + test("target partition exists") { + withNamespaceAndTable("ns", "tbl") { t => + createSinglePartTable(t) + sql(s"INSERT INTO $t PARTITION (id = 2) SELECT 'def'") + checkPartitions(t, Map("id" -> "1"), Map("id" -> "2")) + val errMsg = intercept[PartitionAlreadyExistsException] { + sql(s"ALTER TABLE $t PARTITION (id = 1) RENAME TO PARTITION (id = 2)") + }.getMessage + assert(errMsg.contains("Partition already exists")) + } + } + + test("single part partition") { + withNamespaceAndTable("ns", "tbl") { t => + createSinglePartTable(t) + checkPartitions(t, Map("id" -> "1")) + + sql(s"ALTER TABLE $t PARTITION (id = 1) RENAME TO PARTITION (id = 2)") + checkPartitions(t, Map("id" -> "2")) + checkAnswer(sql(s"SELECT id, data FROM $t"), Row(2, "abc")) + } + } + + test("multi part partition") { + withNamespaceAndTable("ns", "tbl") { t => + createWideTable(t) + checkPartitions(t, + Map( + "year" -> "2016", + "month" -> "3", + "hour" -> "10", + "minute" -> "10", + "sec" -> "10", + "extra" -> "1"), + Map( + "year" -> "2016", + "month" -> "4", + "hour" -> "10", + "minute" -> "10", + "sec" -> "10", + "extra" -> "1")) + + sql(s""" + |ALTER TABLE $t + |PARTITION ( + | year = 2016, month = 3, hour = 10, minute = 10, sec = 10, extra = 1 + |) RENAME TO PARTITION ( + | year = 2016, month = 3, hour = 10, minute = 10, sec = 123, extra = 1 + |)""".stripMargin) + checkPartitions(t, + Map( + "year" -> "2016", + "month" -> "3", + "hour" -> "10", + "minute" -> "10", + "sec" -> "123", + "extra" -> "1"), + Map( + "year" -> "2016", + "month" -> "4", + "hour" -> "10", + "minute" -> "10", + "sec" -> "10", + "extra" -> "1")) + checkAnswer(sql(s"SELECT month, sec, price FROM $t"), Row(3, 123, 3)) + } + } + + test("partition spec in RENAME PARTITION should be case insensitive") { + withNamespaceAndTable("ns", "tbl") { t => + createSinglePartTable(t) + checkPartitions(t, Map("id" -> "1")) + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t PARTITION (ID = 1) RENAME TO PARTITION (id = 2)") + }.getMessage + assert(errMsg.contains("ID is not a valid partition column")) + checkPartitions(t, Map("id" -> "1")) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + sql(s"ALTER TABLE $t PARTITION (ID 
= 1) RENAME TO PARTITION (id = 2)") + checkPartitions(t, Map("id" -> "2")) + checkAnswer(sql(s"SELECT id, data FROM $t"), Row(2, "abc")) + } + } + } + + test("SPARK-34011: refresh cache after partition renaming") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id int, part int) $defaultUsing PARTITIONED BY (part)") + sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0") + sql(s"INSERT INTO $t PARTITION (part=1) SELECT 1") + assert(!spark.catalog.isCached(t)) + sql(s"CACHE TABLE $t") + assert(spark.catalog.isCached(t)) + QueryTest.checkAnswer(sql(s"SELECT * FROM $t"), Seq(Row(0, 0), Row(1, 1))) + sql(s"ALTER TABLE $t PARTITION (part=0) RENAME TO PARTITION (part=2)") + assert(spark.catalog.isCached(t)) + QueryTest.checkAnswer(sql(s"SELECT * FROM $t"), Seq(Row(0, 2), Row(1, 1))) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/CharVarcharDDLTestBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/CharVarcharDDLTestBase.scala new file mode 100644 index 0000000000000..1f47744ce4abd --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/CharVarcharDDLTestBase.scala @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.SparkConf +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} +import org.apache.spark.sql.catalyst.util.CharVarcharUtils +import org.apache.spark.sql.connector.InMemoryPartitionTableCatalog +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} +import org.apache.spark.sql.types._ + +trait CharVarcharDDLTestBase extends QueryTest with SQLTestUtils { + + def format: String + + def checkColType(f: StructField, dt: DataType): Unit = { + assert(f.dataType == CharVarcharUtils.replaceCharVarcharWithString(dt)) + assert(CharVarcharUtils.getRawType(f.metadata).contains(dt)) + } + + test("allow to change column for char(x) to char(y), x == y") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(4)) USING $format") + sql("ALTER TABLE t CHANGE COLUMN c TYPE CHAR(4)") + checkColType(spark.table("t").schema(1), CharType(4)) + } + } + + test("not allow to change column for char(x) to char(y), x != y") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(4)) USING $format") + val e = intercept[AnalysisException] { + sql("ALTER TABLE t CHANGE COLUMN c TYPE CHAR(5)") + } + val v1 = e.getMessage contains "'CharType(4)' to 'c' with type 'CharType(5)'" + val v2 = e.getMessage contains "char(4) cannot be cast to char(5)" + assert(v1 || v2) + } + } + + test("not allow to change column from string to char type") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c STRING) USING $format") + val e = intercept[AnalysisException] { + sql("ALTER TABLE t CHANGE COLUMN c TYPE CHAR(5)") + } + val v1 = e.getMessage contains "'StringType' to 'c' with type 'CharType(5)'" + val v2 = e.getMessage contains "string cannot be cast to char(5)" + assert(v1 || v2) + } + } + + test("not allow to change column from int to char type") { + withTable("t") { + sql(s"CREATE TABLE t(i int, c CHAR(4)) USING $format") + val e = intercept[AnalysisException] { + sql("ALTER TABLE t CHANGE COLUMN i TYPE CHAR(5)") + } + val v1 = e.getMessage contains "'IntegerType' to 'i' with type 'CharType(5)'" + val v2 = e.getMessage contains "int cannot be cast to char(5)" + assert(v1 || v2) + } + } + + test("allow to change column for varchar(x) to varchar(y), x == y") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c VARCHAR(4)) USING $format") + sql("ALTER TABLE t CHANGE COLUMN c TYPE VARCHAR(4)") + checkColType(spark.table("t").schema(1), VarcharType(4)) + } + } + + test("not allow to change column for varchar(x) to varchar(y), x > y") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c VARCHAR(4)) USING $format") + val e = intercept[AnalysisException] { + sql("ALTER TABLE t CHANGE COLUMN c TYPE VARCHAR(3)") + } + val v1 = e.getMessage contains "'VarcharType(4)' to 'c' with type 'VarcharType(3)'" + val v2 = e.getMessage contains "varchar(4) cannot be cast to varchar(3)" + assert(v1 || v2) + } + } + + def checkTableSchemaTypeStr(expected: Seq[Row]): Unit = { + checkAnswer(sql("desc t").selectExpr("data_type").where("data_type like '%char%'"), expected) + } + + test("SPARK-33901: alter table add columns should not change original table's schema") { + withTable("t") { + sql(s"CREATE TABLE t(i CHAR(5), c VARCHAR(4)) USING $format") + sql("ALTER TABLE t ADD COLUMNS (d VARCHAR(5))") + checkTableSchemaTypeStr(Seq(Row("char(5)"), Row("varchar(4)"), Row("varchar(5)"))) + } + } + + test("SPARK-33901: ctas should not change table's schema") { + withTable("t", "tt") { + 
sql(s"CREATE TABLE tt(i CHAR(5), c VARCHAR(4)) USING $format") + sql(s"CREATE TABLE t USING $format AS SELECT * FROM tt") + checkTableSchemaTypeStr(Seq(Row("char(5)"), Row("varchar(4)"))) + } + } +} + +class FileSourceCharVarcharDDLTestSuite extends CharVarcharDDLTestBase with SharedSparkSession { + override def format: String = "parquet" + override protected def sparkConf: SparkConf = { + super.sparkConf.set(SQLConf.USE_V1_SOURCE_LIST, "parquet") + } + + // TODO(SPARK-33902): MOVE TO SUPER CLASS AFTER THE TARGET TICKET RESOLVED + test("SPARK-33901: create table like should should not change table's schema") { + withTable("t", "tt") { + sql(s"CREATE TABLE tt(i CHAR(5), c VARCHAR(4)) USING $format") + sql("CREATE TABLE t LIKE tt") + checkTableSchemaTypeStr(Seq(Row("char(5)"), Row("varchar(4)"))) + } + } + + // TODO(SPARK-33903): MOVE TO SUPER CLASS AFTER THE TARGET TICKET RESOLVED + test("SPARK-33901: cvas should should not change view's schema") { + withTable( "tt") { + sql(s"CREATE TABLE tt(i CHAR(5), c VARCHAR(4)) USING $format") + withView("t") { + sql("CREATE VIEW t AS SELECT * FROM tt") + checkTableSchemaTypeStr(Seq(Row("char(5)"), Row("varchar(4)"))) + } + } + } +} + +class DSV2CharVarcharDDLTestSuite extends CharVarcharDDLTestBase + with SharedSparkSession { + override def format: String = "foo" + protected override def sparkConf = { + super.sparkConf + .set("spark.sql.catalog.testcat", classOf[InMemoryPartitionTableCatalog].getName) + .set(SQLConf.DEFAULT_CATALOG.key, "testcat") + } + + test("allow to change change column from char to string type") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(4)) USING $format") + sql("ALTER TABLE t CHANGE COLUMN c TYPE STRING") + assert(spark.table("t").schema(1).dataType === StringType) + } + } + + test("allow to change column from char(x) to varchar(y) type x <= y") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(4)) USING $format") + sql("ALTER TABLE t CHANGE COLUMN c TYPE VARCHAR(4)") + checkColType(spark.table("t").schema(1), VarcharType(4)) + } + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(4)) USING $format") + sql("ALTER TABLE t CHANGE COLUMN c TYPE VARCHAR(5)") + checkColType(spark.table("t").schema(1), VarcharType(5)) + } + } + + test("allow to change column from varchar(x) to varchar(y) type x <= y") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c VARCHAR(4)) USING $format") + sql("ALTER TABLE t CHANGE COLUMN c TYPE VARCHAR(4)") + checkColType(spark.table("t").schema(1), VarcharType(4)) + sql("ALTER TABLE t CHANGE COLUMN c TYPE VARCHAR(5)") + checkColType(spark.table("t").schema(1), VarcharType(5)) + + } + } + + test("not allow to change column from char(x) to varchar(y) type x > y") { + withTable("t") { + sql(s"CREATE TABLE t(i STRING, c CHAR(4)) USING $format") + val e = intercept[AnalysisException] { + sql("ALTER TABLE t CHANGE COLUMN c TYPE VARCHAR(3)") + } + assert(e.getMessage contains "char(4) cannot be cast to varchar(3)") + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandTestUtils.scala new file mode 100644 index 0000000000000..f4b84d8ee0059 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandTestUtils.scala @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.scalactic.source.Position +import org.scalatest.Tag + +import org.apache.spark.sql.Row +import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec +import org.apache.spark.sql.execution.datasources.PartitioningUtils +import org.apache.spark.sql.test.SQLTestUtils + +/** + * The common settings and utility functions for all v1 and v2 test suites. When a function + * is not applicable to all supported catalogs, it should be placed in a specific trait: + * + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.CommandSuiteBase` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.CommandSuiteBase` + * - V2 In-Memory catalog: `org.apache.spark.sql.execution.command.v2.CommandSuiteBase` + */ +trait DDLCommandTestUtils extends SQLTestUtils { + // The version of the catalog under test, such as "V1", "V2", "Hive V1". + protected def version: String + // Name of the command as a SQL statement, for instance "SHOW PARTITIONS" + protected def command: String + // The catalog name that can be used in SQL statements under test + protected def catalog: String + // The USING clause used when creating tables for testing + protected def defaultUsing: String + + // Overrides the `test` method and adds a prefix that makes it easy to identify, in the logs, + // the catalog to which a failed test belongs. + override def test(testName: String, testTags: Tag*)(testFun: => Any) + (implicit pos: Position): Unit = { + super.test(s"$command $version: " + testName, testTags: _*)(testFun) + } + + protected def withNamespaceAndTable(ns: String, tableName: String, cat: String = catalog) + (f: String => Unit): Unit = { + val nsCat = s"$cat.$ns" + withNamespace(nsCat) { + sql(s"CREATE NAMESPACE $nsCat") + val t = s"$nsCat.$tableName" + withTable(t) { + f(t) + } + } + } + + // Checks that the table `t` contains only the `expected` partitions. 
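+ // A minimal illustrative usage (added for clarity; not part of the original patch, and the
+ // table/partition names here are hypothetical): after
+ //   sql(s"CREATE TABLE $t (id INT, part INT) $defaultUsing PARTITIONED BY (part)")
+ //   sql(s"INSERT INTO $t PARTITION (part = 0) SELECT 0")
+ // a call such as checkPartitions(t, Map("part" -> "0")) passes only if SHOW PARTITIONS
+ // reports exactly that single partition.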
+ protected def checkPartitions(t: String, expected: Map[String, String]*): Unit = { + val partitions = sql(s"SHOW PARTITIONS $t") + .collect() + .toSet + .map((row: Row) => row.getString(0)) + .map(PartitioningUtils.parsePathFragment) + assert(partitions === expected.toSet) + } + + protected def createWideTable(table: String): Unit = { + sql(s""" + |CREATE TABLE $table ( + | price int, qty int, + | year int, month int, hour int, minute int, sec int, extra int) + |$defaultUsing + |PARTITIONED BY (year, month, hour, minute, sec, extra) + |""".stripMargin) + sql(s""" + |INSERT INTO $table + |PARTITION(year = 2016, month = 3, hour = 10, minute = 10, sec = 10, extra = 1) SELECT 3, 3 + |""".stripMargin) + sql(s""" + |ALTER TABLE $table + |ADD PARTITION(year = 2016, month = 4, hour = 10, minute = 10, sec = 10, extra = 1) + |""".stripMargin) + } + + protected def checkLocation(t: String, spec: TablePartitionSpec, expected: String): Unit +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala index c6a533dfae4d0..96f9421e1d988 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala @@ -17,30 +17,21 @@ package org.apache.spark.sql.execution.command -import java.net.URI import java.util.Locale -import scala.reflect.{classTag, ClassTag} - -import org.apache.spark.sql.{AnalysisException, SaveMode} -import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedAttribute} -import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.dsl.plans import org.apache.spark.sql.catalyst.dsl.plans.DslLogicalPlan import org.apache.spark.sql.catalyst.expressions.JsonTuple import org.apache.spark.sql.catalyst.parser.ParseException import org.apache.spark.sql.catalyst.plans.logical._ -import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform} import org.apache.spark.sql.execution.SparkSqlParser -import org.apache.spark.sql.execution.datasources.CreateTable -import org.apache.spark.sql.internal.{HiveSerDe, SQLConf} import org.apache.spark.sql.test.SharedSparkSession -import org.apache.spark.sql.types.{IntegerType, StructField, StructType} class DDLParserSuite extends AnalysisTest with SharedSparkSession { - private lazy val parser = new SparkSqlParser(new SQLConf) + private lazy val parser = new SparkSqlParser() private def assertUnsupported(sql: String, containsThesePhrases: Seq[String] = Seq()): Unit = { val e = intercept[ParseException] { @@ -52,159 +43,17 @@ class DDLParserSuite extends AnalysisTest with SharedSparkSession { } } - private def intercept(sqlCommand: String, messages: String*): Unit = - interceptParseException(parser.parsePlan)(sqlCommand, messages: _*) - - private def parseAs[T: ClassTag](query: String): T = { - parser.parsePlan(query) match { - case t: T => t - case other => - fail(s"Expected to parse ${classTag[T].runtimeClass} from query," + - s"got ${other.getClass.getName}: $query") - } - } - private def compareTransformQuery(sql: String, expected: LogicalPlan): Unit = { val plan = parser.parsePlan(sql).asInstanceOf[ScriptTransformation].copy(ioschema = null) comparePlans(plan, expected, checkAnalysis = false) } - private def 
extractTableDesc(sql: String): (CatalogTable, Boolean) = { - parser.parsePlan(sql).collect { - case CreateTable(tableDesc, mode, _) => (tableDesc, mode == SaveMode.Ignore) - }.head - } - test("alter database - property values must be set") { assertUnsupported( sql = "ALTER DATABASE my_db SET DBPROPERTIES('key_without_value', 'key_with_value'='x')", containsThesePhrases = Seq("key_without_value")) } - test("create hive table - table file format") { - val allSources = Seq("parquet", "parquetfile", "orc", "orcfile", "avro", "avrofile", - "sequencefile", "rcfile", "textfile") - - allSources.foreach { s => - val query = s"CREATE TABLE my_tab STORED AS $s" - val ct = parseAs[CreateTable](query) - val hiveSerde = HiveSerDe.sourceToSerDe(s) - assert(hiveSerde.isDefined) - assert(ct.tableDesc.storage.serde == - hiveSerde.get.serde.orElse(Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))) - assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) - assert(ct.tableDesc.storage.outputFormat == hiveSerde.get.outputFormat) - } - } - - test("create hive table - row format and table file format") { - val createTableStart = "CREATE TABLE my_tab ROW FORMAT" - val fileFormat = s"STORED AS INPUTFORMAT 'inputfmt' OUTPUTFORMAT 'outputfmt'" - val query1 = s"$createTableStart SERDE 'anything' $fileFormat" - val query2 = s"$createTableStart DELIMITED FIELDS TERMINATED BY ' ' $fileFormat" - - // No conflicting serdes here, OK - val parsed1 = parseAs[CreateTable](query1) - assert(parsed1.tableDesc.storage.serde == Some("anything")) - assert(parsed1.tableDesc.storage.inputFormat == Some("inputfmt")) - assert(parsed1.tableDesc.storage.outputFormat == Some("outputfmt")) - - val parsed2 = parseAs[CreateTable](query2) - assert(parsed2.tableDesc.storage.serde == - Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) - assert(parsed2.tableDesc.storage.inputFormat == Some("inputfmt")) - assert(parsed2.tableDesc.storage.outputFormat == Some("outputfmt")) - } - - test("create hive table - row format serde and generic file format") { - val allSources = Seq("parquet", "orc", "avro", "sequencefile", "rcfile", "textfile") - val supportedSources = Set("sequencefile", "rcfile", "textfile") - - allSources.foreach { s => - val query = s"CREATE TABLE my_tab ROW FORMAT SERDE 'anything' STORED AS $s" - if (supportedSources.contains(s)) { - val ct = parseAs[CreateTable](query) - val hiveSerde = HiveSerDe.sourceToSerDe(s) - assert(hiveSerde.isDefined) - assert(ct.tableDesc.storage.serde == Some("anything")) - assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) - assert(ct.tableDesc.storage.outputFormat == hiveSerde.get.outputFormat) - } else { - assertUnsupported(query, Seq("row format serde", "incompatible", s)) - } - } - } - - test("create hive table - row format delimited and generic file format") { - val allSources = Seq("parquet", "orc", "avro", "sequencefile", "rcfile", "textfile") - val supportedSources = Set("textfile") - - allSources.foreach { s => - val query = s"CREATE TABLE my_tab ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS $s" - if (supportedSources.contains(s)) { - val ct = parseAs[CreateTable](query) - val hiveSerde = HiveSerDe.sourceToSerDe(s) - assert(hiveSerde.isDefined) - assert(ct.tableDesc.storage.serde == - hiveSerde.get.serde.orElse(Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))) - assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) - assert(ct.tableDesc.storage.outputFormat == hiveSerde.get.outputFormat) - } else { - 
assertUnsupported(query, Seq("row format delimited", "only compatible with 'textfile'", s)) - } - } - } - - test("create hive external table - location must be specified") { - assertUnsupported( - sql = "CREATE EXTERNAL TABLE my_tab STORED AS parquet", - containsThesePhrases = Seq("create external table", "location")) - val query = "CREATE EXTERNAL TABLE my_tab STORED AS parquet LOCATION '/something/anything'" - val ct = parseAs[CreateTable](query) - assert(ct.tableDesc.tableType == CatalogTableType.EXTERNAL) - assert(ct.tableDesc.storage.locationUri == Some(new URI("/something/anything"))) - } - - test("create hive table - property values must be set") { - assertUnsupported( - sql = "CREATE TABLE my_tab STORED AS parquet " + - "TBLPROPERTIES('key_without_value', 'key_with_value'='x')", - containsThesePhrases = Seq("key_without_value")) - assertUnsupported( - sql = "CREATE TABLE my_tab ROW FORMAT SERDE 'serde' " + - "WITH SERDEPROPERTIES('key_without_value', 'key_with_value'='x')", - containsThesePhrases = Seq("key_without_value")) - } - - test("create hive table - location implies external") { - val query = "CREATE TABLE my_tab STORED AS parquet LOCATION '/something/anything'" - val ct = parseAs[CreateTable](query) - assert(ct.tableDesc.tableType == CatalogTableType.EXTERNAL) - assert(ct.tableDesc.storage.locationUri == Some(new URI("/something/anything"))) - } - - test("Duplicate clauses - create hive table") { - def createTableHeader(duplicateClause: String): String = { - s"CREATE TABLE my_tab(a INT, b STRING) STORED AS parquet $duplicateClause $duplicateClause" - } - - intercept(createTableHeader("TBLPROPERTIES('test' = 'test2')"), - "Found duplicate clauses: TBLPROPERTIES") - intercept(createTableHeader("LOCATION '/tmp/file'"), - "Found duplicate clauses: LOCATION") - intercept(createTableHeader("COMMENT 'a table'"), - "Found duplicate clauses: COMMENT") - intercept(createTableHeader("CLUSTERED BY(b) INTO 256 BUCKETS"), - "Found duplicate clauses: CLUSTERED BY") - intercept(createTableHeader("PARTITIONED BY (k int)"), - "Found duplicate clauses: PARTITIONED BY") - intercept(createTableHeader("STORED AS parquet"), - "Found duplicate clauses: STORED AS/BY") - intercept( - createTableHeader("ROW FORMAT SERDE 'parquet.hive.serde.ParquetHiveSerDe'"), - "Found duplicate clauses: ROW FORMAT") - } - test("insert overwrite directory") { val v1 = "INSERT OVERWRITE DIRECTORY '/tmp/file' USING parquet SELECT 1 as a" parser.parsePlan(v1) match { @@ -361,188 +210,6 @@ class DDLParserSuite extends AnalysisTest with SharedSparkSession { assert(e.contains("Found duplicate keys 'a'")) } - test("empty values in non-optional partition specs") { - val e = intercept[ParseException] { - parser.parsePlan( - "SHOW PARTITIONS dbx.tab1 PARTITION (a='1', b)") - }.getMessage - assert(e.contains("Found an empty partition key 'b'")) - } - - test("Test CTAS #1") { - val s1 = - """ - |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view - |COMMENT 'This is the staging page view table' - |STORED AS RCFILE - |LOCATION '/user/external/page_view' - |TBLPROPERTIES ('p1'='v1', 'p2'='v2') - |AS SELECT * FROM src - """.stripMargin - - val s2 = - """ - |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view - |STORED AS RCFILE - |COMMENT 'This is the staging page view table' - |TBLPROPERTIES ('p1'='v1', 'p2'='v2') - |LOCATION '/user/external/page_view' - |AS SELECT * FROM src - """.stripMargin - - val s3 = - """ - |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view - |TBLPROPERTIES ('p1'='v1', 'p2'='v2') - |LOCATION 
'/user/external/page_view' - |STORED AS RCFILE - |COMMENT 'This is the staging page view table' - |AS SELECT * FROM src - """.stripMargin - - checkParsing(s1) - checkParsing(s2) - checkParsing(s3) - - def checkParsing(sql: String): Unit = { - val (desc, exists) = extractTableDesc(sql) - assert(exists) - assert(desc.identifier.database == Some("mydb")) - assert(desc.identifier.table == "page_view") - assert(desc.tableType == CatalogTableType.EXTERNAL) - assert(desc.storage.locationUri == Some(new URI("/user/external/page_view"))) - assert(desc.schema.isEmpty) // will be populated later when the table is actually created - assert(desc.comment == Some("This is the staging page view table")) - // TODO will be SQLText - assert(desc.viewText.isEmpty) - assert(desc.viewCatalogAndNamespace.isEmpty) - assert(desc.viewQueryColumnNames.isEmpty) - assert(desc.partitionColumnNames.isEmpty) - assert(desc.storage.inputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileInputFormat")) - assert(desc.storage.outputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileOutputFormat")) - assert(desc.storage.serde == - Some("org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe")) - assert(desc.properties == Map("p1" -> "v1", "p2" -> "v2")) - } - } - - test("Test CTAS #2") { - val s1 = - """ - |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view - |COMMENT 'This is the staging page view table' - |ROW FORMAT SERDE 'parquet.hive.serde.ParquetHiveSerDe' - | STORED AS - | INPUTFORMAT 'parquet.hive.DeprecatedParquetInputFormat' - | OUTPUTFORMAT 'parquet.hive.DeprecatedParquetOutputFormat' - |LOCATION '/user/external/page_view' - |TBLPROPERTIES ('p1'='v1', 'p2'='v2') - |AS SELECT * FROM src - """.stripMargin - - val s2 = - """ - |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view - |LOCATION '/user/external/page_view' - |TBLPROPERTIES ('p1'='v1', 'p2'='v2') - |ROW FORMAT SERDE 'parquet.hive.serde.ParquetHiveSerDe' - | STORED AS - | INPUTFORMAT 'parquet.hive.DeprecatedParquetInputFormat' - | OUTPUTFORMAT 'parquet.hive.DeprecatedParquetOutputFormat' - |COMMENT 'This is the staging page view table' - |AS SELECT * FROM src - """.stripMargin - - checkParsing(s1) - checkParsing(s2) - - def checkParsing(sql: String): Unit = { - val (desc, exists) = extractTableDesc(sql) - assert(exists) - assert(desc.identifier.database == Some("mydb")) - assert(desc.identifier.table == "page_view") - assert(desc.tableType == CatalogTableType.EXTERNAL) - assert(desc.storage.locationUri == Some(new URI("/user/external/page_view"))) - assert(desc.schema.isEmpty) // will be populated later when the table is actually created - // TODO will be SQLText - assert(desc.comment == Some("This is the staging page view table")) - assert(desc.viewText.isEmpty) - assert(desc.viewCatalogAndNamespace.isEmpty) - assert(desc.viewQueryColumnNames.isEmpty) - assert(desc.partitionColumnNames.isEmpty) - assert(desc.storage.properties == Map()) - assert(desc.storage.inputFormat == Some("parquet.hive.DeprecatedParquetInputFormat")) - assert(desc.storage.outputFormat == Some("parquet.hive.DeprecatedParquetOutputFormat")) - assert(desc.storage.serde == Some("parquet.hive.serde.ParquetHiveSerDe")) - assert(desc.properties == Map("p1" -> "v1", "p2" -> "v2")) - } - } - - test("Test CTAS #3") { - val s3 = """CREATE TABLE page_view AS SELECT * FROM src""" - val (desc, exists) = extractTableDesc(s3) - assert(exists == false) - assert(desc.identifier.database == None) - assert(desc.identifier.table == "page_view") - assert(desc.tableType == CatalogTableType.MANAGED) - 
assert(desc.storage.locationUri == None) - assert(desc.schema.isEmpty) - assert(desc.viewText == None) // TODO will be SQLText - assert(desc.viewQueryColumnNames.isEmpty) - assert(desc.storage.properties == Map()) - assert(desc.storage.inputFormat == Some("org.apache.hadoop.mapred.TextInputFormat")) - assert(desc.storage.outputFormat == - Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")) - assert(desc.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) - assert(desc.properties == Map()) - } - - test("Test CTAS #4") { - val s4 = - """CREATE TABLE page_view - |STORED BY 'storage.handler.class.name' AS SELECT * FROM src""".stripMargin - intercept[AnalysisException] { - extractTableDesc(s4) - } - } - - test("Test CTAS #5") { - val s5 = """CREATE TABLE ctas2 - | ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" - | WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2") - | STORED AS RCFile - | TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22") - | AS - | SELECT key, value - | FROM src - | ORDER BY key, value""".stripMargin - val (desc, exists) = extractTableDesc(s5) - assert(exists == false) - assert(desc.identifier.database == None) - assert(desc.identifier.table == "ctas2") - assert(desc.tableType == CatalogTableType.MANAGED) - assert(desc.storage.locationUri == None) - assert(desc.schema.isEmpty) - assert(desc.viewText == None) // TODO will be SQLText - assert(desc.viewCatalogAndNamespace.isEmpty) - assert(desc.viewQueryColumnNames.isEmpty) - assert(desc.storage.properties == Map(("serde_p1" -> "p1"), ("serde_p2" -> "p2"))) - assert(desc.storage.inputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileInputFormat")) - assert(desc.storage.outputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileOutputFormat")) - assert(desc.storage.serde == Some("org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe")) - assert(desc.properties == Map(("tbl_p1" -> "p11"), ("tbl_p2" -> "p22"))) - } - - test("CTAS statement with a PARTITIONED BY clause is not allowed") { - assertUnsupported(s"CREATE TABLE ctas1 PARTITIONED BY (k int)" + - " AS SELECT key, value FROM (SELECT 1 as key, 2 as value) tmp") - } - - test("CTAS statement with schema") { - assertUnsupported(s"CREATE TABLE ctas1 (age INT, name STRING) AS SELECT * FROM src") - assertUnsupported(s"CREATE TABLE ctas1 (age INT, name STRING) AS SELECT 1, 'hello'") - } - test("unsupported operations") { intercept[ParseException] { parser.parsePlan( @@ -652,205 +319,6 @@ class DDLParserSuite extends AnalysisTest with SharedSparkSession { """.stripMargin) } - test("create table - basic") { - val query = "CREATE TABLE my_table (id int, name string)" - val (desc, allowExisting) = extractTableDesc(query) - assert(!allowExisting) - assert(desc.identifier.database.isEmpty) - assert(desc.identifier.table == "my_table") - assert(desc.tableType == CatalogTableType.MANAGED) - assert(desc.schema == new StructType().add("id", "int").add("name", "string")) - assert(desc.partitionColumnNames.isEmpty) - assert(desc.bucketSpec.isEmpty) - assert(desc.viewText.isEmpty) - assert(desc.viewQueryColumnNames.isEmpty) - assert(desc.storage.locationUri.isEmpty) - assert(desc.storage.inputFormat == - Some("org.apache.hadoop.mapred.TextInputFormat")) - assert(desc.storage.outputFormat == - Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")) - assert(desc.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) - assert(desc.storage.properties.isEmpty) - assert(desc.properties.isEmpty) - 
assert(desc.comment.isEmpty) - } - - test("create table - with database name") { - val query = "CREATE TABLE dbx.my_table (id int, name string)" - val (desc, _) = extractTableDesc(query) - assert(desc.identifier.database == Some("dbx")) - assert(desc.identifier.table == "my_table") - } - - test("create table - temporary") { - val query = "CREATE TEMPORARY TABLE tab1 (id int, name string)" - val e = intercept[ParseException] { parser.parsePlan(query) } - assert(e.message.contains("CREATE TEMPORARY TABLE is not supported yet")) - } - - test("create table - external") { - val query = "CREATE EXTERNAL TABLE tab1 (id int, name string) LOCATION '/path/to/nowhere'" - val (desc, _) = extractTableDesc(query) - assert(desc.tableType == CatalogTableType.EXTERNAL) - assert(desc.storage.locationUri == Some(new URI("/path/to/nowhere"))) - } - - test("create table - if not exists") { - val query = "CREATE TABLE IF NOT EXISTS tab1 (id int, name string)" - val (_, allowExisting) = extractTableDesc(query) - assert(allowExisting) - } - - test("create table - comment") { - val query = "CREATE TABLE my_table (id int, name string) COMMENT 'its hot as hell below'" - val (desc, _) = extractTableDesc(query) - assert(desc.comment == Some("its hot as hell below")) - } - - test("create table - partitioned columns") { - val query = "CREATE TABLE my_table (id int, name string) PARTITIONED BY (month int)" - val (desc, _) = extractTableDesc(query) - assert(desc.schema == new StructType() - .add("id", "int") - .add("name", "string") - .add("month", "int")) - assert(desc.partitionColumnNames == Seq("month")) - } - - test("create table - clustered by") { - val numBuckets = 10 - val bucketedColumn = "id" - val sortColumn = "id" - val baseQuery = - s""" - CREATE TABLE my_table ( - $bucketedColumn int, - name string) - CLUSTERED BY($bucketedColumn) - """ - - val query1 = s"$baseQuery INTO $numBuckets BUCKETS" - val (desc1, _) = extractTableDesc(query1) - assert(desc1.bucketSpec.isDefined) - val bucketSpec1 = desc1.bucketSpec.get - assert(bucketSpec1.numBuckets == numBuckets) - assert(bucketSpec1.bucketColumnNames.head.equals(bucketedColumn)) - assert(bucketSpec1.sortColumnNames.isEmpty) - - val query2 = s"$baseQuery SORTED BY($sortColumn) INTO $numBuckets BUCKETS" - val (desc2, _) = extractTableDesc(query2) - assert(desc2.bucketSpec.isDefined) - val bucketSpec2 = desc2.bucketSpec.get - assert(bucketSpec2.numBuckets == numBuckets) - assert(bucketSpec2.bucketColumnNames.head.equals(bucketedColumn)) - assert(bucketSpec2.sortColumnNames.head.equals(sortColumn)) - } - - test("create table(hive) - skewed by") { - val baseQuery = "CREATE TABLE my_table (id int, name string) SKEWED BY" - val query1 = s"$baseQuery(id) ON (1, 10, 100)" - val query2 = s"$baseQuery(id, name) ON ((1, 'x'), (2, 'y'), (3, 'z'))" - val query3 = s"$baseQuery(id, name) ON ((1, 'x'), (2, 'y'), (3, 'z')) STORED AS DIRECTORIES" - val e1 = intercept[ParseException] { parser.parsePlan(query1) } - val e2 = intercept[ParseException] { parser.parsePlan(query2) } - val e3 = intercept[ParseException] { parser.parsePlan(query3) } - assert(e1.getMessage.contains("Operation not allowed")) - assert(e2.getMessage.contains("Operation not allowed")) - assert(e3.getMessage.contains("Operation not allowed")) - } - - test("create table(hive) - row format") { - val baseQuery = "CREATE TABLE my_table (id int, name string) ROW FORMAT" - val query1 = s"$baseQuery SERDE 'org.apache.poof.serde.Baff'" - val query2 = s"$baseQuery SERDE 'org.apache.poof.serde.Baff' WITH SERDEPROPERTIES 
('k1'='v1')" - val query3 = - s""" - |$baseQuery DELIMITED FIELDS TERMINATED BY 'x' ESCAPED BY 'y' - |COLLECTION ITEMS TERMINATED BY 'a' - |MAP KEYS TERMINATED BY 'b' - |LINES TERMINATED BY '\n' - |NULL DEFINED AS 'c' - """.stripMargin - val (desc1, _) = extractTableDesc(query1) - val (desc2, _) = extractTableDesc(query2) - val (desc3, _) = extractTableDesc(query3) - assert(desc1.storage.serde == Some("org.apache.poof.serde.Baff")) - assert(desc1.storage.properties.isEmpty) - assert(desc2.storage.serde == Some("org.apache.poof.serde.Baff")) - assert(desc2.storage.properties == Map("k1" -> "v1")) - assert(desc3.storage.properties == Map( - "field.delim" -> "x", - "escape.delim" -> "y", - "serialization.format" -> "x", - "line.delim" -> "\n", - "colelction.delim" -> "a", // yes, it's a typo from Hive :) - "mapkey.delim" -> "b")) - } - - test("create table(hive) - file format") { - val baseQuery = "CREATE TABLE my_table (id int, name string) STORED AS" - val query1 = s"$baseQuery INPUTFORMAT 'winput' OUTPUTFORMAT 'wowput'" - val query2 = s"$baseQuery ORC" - val (desc1, _) = extractTableDesc(query1) - val (desc2, _) = extractTableDesc(query2) - assert(desc1.storage.inputFormat == Some("winput")) - assert(desc1.storage.outputFormat == Some("wowput")) - assert(desc1.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) - assert(desc2.storage.inputFormat == Some("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")) - assert(desc2.storage.outputFormat == Some("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")) - assert(desc2.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde")) - } - - test("create table(hive) - storage handler") { - val baseQuery = "CREATE TABLE my_table (id int, name string) STORED BY" - val query1 = s"$baseQuery 'org.papachi.StorageHandler'" - val query2 = s"$baseQuery 'org.mamachi.StorageHandler' WITH SERDEPROPERTIES ('k1'='v1')" - val e1 = intercept[ParseException] { parser.parsePlan(query1) } - val e2 = intercept[ParseException] { parser.parsePlan(query2) } - assert(e1.getMessage.contains("Operation not allowed")) - assert(e2.getMessage.contains("Operation not allowed")) - } - - test("create table - properties") { - val query = "CREATE TABLE my_table (id int, name string) TBLPROPERTIES ('k1'='v1', 'k2'='v2')" - val (desc, _) = extractTableDesc(query) - assert(desc.properties == Map("k1" -> "v1", "k2" -> "v2")) - } - - test("create table(hive) - everything!") { - val query = - """ - |CREATE EXTERNAL TABLE IF NOT EXISTS dbx.my_table (id int, name string) - |COMMENT 'no comment' - |PARTITIONED BY (month int) - |ROW FORMAT SERDE 'org.apache.poof.serde.Baff' WITH SERDEPROPERTIES ('k1'='v1') - |STORED AS INPUTFORMAT 'winput' OUTPUTFORMAT 'wowput' - |LOCATION '/path/to/mercury' - |TBLPROPERTIES ('k1'='v1', 'k2'='v2') - """.stripMargin - val (desc, allowExisting) = extractTableDesc(query) - assert(allowExisting) - assert(desc.identifier.database == Some("dbx")) - assert(desc.identifier.table == "my_table") - assert(desc.tableType == CatalogTableType.EXTERNAL) - assert(desc.schema == new StructType() - .add("id", "int") - .add("name", "string") - .add("month", "int")) - assert(desc.partitionColumnNames == Seq("month")) - assert(desc.bucketSpec.isEmpty) - assert(desc.viewText.isEmpty) - assert(desc.viewCatalogAndNamespace.isEmpty) - assert(desc.viewQueryColumnNames.isEmpty) - assert(desc.storage.locationUri == Some(new URI("/path/to/mercury"))) - assert(desc.storage.inputFormat == Some("winput")) - assert(desc.storage.outputFormat == Some("wowput")) 
- assert(desc.storage.serde == Some("org.apache.poof.serde.Baff")) - assert(desc.storage.properties == Map("k1" -> "v1")) - assert(desc.properties == Map("k1" -> "v1", "k2" -> "v2")) - assert(desc.comment == Some("no comment")) - } - test("create table like") { val v1 = "CREATE TABLE table1 LIKE table2" val (target, source, fileFormat, provider, properties, exists) = diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala index b8ac5079b7745..946e8412cfa7a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala @@ -29,7 +29,7 @@ import org.apache.spark.internal.config import org.apache.spark.internal.config.RDD_PARALLEL_LISTING_THRESHOLD import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode} import org.apache.spark.sql.catalyst.{FunctionIdentifier, QualifiedTableName, TableIdentifier} -import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, NoSuchDatabaseException, NoSuchFunctionException, NoSuchPartitionException, NoSuchTableException, TempTableAlreadyExistsException} +import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, NoSuchDatabaseException, NoSuchFunctionException, NoSuchPartitionException, TempTableAlreadyExistsException} import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec import org.apache.spark.sql.connector.catalog.SupportsNamespaces.PROP_OWNER @@ -334,22 +334,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { testChangeColumn(isDatasourceTable = true) } - test("alter table: add partition (datasource table)") { - testAddPartitions(isDatasourceTable = true) - } - - test("alter table: drop partition (datasource table)") { - testDropPartitions(isDatasourceTable = true) - } - - test("alter table: rename partition (datasource table)") { - testRenamePartitions(isDatasourceTable = true) - } - - test("drop table - data source table") { - testDropTable(isDatasourceTable = true) - } - test("the qualified path of a database is stored in the catalog") { val catalog = spark.sessionState.catalog @@ -549,9 +533,9 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { import testImplicits._ val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str") - // Case 1: with partitioning columns but no schema: Option("inexistentColumns") + // Case 1: with partitioning columns but no schema: Option("nonexistentColumns") // Case 2: without schema and partitioning columns: None - Seq(Option("inexistentColumns"), None).foreach { partitionCols => + Seq(Option("nonexistentColumns"), None).foreach { partitionCols => withTempPath { pathToPartitionedTable => df.write.format("parquet").partitionBy("num") .save(pathToPartitionedTable.getCanonicalPath) @@ -589,9 +573,9 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { import testImplicits._ val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str") - // Case 1: with partitioning columns but no schema: Option("inexistentColumns") + // Case 1: with partitioning columns but no schema: Option("nonexistentColumns") // Case 2: without schema and partitioning columns: None - Seq(Option("inexistentColumns"), None).foreach { partitionCols => + Seq(Option("nonexistentColumns"), None).foreach { partitionCols => withTempPath { 
pathToNonPartitionedTable => df.write.format("parquet").save(pathToNonPartitionedTable.getCanonicalPath) checkSchemaInCreatedDataSourceTable( @@ -608,7 +592,7 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { import testImplicits._ val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str") - // Case 1: with partitioning columns but no schema: Option("inexistentColumns") + // Case 1: with partitioning columns but no schema: Option("nonexistentColumns") // Case 2: without schema and partitioning columns: None Seq(Option("num"), None).foreach { partitionCols => withTempPath { pathToNonPartitionedTable => @@ -1293,36 +1277,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { assertUnsupported("ALTER VIEW dbx.tab1 DROP IF EXISTS PARTITION (b='2')") } - - test("show databases") { - sql("CREATE DATABASE showdb2B") - sql("CREATE DATABASE showdb1A") - - // check the result as well as its order - checkDataset(sql("SHOW DATABASES"), Row("default"), Row("showdb1a"), Row("showdb2b")) - - checkAnswer( - sql("SHOW DATABASES LIKE '*db1A'"), - Row("showdb1a") :: Nil) - - checkAnswer( - sql("SHOW DATABASES '*db1A'"), - Row("showdb1a") :: Nil) - - checkAnswer( - sql("SHOW DATABASES LIKE 'showdb1A'"), - Row("showdb1a") :: Nil) - - checkAnswer( - sql("SHOW DATABASES LIKE '*db1A|*db2B'"), - Row("showdb1a") :: - Row("showdb2b") :: Nil) - - checkAnswer( - sql("SHOW DATABASES LIKE 'non-existentdb'"), - Nil) - } - test("drop view - temporary view") { val catalog = spark.sessionState.catalog sql( @@ -1340,35 +1294,17 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { assert(catalog.listTables("default") == Nil) } - protected def testDropTable(isDatasourceTable: Boolean): Unit = { - if (!isUsingHiveMetastore) { - assert(isDatasourceTable, "InMemoryCatalog only supports data source tables") - } - val catalog = spark.sessionState.catalog - val tableIdent = TableIdentifier("tab1", Some("dbx")) - createDatabase(catalog, "dbx") - createTable(catalog, tableIdent, isDatasourceTable) - assert(catalog.listTables("dbx") == Seq(tableIdent)) - sql("DROP TABLE dbx.tab1") - assert(catalog.listTables("dbx") == Nil) - sql("DROP TABLE IF EXISTS dbx.tab1") - intercept[AnalysisException] { - sql("DROP TABLE dbx.tab1") - } - } - test("drop view") { val catalog = spark.sessionState.catalog val tableIdent = TableIdentifier("tab1", Some("dbx")) createDatabase(catalog, "dbx") createTable(catalog, tableIdent) assert(catalog.listTables("dbx") == Seq(tableIdent)) - val e = intercept[AnalysisException] { sql("DROP VIEW dbx.tab1") } - assert( - e.getMessage.contains("Cannot drop a table with DROP VIEW. Please use DROP TABLE instead")) + assert(e.getMessage.contains( + "dbx.tab1 is a table. 'DROP VIEW' expects a view. 
Please use DROP TABLE instead.")) } protected def testSetProperties(isDatasourceTable: Boolean): Unit = { @@ -1622,160 +1558,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { } } - protected def testAddPartitions(isDatasourceTable: Boolean): Unit = { - if (!isUsingHiveMetastore) { - assert(isDatasourceTable, "InMemoryCatalog only supports data source tables") - } - val catalog = spark.sessionState.catalog - val tableIdent = TableIdentifier("tab1", Some("dbx")) - val part1 = Map("a" -> "1", "b" -> "5") - val part2 = Map("a" -> "2", "b" -> "6") - val part3 = Map("a" -> "3", "b" -> "7") - val part4 = Map("a" -> "4", "b" -> "8") - val part5 = Map("a" -> "9", "b" -> "9") - createDatabase(catalog, "dbx") - createTable(catalog, tableIdent, isDatasourceTable) - createTablePartition(catalog, part1, tableIdent) - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1)) - - // basic add partition - sql("ALTER TABLE dbx.tab1 ADD IF NOT EXISTS " + - "PARTITION (a='2', b='6') LOCATION 'paris' PARTITION (a='3', b='7')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part3)) - assert(catalog.getPartition(tableIdent, part1).storage.locationUri.isDefined) - - val tableLocation = catalog.getTableMetadata(tableIdent).storage.locationUri - assert(tableLocation.isDefined) - val partitionLocation = makeQualifiedPath( - new Path(tableLocation.get.toString, "paris").toString) - - assert(catalog.getPartition(tableIdent, part2).storage.locationUri == Option(partitionLocation)) - assert(catalog.getPartition(tableIdent, part3).storage.locationUri.isDefined) - - // add partitions without explicitly specifying database - catalog.setCurrentDatabase("dbx") - sql("ALTER TABLE tab1 ADD IF NOT EXISTS PARTITION (a='4', b='8')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == - Set(part1, part2, part3, part4)) - - // table to alter does not exist - intercept[AnalysisException] { - sql("ALTER TABLE does_not_exist ADD IF NOT EXISTS PARTITION (a='4', b='9')") - } - - // partition to add already exists - intercept[AnalysisException] { - sql("ALTER TABLE tab1 ADD PARTITION (a='4', b='8')") - } - - // partition to add already exists when using IF NOT EXISTS - sql("ALTER TABLE tab1 ADD IF NOT EXISTS PARTITION (a='4', b='8')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == - Set(part1, part2, part3, part4)) - - // partition spec in ADD PARTITION should be case insensitive by default - sql("ALTER TABLE tab1 ADD PARTITION (A='9', B='9')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == - Set(part1, part2, part3, part4, part5)) - } - - protected def testDropPartitions(isDatasourceTable: Boolean): Unit = { - if (!isUsingHiveMetastore) { - assert(isDatasourceTable, "InMemoryCatalog only supports data source tables") - } - val catalog = spark.sessionState.catalog - val tableIdent = TableIdentifier("tab1", Some("dbx")) - val part1 = Map("a" -> "1", "b" -> "5") - val part2 = Map("a" -> "2", "b" -> "6") - val part3 = Map("a" -> "3", "b" -> "7") - val part4 = Map("a" -> "4", "b" -> "8") - val part5 = Map("a" -> "9", "b" -> "9") - createDatabase(catalog, "dbx") - createTable(catalog, tableIdent, isDatasourceTable) - createTablePartition(catalog, part1, tableIdent) - createTablePartition(catalog, part2, tableIdent) - createTablePartition(catalog, part3, tableIdent) - createTablePartition(catalog, part4, tableIdent) - createTablePartition(catalog, part5, tableIdent) - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet 
== - Set(part1, part2, part3, part4, part5)) - - // basic drop partition - sql("ALTER TABLE dbx.tab1 DROP IF EXISTS PARTITION (a='4', b='8'), PARTITION (a='3', b='7')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part5)) - - // drop partitions without explicitly specifying database - catalog.setCurrentDatabase("dbx") - sql("ALTER TABLE tab1 DROP IF EXISTS PARTITION (a='2', b ='6')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5)) - - // table to alter does not exist - intercept[AnalysisException] { - sql("ALTER TABLE does_not_exist DROP IF EXISTS PARTITION (a='2')") - } - - // partition to drop does not exist - intercept[AnalysisException] { - sql("ALTER TABLE tab1 DROP PARTITION (a='300')") - } - - // partition to drop does not exist when using IF EXISTS - sql("ALTER TABLE tab1 DROP IF EXISTS PARTITION (a='300')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part5)) - - // partition spec in DROP PARTITION should be case insensitive by default - sql("ALTER TABLE tab1 DROP PARTITION (A='1', B='5')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part5)) - - // use int literal as partition value for int type partition column - sql("ALTER TABLE tab1 DROP PARTITION (a=9, b=9)") - assert(catalog.listPartitions(tableIdent).isEmpty) - } - - protected def testRenamePartitions(isDatasourceTable: Boolean): Unit = { - if (!isUsingHiveMetastore) { - assert(isDatasourceTable, "InMemoryCatalog only supports data source tables") - } - val catalog = spark.sessionState.catalog - val tableIdent = TableIdentifier("tab1", Some("dbx")) - val part1 = Map("a" -> "1", "b" -> "q") - val part2 = Map("a" -> "2", "b" -> "c") - val part3 = Map("a" -> "3", "b" -> "p") - createDatabase(catalog, "dbx") - createTable(catalog, tableIdent, isDatasourceTable) - createTablePartition(catalog, part1, tableIdent) - createTablePartition(catalog, part2, tableIdent) - createTablePartition(catalog, part3, tableIdent) - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == Set(part1, part2, part3)) - - // basic rename partition - sql("ALTER TABLE dbx.tab1 PARTITION (a='1', b='q') RENAME TO PARTITION (a='100', b='p')") - sql("ALTER TABLE dbx.tab1 PARTITION (a='2', b='c') RENAME TO PARTITION (a='20', b='c')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == - Set(Map("a" -> "100", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p"))) - - // rename without explicitly specifying database - catalog.setCurrentDatabase("dbx") - sql("ALTER TABLE tab1 PARTITION (a='100', b='p') RENAME TO PARTITION (a='10', b='p')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == - Set(Map("a" -> "10", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p"))) - - // table to alter does not exist - intercept[NoSuchTableException] { - sql("ALTER TABLE does_not_exist PARTITION (c='3') RENAME TO PARTITION (c='333')") - } - - // partition to rename does not exist - intercept[NoSuchPartitionException] { - sql("ALTER TABLE tab1 PARTITION (a='not_found', b='1') RENAME TO PARTITION (a='1', b='2')") - } - - // partition spec in RENAME PARTITION should be case insensitive by default - sql("ALTER TABLE tab1 PARTITION (A='10', B='p') RENAME TO PARTITION (A='1', B='p')") - assert(catalog.listPartitions(tableIdent).map(_.spec).toSet == - Set(Map("a" -> "1", "b" -> "p"), Map("a" -> "20", "b" -> "c"), Map("a" -> "3", "b" -> "p"))) - } - protected def 
testChangeColumn(isDatasourceTable: Boolean): Unit = { if (!isUsingHiveMetastore) { assert(isDatasourceTable, "InMemoryCatalog only supports data source tables") @@ -1863,6 +1645,7 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { "Returns the concatenation of col1, col2, ..., colN.") :: Nil ) // extended mode + // scalastyle:off whitespace.end.of.line checkAnswer( sql("DESCRIBE FUNCTION EXTENDED ^"), Row("Class: org.apache.spark.sql.catalyst.expressions.BitwiseXor") :: @@ -1871,11 +1654,14 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { | Examples: | > SELECT 3 ^ 5; | 6 - | """.stripMargin) :: + | + | Since: 1.4.0 + |""".stripMargin) :: Row("Function: ^") :: Row("Usage: expr1 ^ expr2 - Returns the result of " + "bitwise exclusive OR of `expr1` and `expr2`.") :: Nil ) + // scalastyle:on whitespace.end.of.line } test("create a data source table without schema") { @@ -1907,7 +1693,7 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { |OPTIONS ( | path '${tempDir.getCanonicalPath}' |) - |CLUSTERED BY (inexistentColumnA) SORTED BY (inexistentColumnB) INTO 2 BUCKETS + |CLUSTERED BY (nonexistentColumnA) SORTED BY (nonexistentColumnB) INTO 2 BUCKETS """.stripMargin) } assert(e.message == "Cannot specify bucketing information if the table schema is not " + @@ -2022,7 +1808,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { } test("SPARK-30312: truncate table - keep acl/permission") { - import testImplicits._ val ignorePermissionAcl = Seq(true, false) ignorePermissionAcl.foreach { ignore => @@ -2166,11 +1951,15 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { (1 to 10).map { i => (i, i) }.toDF("a", "b").createTempView("my_temp_tab") sql(s"CREATE TABLE my_ext_tab using parquet LOCATION '${tempDir.toURI}'") sql(s"CREATE VIEW my_view AS SELECT 1") - intercept[NoSuchTableException] { + val e1 = intercept[AnalysisException] { sql("TRUNCATE TABLE my_temp_tab") - } + }.getMessage + assert(e1.contains("my_temp_tab is a temp view. 'TRUNCATE TABLE' expects a table")) assertUnsupported("TRUNCATE TABLE my_ext_tab") - assertUnsupported("TRUNCATE TABLE my_view") + val e2 = intercept[AnalysisException] { + sql("TRUNCATE TABLE my_view") + }.getMessage + assert(e2.contains("default.my_view is a view. 
'TRUNCATE TABLE' expects a table")) } } } @@ -2259,6 +2048,17 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { } } + test("show columns - invalid db name") { + withTable("tbl") { + sql("CREATE TABLE tbl(col1 int, col2 string) USING parquet ") + val message = intercept[AnalysisException] { + sql("SHOW COLUMNS IN tbl FROM a.b.c") + }.getMessage + assert(message.contains( + "The namespace in session catalog must have exactly one name part: a.b.c.tbl")) + } + } + test("SPARK-18009 calling toLocalIterator on commands") { import scala.collection.JavaConverters._ val df = sql("show databases") @@ -3101,81 +2901,6 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils { assert(spark.sessionState.catalog.isRegisteredFunction(rand)) } } - - test("SPARK-32481 Move data to trash on truncate table if enabled") { - val trashIntervalKey = "fs.trash.interval" - withTable("tab1") { - withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "true") { - sql("CREATE TABLE tab1 (col INT) USING parquet") - sql("INSERT INTO tab1 SELECT 1") - // scalastyle:off hadoopconfiguration - val hadoopConf = spark.sparkContext.hadoopConfiguration - // scalastyle:on hadoopconfiguration - val originalValue = hadoopConf.get(trashIntervalKey, "0") - val tablePath = new Path(spark.sessionState.catalog - .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) - - val fs = tablePath.getFileSystem(hadoopConf) - val trashCurrent = new Path(fs.getHomeDirectory, ".Trash/Current") - val trashPath = Path.mergePaths(trashCurrent, tablePath) - assert(!fs.exists(trashPath)) - try { - hadoopConf.set(trashIntervalKey, "5") - sql("TRUNCATE TABLE tab1") - } finally { - hadoopConf.set(trashIntervalKey, originalValue) - } - assert(fs.exists(trashPath)) - fs.delete(trashPath, true) - } - } - } - - test("SPARK-32481 delete data permanently on truncate table if trash interval is non-positive") { - val trashIntervalKey = "fs.trash.interval" - withTable("tab1") { - withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "true") { - sql("CREATE TABLE tab1 (col INT) USING parquet") - sql("INSERT INTO tab1 SELECT 1") - // scalastyle:off hadoopconfiguration - val hadoopConf = spark.sparkContext.hadoopConfiguration - // scalastyle:on hadoopconfiguration - val originalValue = hadoopConf.get(trashIntervalKey, "0") - val tablePath = new Path(spark.sessionState.catalog - .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) - - val fs = tablePath.getFileSystem(hadoopConf) - val trashCurrent = new Path(fs.getHomeDirectory, ".Trash/Current") - val trashPath = Path.mergePaths(trashCurrent, tablePath) - assert(!fs.exists(trashPath)) - try { - hadoopConf.set(trashIntervalKey, "0") - sql("TRUNCATE TABLE tab1") - } finally { - hadoopConf.set(trashIntervalKey, originalValue) - } - assert(!fs.exists(trashPath)) - } - } - } - - test("SPARK-32481 Do not move data to trash on truncate table if disabled") { - withTable("tab1") { - withSQLConf(SQLConf.TRUNCATE_TRASH_ENABLED.key -> "false") { - sql("CREATE TABLE tab1 (col INT) USING parquet") - sql("INSERT INTO tab1 SELECT 1") - val hadoopConf = spark.sessionState.newHadoopConf() - val tablePath = new Path(spark.sessionState.catalog - .getTableMetadata(TableIdentifier("tab1")).storage.locationUri.get) - - val fs = tablePath.getFileSystem(hadoopConf) - val trashCurrent = new Path(fs.getHomeDirectory, ".Trash/Current") - val trashPath = Path.mergePaths(trashCurrent, tablePath) - sql("TRUNCATE TABLE tab1") - assert(!fs.exists(trashPath)) - } - } - } } object FakeLocalFsFileSystem { diff 
--git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableParserSuite.scala new file mode 100644 index 0000000000000..f88fff8ed326e --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableParserSuite.scala @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedTableOrView} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.plans.logical.{DropTable, LogicalPlan} +import org.apache.spark.sql.test.SharedSparkSession + +class DropTableParserSuite extends AnalysisTest with SharedSparkSession { + private def parseCompare(sql: String, expected: LogicalPlan): Unit = { + comparePlans(parsePlan(sql), expected, checkAnalysis = false) + } + + test("drop table") { + parseCompare("DROP TABLE testcat.ns1.ns2.tbl", + DropTable( + UnresolvedTableOrView(Seq("testcat", "ns1", "ns2", "tbl"), "DROP TABLE"), + ifExists = false, + purge = false)) + parseCompare(s"DROP TABLE db.tab", + DropTable( + UnresolvedTableOrView(Seq("db", "tab"), "DROP TABLE"), ifExists = false, purge = false)) + parseCompare(s"DROP TABLE IF EXISTS db.tab", + DropTable( + UnresolvedTableOrView(Seq("db", "tab"), "DROP TABLE"), ifExists = true, purge = false)) + parseCompare(s"DROP TABLE tab", + DropTable( + UnresolvedTableOrView(Seq("tab"), "DROP TABLE"), ifExists = false, purge = false)) + parseCompare(s"DROP TABLE IF EXISTS tab", + DropTable( + UnresolvedTableOrView(Seq("tab"), "DROP TABLE"), ifExists = true, purge = false)) + parseCompare(s"DROP TABLE tab PURGE", + DropTable( + UnresolvedTableOrView(Seq("tab"), "DROP TABLE"), ifExists = false, purge = true)) + parseCompare(s"DROP TABLE IF EXISTS tab PURGE", + DropTable( + UnresolvedTableOrView(Seq("tab"), "DROP TABLE"), ifExists = true, purge = true)) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableSuiteBase.scala new file mode 100644 index 0000000000000..bb76bfd878f48 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableSuiteBase.scala @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} + +/** + * This base suite contains unified tests for the `DROP TABLE` command that check V1 and V2 + * table catalogs. The tests that cannot run for all supported catalogs are located in more + * specific test suites: + * + * - V2 table catalog tests: `org.apache.spark.sql.execution.command.v2.DropTableSuite` + * - V1 table catalog tests: `org.apache.spark.sql.execution.command.v1.DropTableSuiteBase` + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.DropTableSuite` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.DropTableSuite` + */ +trait DropTableSuiteBase extends QueryTest with DDLCommandTestUtils { + override val command = "DROP TABLE" + + protected def createTable(tableName: String): Unit = { + sql(s"CREATE TABLE $tableName (c int) $defaultUsing") + sql(s"INSERT INTO $tableName SELECT 0") + } + + protected def checkTables(namespace: String, expectedTables: String*): Unit = { + val tables = sql(s"SHOW TABLES IN $catalog.$namespace").select("tableName") + val rows = expectedTables.map(Row(_)) + checkAnswer(tables, rows) + } + + test("basic") { + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + + createTable(s"$catalog.ns.tbl") + checkTables("ns", "tbl") + + sql(s"DROP TABLE $catalog.ns.tbl") + checkTables("ns") // no tables + } + } + + test("try to drop a nonexistent table") { + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + checkTables("ns") // no tables + + val errMsg = intercept[AnalysisException] { + sql(s"DROP TABLE $catalog.ns.tbl") + }.getMessage + assert(errMsg.contains("Table or view not found")) + } + } + + test("with IF EXISTS") { + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + + createTable(s"$catalog.ns.tbl") + checkTables("ns", "tbl") + sql(s"DROP TABLE IF EXISTS $catalog.ns.tbl") + checkTables("ns") + + // It must not throw any exceptions + sql(s"DROP TABLE IF EXISTS $catalog.ns.tbl") + checkTables("ns") + } + } + + test("SPARK-33174: DROP TABLE should resolve to a temporary view first") { + withNamespaceAndTable("ns", "t") { t => + withTempView("t") { + sql(s"CREATE TABLE $t (id bigint) $defaultUsing") + sql("CREATE TEMPORARY VIEW t AS SELECT 2") + sql(s"USE $catalog.ns") + try { + // Check the temporary view 't' exists. + checkAnswer( + sql("SHOW TABLES FROM spark_catalog.default LIKE 't'") + .select("tableName", "isTemporary"), + Row("t", true)) + sql("DROP TABLE t") + // Verify that the temporary view 't' is resolved first and dropped. 
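+        // (Editorial note: temporary views shadow tables of the same name during resolution,
+        //  so the DROP TABLE above removed only the temp view 't'; the persistent table
+        //  $catalog.ns.t created at the top of this test is untouched. A hypothetical
+        //  follow-up check, not part of the original test, could assert that directly:
+        //    checkAnswer(sql(s"SHOW TABLES IN $catalog.ns LIKE 't'").select("tableName"), Row("t")))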
+ checkAnswer( + sql("SHOW TABLES FROM spark_catalog.default LIKE 't'") + .select("tableName", "isTemporary"), + Seq.empty) + } finally { + sql(s"USE spark_catalog") + } + } + } + } + + test("SPARK-33305: DROP TABLE should also invalidate cache") { + val t = s"$catalog.ns.tbl" + val view = "view" + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + withTempView(view, "source") { + val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data") + df.createOrReplaceTempView("source") + sql(s"CREATE TABLE $t $defaultUsing AS SELECT id, data FROM source") + sql(s"CACHE TABLE $view AS SELECT id FROM $t") + checkAnswer(sql(s"SELECT * FROM $t"), spark.table("source").collect()) + checkAnswer( + sql(s"SELECT * FROM $view"), + spark.table("source").select("id").collect()) + + assert(!spark.sharedState.cacheManager.lookupCachedData(spark.table(view)).isEmpty) + sql(s"DROP TABLE $t") + assert(spark.sharedState.cacheManager.lookupCachedData(spark.table(view)).isEmpty) + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala index 2d6a5da6d67f7..ee2af085c0fa6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/PlanResolutionSuite.scala @@ -26,19 +26,21 @@ import org.mockito.invocation.InvocationOnMock import org.apache.spark.sql.{AnalysisException, SaveMode} import org.apache.spark.sql.catalyst.{AliasIdentifier, TableIdentifier} -import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, Analyzer, CTESubstitution, EmptyFunctionRegistry, NoSuchTableException, ResolveCatalogs, ResolvedTable, ResolveInlineTables, ResolveSessionCatalog, UnresolvedAttribute, UnresolvedRelation, UnresolvedStar, UnresolvedSubqueryColumnAliases, UnresolvedV2Relation} +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, Analyzer, EmptyFunctionRegistry, NoSuchTableException, ResolvedTable, ResolveSessionCatalog, UnresolvedAttribute, UnresolvedRelation, UnresolvedSubqueryColumnAliases, UnresolvedV2Relation} import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType, InMemoryCatalog, SessionCatalog} import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Expression, InSubquery, IntegerLiteral, ListQuery, StringLiteral} -import org.apache.spark.sql.catalyst.parser.CatalystSqlParser -import org.apache.spark.sql.catalyst.plans.logical.{AlterTable, Assignment, CreateTableAsSelect, CreateV2Table, DeleteAction, DeleteFromTable, DescribeRelation, DropTable, InsertAction, InsertIntoStatement, LocalRelation, LogicalPlan, MergeIntoTable, OneRowRelation, Project, ShowTableProperties, SubqueryAlias, UpdateAction, UpdateTable} +import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParseException} +import org.apache.spark.sql.catalyst.plans.logical.{AlterTable, AppendData, Assignment, CreateTableAsSelect, CreateTableStatement, CreateV2Table, DeleteAction, DeleteFromTable, DescribeRelation, DropTable, InsertAction, LocalRelation, LogicalPlan, MergeIntoTable, OneRowRelation, Project, ShowTableProperties, SubqueryAlias, UpdateAction, UpdateTable} +import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.connector.FakeV2Provider import org.apache.spark.sql.connector.catalog.{CatalogManager, CatalogNotFoundException, 
Identifier, Table, TableCapability, TableCatalog, TableChange, V1Table} import org.apache.spark.sql.connector.catalog.TableChange.{UpdateColumnComment, UpdateColumnType} +import org.apache.spark.sql.connector.expressions.Transform import org.apache.spark.sql.execution.datasources.CreateTable import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation -import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.internal.{HiveSerDe, SQLConf} import org.apache.spark.sql.sources.SimpleScanSource -import org.apache.spark.sql.types.{CharType, DoubleType, HIVE_TYPE_STRING, IntegerType, LongType, MetadataBuilder, StringType, StructField, StructType} +import org.apache.spark.sql.types.{CharType, DoubleType, IntegerType, LongType, StringType, StructField, StructType} class PlanResolutionSuite extends AnalysisTest { import CatalystSqlParser._ @@ -49,6 +51,7 @@ class PlanResolutionSuite extends AnalysisTest { private val table: Table = { val t = mock(classOf[Table]) when(t.schema()).thenReturn(new StructType().add("i", "int").add("s", "string")) + when(t.partitioning()).thenReturn(Array.empty[Transform]) t } @@ -75,6 +78,14 @@ class PlanResolutionSuite extends AnalysisTest { V1Table(t) } + private val view: V1Table = { + val t = mock(classOf[CatalogTable]) + when(t.schema).thenReturn(new StructType().add("i", "int").add("s", "string")) + when(t.tableType).thenReturn(CatalogTableType.VIEW) + when(t.provider).thenReturn(Some(v1Format)) + V1Table(t) + } + private val testCat: TableCatalog = { val newCatalog = mock(classOf[TableCatalog]) when(newCatalog.loadTable(any())).thenAnswer((invocation: InvocationOnMock) => { @@ -98,6 +109,7 @@ class PlanResolutionSuite extends AnalysisTest { case "v2Table" => table case "v2Table1" => table case "v2TableWithAcceptAnySchemaCapability" => tableWithAcceptAnySchemaCapability + case "view" => view case name => throw new NoSuchTableException(name) } }) @@ -145,28 +157,26 @@ class PlanResolutionSuite extends AnalysisTest { manager } - def parseAndResolve(query: String, withDefault: Boolean = false): LogicalPlan = { + def parseAndResolve( + query: String, + withDefault: Boolean = false, + checkAnalysis: Boolean = false): LogicalPlan = { val catalogManager = if (withDefault) { catalogManagerWithDefault } else { catalogManagerWithoutDefault } - val analyzer = new Analyzer(catalogManager, conf) - // TODO: run the analyzer directly. - val rules = Seq( - CTESubstitution, - ResolveInlineTables(conf), - analyzer.ResolveRelations, - new ResolveCatalogs(catalogManager), - new ResolveSessionCatalog(catalogManager, conf, _ == Seq("v"), _ => false), - analyzer.ResolveTables, - analyzer.ResolveReferences, - analyzer.ResolveSubqueryColumnAliases, - analyzer.ResolveReferences, - analyzer.ResolveAlterTableChanges) - rules.foldLeft(parsePlan(query)) { - case (plan, rule) => rule.apply(plan) + val analyzer = new Analyzer(catalogManager) { + override val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Seq( + new ResolveSessionCatalog(catalogManager, _ == Seq("v"), _ => false)) } + // We don't check analysis here by default, as we expect the plan to be unresolved + // such as `CreateTable`. 
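+    // (Editorial note: tests that need full resolution opt in with `checkAnalysis = true`;
+    //  for example, the "drop view in v2 catalog" test below relies on
+    //    intercept[AnalysisException] { parseAndResolve("DROP VIEW testcat.db.view", checkAnalysis = true) }
+    //  to surface the "view support in catalog has not been implemented" error.)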
+ val analyzed = analyzer.execute(CatalystSqlParser.parsePlan(query)) + if (checkAnalysis) { + analyzer.checkAnalysis(analyzed) + } + analyzed } private def parseResolveCompare(query: String, expected: LogicalPlan): Unit = @@ -178,6 +188,16 @@ class PlanResolutionSuite extends AnalysisTest { }.head } + private def assertUnsupported(sql: String, containsThesePhrases: Seq[String] = Seq()): Unit = { + val e = intercept[ParseException] { + parsePlan(sql) + } + assert(e.getMessage.toLowerCase(Locale.ROOT).contains("operation not allowed")) + containsThesePhrases.foreach { p => + assert(e.getMessage.toLowerCase(Locale.ROOT).contains(p.toLowerCase(Locale.ROOT))) + } + } + test("create table - with partitioned by") { val query = "CREATE TABLE my_tab(a INT comment 'test', b STRING) " + "USING parquet PARTITIONED BY (a)" @@ -428,10 +448,11 @@ class PlanResolutionSuite extends AnalysisTest { val expectedProperties = Map( "p1" -> "v1", "p2" -> "v2", - "other" -> "20", + "option.other" -> "20", "provider" -> "parquet", "location" -> "s3://bucket/path/to/data", - "comment" -> "table comment") + "comment" -> "table comment", + "other" -> "20") parseAndResolve(sql) match { case create: CreateV2Table => @@ -467,10 +488,11 @@ class PlanResolutionSuite extends AnalysisTest { val expectedProperties = Map( "p1" -> "v1", "p2" -> "v2", - "other" -> "20", + "option.other" -> "20", "provider" -> "parquet", "location" -> "s3://bucket/path/to/data", - "comment" -> "table comment") + "comment" -> "table comment", + "other" -> "20") parseAndResolve(sql, withDefault = true) match { case create: CreateV2Table => @@ -542,10 +564,11 @@ class PlanResolutionSuite extends AnalysisTest { val expectedProperties = Map( "p1" -> "v1", "p2" -> "v2", - "other" -> "20", + "option.other" -> "20", "provider" -> "parquet", "location" -> "s3://bucket/path/to/data", - "comment" -> "table comment") + "comment" -> "table comment", + "other" -> "20") parseAndResolve(sql) match { case ctas: CreateTableAsSelect => @@ -576,10 +599,11 @@ class PlanResolutionSuite extends AnalysisTest { val expectedProperties = Map( "p1" -> "v1", "p2" -> "v2", - "other" -> "20", + "option.other" -> "20", "provider" -> "parquet", "location" -> "s3://bucket/path/to/data", - "comment" -> "table comment") + "comment" -> "table comment", + "other" -> "20") parseAndResolve(sql, withDefault = true) match { case ctas: CreateTableAsSelect => @@ -630,10 +654,10 @@ class PlanResolutionSuite extends AnalysisTest { } test("drop table") { - val tableName1 = "db.tab" - val tableIdent1 = TableIdentifier("tab", Option("db")) - val tableName2 = "tab" - val tableIdent2 = TableIdentifier("tab", Some("default")) + val tableName1 = "db.v1Table" + val tableIdent1 = TableIdentifier("v1Table", Option("db")) + val tableName2 = "v1Table" + val tableIdent2 = TableIdentifier("v1Table", Some("default")) parseResolveCompare(s"DROP TABLE $tableName1", DropTableCommand(tableIdent1, ifExists = false, isView = false, purge = false)) @@ -656,13 +680,13 @@ class PlanResolutionSuite extends AnalysisTest { val tableIdent2 = Identifier.of(Array.empty, "tab") parseResolveCompare(s"DROP TABLE $tableName1", - DropTable(testCat, tableIdent1, ifExists = false)) + DropTable(ResolvedTable.create(testCat, tableIdent1, table), ifExists = false, purge = false)) parseResolveCompare(s"DROP TABLE IF EXISTS $tableName1", - DropTable(testCat, tableIdent1, ifExists = true)) + DropTable(ResolvedTable.create(testCat, tableIdent1, table), ifExists = true, purge = false)) parseResolveCompare(s"DROP TABLE $tableName2", - 
DropTable(testCat, tableIdent2, ifExists = false)) + DropTable(ResolvedTable.create(testCat, tableIdent2, table), ifExists = false, purge = false)) parseResolveCompare(s"DROP TABLE IF EXISTS $tableName2", - DropTable(testCat, tableIdent2, ifExists = true)) + DropTable(ResolvedTable.create(testCat, tableIdent2, table), ifExists = true, purge = false)) } test("drop view") { @@ -670,6 +694,8 @@ class PlanResolutionSuite extends AnalysisTest { val viewIdent1 = TableIdentifier("view", Option("db")) val viewName2 = "view" val viewIdent2 = TableIdentifier("view", Option("default")) + val tempViewName = "v" + val tempViewIdent = TableIdentifier("v") parseResolveCompare(s"DROP VIEW $viewName1", DropTableCommand(viewIdent1, ifExists = false, isView = true, purge = false)) @@ -679,11 +705,15 @@ class PlanResolutionSuite extends AnalysisTest { DropTableCommand(viewIdent2, ifExists = false, isView = true, purge = false)) parseResolveCompare(s"DROP VIEW IF EXISTS $viewName2", DropTableCommand(viewIdent2, ifExists = true, isView = true, purge = false)) + parseResolveCompare(s"DROP VIEW $tempViewName", + DropTableCommand(tempViewIdent, ifExists = false, isView = true, purge = false)) + parseResolveCompare(s"DROP VIEW IF EXISTS $tempViewName", + DropTableCommand(tempViewIdent, ifExists = true, isView = true, purge = false)) } test("drop view in v2 catalog") { intercept[AnalysisException] { - parseAndResolve("DROP VIEW testcat.db.view") + parseAndResolve("DROP VIEW testcat.db.view", checkAnalysis = true) }.getMessage.toLowerCase(Locale.ROOT).contains( "view support in catalog has not been implemented") } @@ -691,16 +721,16 @@ class PlanResolutionSuite extends AnalysisTest { // ALTER VIEW view_name SET TBLPROPERTIES ('comment' = new_comment); // ALTER VIEW view_name UNSET TBLPROPERTIES [IF EXISTS] ('comment', 'key'); test("alter view: alter view properties") { - val sql1_view = "ALTER VIEW table_name SET TBLPROPERTIES ('test' = 'test', " + + val sql1_view = "ALTER VIEW view SET TBLPROPERTIES ('test' = 'test', " + "'comment' = 'new_comment')" - val sql2_view = "ALTER VIEW table_name UNSET TBLPROPERTIES ('comment', 'test')" - val sql3_view = "ALTER VIEW table_name UNSET TBLPROPERTIES IF EXISTS ('comment', 'test')" + val sql2_view = "ALTER VIEW view UNSET TBLPROPERTIES ('comment', 'test')" + val sql3_view = "ALTER VIEW view UNSET TBLPROPERTIES IF EXISTS ('comment', 'test')" val parsed1_view = parseAndResolve(sql1_view) val parsed2_view = parseAndResolve(sql2_view) val parsed3_view = parseAndResolve(sql3_view) - val tableIdent = TableIdentifier("table_name", Some("default")) + val tableIdent = TableIdentifier("view", Some("default")) val expected1_view = AlterTableSetPropertiesCommand( tableIdent, Map("test" -> "test", "comment" -> "new_comment"), isView = true) val expected2_view = AlterTableUnsetPropertiesCommand( @@ -1076,9 +1106,7 @@ class PlanResolutionSuite extends AnalysisTest { } val sql = s"ALTER TABLE v1HiveTable ALTER COLUMN i TYPE char(1)" - val builder = new MetadataBuilder - builder.putString(HIVE_TYPE_STRING, CharType(1).catalogString) - val newColumnWithCleanedType = StructField("i", StringType, true, builder.build()) + val newColumnWithCleanedType = StructField("i", CharType(1), true) val expected = AlterTableChangeColumnCommand( TableIdentifier("v1HiveTable", Some("default")), "i", newColumnWithCleanedType) val parsed = parseAndResolve(sql) @@ -1144,9 +1172,9 @@ class PlanResolutionSuite extends AnalysisTest { ("ALTER TABLE testcat.tab ALTER COLUMN i TYPE bigint", false), ("ALTER TABLE tab 
ALTER COLUMN i TYPE bigint", false), (s"ALTER TABLE $v2SessionCatalogTable ALTER COLUMN i TYPE bigint", true), - ("INSERT INTO TABLE tab VALUES (1)", false), - ("INSERT INTO TABLE testcat.tab VALUES (1)", false), - (s"INSERT INTO TABLE $v2SessionCatalogTable VALUES (1)", true), + ("INSERT INTO TABLE tab VALUES (1, 'a')", false), + ("INSERT INTO TABLE testcat.tab VALUES (1, 'a')", false), + (s"INSERT INTO TABLE $v2SessionCatalogTable VALUES (1, 'a')", true), ("DESC TABLE tab", false), ("DESC TABLE testcat.tab", false), (s"DESC TABLE $v2SessionCatalogTable", true), @@ -1159,26 +1187,26 @@ class PlanResolutionSuite extends AnalysisTest { ) } - DSV2ResolutionTests.foreach { case (sql, isSessionCatlog) => + DSV2ResolutionTests.foreach { case (sql, isSessionCatalog) => test(s"Data source V2 relation resolution '$sql'") { val parsed = parseAndResolve(sql, withDefault = true) - val catlogIdent = if (isSessionCatlog) v2SessionCatalog else testCat - val tableIdent = if (isSessionCatlog) "v2Table" else "tab" + val catalogIdent = if (isSessionCatalog) v2SessionCatalog else testCat + val tableIdent = if (isSessionCatalog) "v2Table" else "tab" parsed match { case AlterTable(_, _, r: DataSourceV2Relation, _) => - assert(r.catalog.exists(_ == catlogIdent)) + assert(r.catalog.exists(_ == catalogIdent)) assert(r.identifier.exists(_.name() == tableIdent)) case Project(_, AsDataSourceV2Relation(r)) => - assert(r.catalog.exists(_ == catlogIdent)) + assert(r.catalog.exists(_ == catalogIdent)) assert(r.identifier.exists(_.name() == tableIdent)) - case InsertIntoStatement(r: DataSourceV2Relation, _, _, _, _) => - assert(r.catalog.exists(_ == catlogIdent)) + case AppendData(r: DataSourceV2Relation, _, _, _, _) => + assert(r.catalog.exists(_ == catalogIdent)) assert(r.identifier.exists(_.name() == tableIdent)) case DescribeRelation(r: ResolvedTable, _, _) => - assert(r.catalog == catlogIdent) + assert(r.catalog == catalogIdent) assert(r.identifier.name() == tableIdent) case ShowTableProperties(r: ResolvedTable, _) => - assert(r.catalog == catlogIdent) + assert(r.catalog == catalogIdent) assert(r.identifier.name() == tableIdent) case ShowTablePropertiesCommand(t: TableIdentifier, _) => assert(t.identifier == tableIdent) @@ -1519,42 +1547,630 @@ class PlanResolutionSuite extends AnalysisTest { } } - test("SPARK-31147: forbid CHAR type in non-Hive tables") { - def checkFailure(t: String, provider: String): Unit = { - val types = Seq( - "CHAR(2)", - "ARRAY", - "MAP", - "MAP", - "STRUCT") - types.foreach { tpe => - intercept[AnalysisException] { - parseAndResolve(s"CREATE TABLE $t(col $tpe) USING $provider") - } - intercept[AnalysisException] { - parseAndResolve(s"REPLACE TABLE $t(col $tpe) USING $provider") - } - intercept[AnalysisException] { - parseAndResolve(s"CREATE OR REPLACE TABLE $t(col $tpe) USING $provider") - } - intercept[AnalysisException] { - parseAndResolve(s"ALTER TABLE $t ADD COLUMN col $tpe") - } - intercept[AnalysisException] { - parseAndResolve(s"ALTER TABLE $t ADD COLUMN col $tpe") - } - intercept[AnalysisException] { - parseAndResolve(s"ALTER TABLE $t ALTER COLUMN col TYPE $tpe") + private def compareNormalized(plan1: LogicalPlan, plan2: LogicalPlan): Unit = { + /** + * Normalizes plans: + * - CreateTable the createTime in tableDesc will replaced by -1L. 
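+   *   (createTime is taken from the wall clock when the CatalogTable is built, so it is
+   *   pinned to a constant here; otherwise two otherwise-identical CreateTable plans would
+   *   never compare equal in `comparePlans`.)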
+ */ + def normalizePlan(plan: LogicalPlan): LogicalPlan = { + plan match { + case CreateTable(tableDesc, mode, query) => + val newTableDesc = tableDesc.copy(createTime = -1L) + CreateTable(newTableDesc, mode, query) + case _ => plan // Don't transform + } + } + comparePlans(normalizePlan(plan1), normalizePlan(plan2)) + } + + test("create table - schema") { + def createTable( + table: String, + database: Option[String] = None, + tableType: CatalogTableType = CatalogTableType.MANAGED, + storage: CatalogStorageFormat = CatalogStorageFormat.empty.copy( + inputFormat = HiveSerDe.sourceToSerDe("textfile").get.inputFormat, + outputFormat = HiveSerDe.sourceToSerDe("textfile").get.outputFormat, + serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")), + schema: StructType = new StructType, + provider: Option[String] = Some("hive"), + partitionColumnNames: Seq[String] = Seq.empty, + comment: Option[String] = None, + mode: SaveMode = SaveMode.ErrorIfExists, + query: Option[LogicalPlan] = None): CreateTable = { + CreateTable( + CatalogTable( + identifier = TableIdentifier(table, database), + tableType = tableType, + storage = storage, + schema = schema, + provider = provider, + partitionColumnNames = partitionColumnNames, + comment = comment + ), mode, query + ) + } + + def compare(sql: String, plan: LogicalPlan): Unit = { + compareNormalized(parseAndResolve(sql), plan) + } + + compare("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) STORED AS textfile", + createTable( + table = "my_tab", + database = Some("default"), + schema = (new StructType) + .add("a", IntegerType, nullable = true, "test") + .add("b", StringType) + ) + ) + compare("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) STORED AS textfile " + + "PARTITIONED BY (c INT, d STRING COMMENT 'test2')", + createTable( + table = "my_tab", + database = Some("default"), + schema = (new StructType) + .add("a", IntegerType, nullable = true, "test") + .add("b", StringType) + .add("c", IntegerType) + .add("d", StringType, nullable = true, "test2"), + partitionColumnNames = Seq("c", "d") + ) + ) + compare("CREATE TABLE my_tab(id BIGINT, nested STRUCT) " + + "STORED AS textfile", + createTable( + table = "my_tab", + database = Some("default"), + schema = (new StructType) + .add("id", LongType) + .add("nested", (new StructType) + .add("col1", StringType) + .add("col2", IntegerType) + ) + ) + ) + // Partitioned by a StructType should be accepted by `SparkSqlParser` but will fail an analyze + // rule in `AnalyzeCreateTable`. 
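+    // (Editorial note: in other words, the statement below parses into a CatalogTable whose
+    //  partition column `nested` carries a struct type; rejecting such a column is the job of
+    //  the analysis rule named above rather than the parser, so this comparison only checks
+    //  the parsed CatalogTable.)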
+ compare("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) STORED AS textfile " + + "PARTITIONED BY (nested STRUCT)", + createTable( + table = "my_tab", + database = Some("default"), + schema = (new StructType) + .add("a", IntegerType, nullable = true, "test") + .add("b", StringType) + .add("nested", (new StructType) + .add("col1", StringType) + .add("col2", IntegerType) + ), + partitionColumnNames = Seq("nested") + ) + ) + + interceptParseException(parsePlan)( + "CREATE TABLE my_tab(a: INT COMMENT 'test', b: STRING)", + "extraneous input ':'") + } + + test("create hive table - table file format") { + val allSources = Seq("parquet", "parquetfile", "orc", "orcfile", "avro", "avrofile", + "sequencefile", "rcfile", "textfile") + + allSources.foreach { s => + val query = s"CREATE TABLE my_tab STORED AS $s" + parseAndResolve(query) match { + case ct: CreateTable => + val hiveSerde = HiveSerDe.sourceToSerDe(s) + assert(hiveSerde.isDefined) + assert(ct.tableDesc.storage.serde == + hiveSerde.get.serde.orElse(Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))) + assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) + assert(ct.tableDesc.storage.outputFormat == hiveSerde.get.outputFormat) + } + } + } + + test("create hive table - row format and table file format") { + val createTableStart = "CREATE TABLE my_tab ROW FORMAT" + val fileFormat = s"STORED AS INPUTFORMAT 'inputfmt' OUTPUTFORMAT 'outputfmt'" + val query1 = s"$createTableStart SERDE 'anything' $fileFormat" + val query2 = s"$createTableStart DELIMITED FIELDS TERMINATED BY ' ' $fileFormat" + + // No conflicting serdes here, OK + parseAndResolve(query1) match { + case parsed1: CreateTable => + assert(parsed1.tableDesc.storage.serde == Some("anything")) + assert(parsed1.tableDesc.storage.inputFormat == Some("inputfmt")) + assert(parsed1.tableDesc.storage.outputFormat == Some("outputfmt")) + } + + parseAndResolve(query2) match { + case parsed2: CreateTable => + assert(parsed2.tableDesc.storage.serde == + Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) + assert(parsed2.tableDesc.storage.inputFormat == Some("inputfmt")) + assert(parsed2.tableDesc.storage.outputFormat == Some("outputfmt")) + } + } + + test("create hive table - row format serde and generic file format") { + val allSources = Seq("parquet", "orc", "avro", "sequencefile", "rcfile", "textfile") + val supportedSources = Set("sequencefile", "rcfile", "textfile") + + allSources.foreach { s => + val query = s"CREATE TABLE my_tab ROW FORMAT SERDE 'anything' STORED AS $s" + if (supportedSources.contains(s)) { + parseAndResolve(query) match { + case ct: CreateTable => + val hiveSerde = HiveSerDe.sourceToSerDe(s) + assert(hiveSerde.isDefined) + assert(ct.tableDesc.storage.serde == Some("anything")) + assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) + assert(ct.tableDesc.storage.outputFormat == hiveSerde.get.outputFormat) } - intercept[AnalysisException] { - parseAndResolve(s"ALTER TABLE $t REPLACE COLUMNS (col $tpe)") + } else { + assertUnsupported(query, Seq("row format serde", "incompatible", s)) + } + } + } + + test("create hive table - row format delimited and generic file format") { + val allSources = Seq("parquet", "orc", "avro", "sequencefile", "rcfile", "textfile") + val supportedSources = Set("textfile") + + allSources.foreach { s => + val query = s"CREATE TABLE my_tab ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS $s" + if (supportedSources.contains(s)) { + parseAndResolve(query) match { + case ct: CreateTable => + 
val hiveSerde = HiveSerDe.sourceToSerDe(s) + assert(hiveSerde.isDefined) + assert(ct.tableDesc.storage.serde == hiveSerde.get.serde + .orElse(Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))) + assert(ct.tableDesc.storage.inputFormat == hiveSerde.get.inputFormat) + assert(ct.tableDesc.storage.outputFormat == hiveSerde.get.outputFormat) } + } else { + assertUnsupported(query, Seq("row format delimited", "only compatible with 'textfile'", s)) } } + } + + test("create hive external table") { + val withoutLoc = "CREATE EXTERNAL TABLE my_tab STORED AS parquet" + parseAndResolve(withoutLoc) match { + case ct: CreateTable => + assert(ct.tableDesc.tableType == CatalogTableType.EXTERNAL) + assert(ct.tableDesc.storage.locationUri.isEmpty) + } + + val withLoc = "CREATE EXTERNAL TABLE my_tab STORED AS parquet LOCATION '/something/anything'" + parseAndResolve(withLoc) match { + case ct: CreateTable => + assert(ct.tableDesc.tableType == CatalogTableType.EXTERNAL) + assert(ct.tableDesc.storage.locationUri == Some(new URI("/something/anything"))) + } + } + + test("create hive table - property values must be set") { + assertUnsupported( + sql = "CREATE TABLE my_tab STORED AS parquet " + + "TBLPROPERTIES('key_without_value', 'key_with_value'='x')", + containsThesePhrases = Seq("key_without_value")) + assertUnsupported( + sql = "CREATE TABLE my_tab ROW FORMAT SERDE 'serde' " + + "WITH SERDEPROPERTIES('key_without_value', 'key_with_value'='x')", + containsThesePhrases = Seq("key_without_value")) + } - checkFailure("v1Table", v1Format) - checkFailure("v2Table", v2Format) - checkFailure("testcat.tab", "foo") + test("create hive table - location implies external") { + val query = "CREATE TABLE my_tab STORED AS parquet LOCATION '/something/anything'" + parseAndResolve(query) match { + case ct: CreateTable => + assert(ct.tableDesc.tableType == CatalogTableType.EXTERNAL) + assert(ct.tableDesc.storage.locationUri == Some(new URI("/something/anything"))) + } + } + + test("Duplicate clauses - create hive table") { + def intercept(sqlCommand: String, messages: String*): Unit = + interceptParseException(parsePlan)(sqlCommand, messages: _*) + + def createTableHeader(duplicateClause: String): String = { + s"CREATE TABLE my_tab(a INT, b STRING) STORED AS parquet $duplicateClause $duplicateClause" + } + + intercept(createTableHeader("TBLPROPERTIES('test' = 'test2')"), + "Found duplicate clauses: TBLPROPERTIES") + intercept(createTableHeader("LOCATION '/tmp/file'"), + "Found duplicate clauses: LOCATION") + intercept(createTableHeader("COMMENT 'a table'"), + "Found duplicate clauses: COMMENT") + intercept(createTableHeader("CLUSTERED BY(b) INTO 256 BUCKETS"), + "Found duplicate clauses: CLUSTERED BY") + intercept(createTableHeader("PARTITIONED BY (k int)"), + "Found duplicate clauses: PARTITIONED BY") + intercept(createTableHeader("STORED AS parquet"), + "Found duplicate clauses: STORED AS/BY") + intercept( + createTableHeader("ROW FORMAT SERDE 'parquet.hive.serde.ParquetHiveSerDe'"), + "Found duplicate clauses: ROW FORMAT") + } + + test("Test CTAS #1") { + val s1 = + """ + |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view + |COMMENT 'This is the staging page view table' + |STORED AS RCFILE + |LOCATION '/user/external/page_view' + |TBLPROPERTIES ('p1'='v1', 'p2'='v2') + |AS SELECT * FROM src + """.stripMargin + + val s2 = + """ + |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view + |STORED AS RCFILE + |COMMENT 'This is the staging page view table' + |TBLPROPERTIES ('p1'='v1', 'p2'='v2') + |LOCATION 
'/user/external/page_view' + |AS SELECT * FROM src + """.stripMargin + + val s3 = + """ + |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view + |TBLPROPERTIES ('p1'='v1', 'p2'='v2') + |LOCATION '/user/external/page_view' + |STORED AS RCFILE + |COMMENT 'This is the staging page view table' + |AS SELECT * FROM src + """.stripMargin + + checkParsing(s1) + checkParsing(s2) + checkParsing(s3) + + def checkParsing(sql: String): Unit = { + val (desc, exists) = extractTableDesc(sql) + assert(exists) + assert(desc.identifier.database == Some("mydb")) + assert(desc.identifier.table == "page_view") + assert(desc.tableType == CatalogTableType.EXTERNAL) + assert(desc.storage.locationUri == Some(new URI("/user/external/page_view"))) + assert(desc.schema.isEmpty) // will be populated later when the table is actually created + assert(desc.comment == Some("This is the staging page view table")) + // TODO will be SQLText + assert(desc.viewText.isEmpty) + assert(desc.viewCatalogAndNamespace.isEmpty) + assert(desc.viewQueryColumnNames.isEmpty) + assert(desc.partitionColumnNames.isEmpty) + assert(desc.storage.inputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileInputFormat")) + assert(desc.storage.outputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileOutputFormat")) + assert(desc.storage.serde == + Some("org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe")) + assert(desc.properties == Map("p1" -> "v1", "p2" -> "v2")) + } + } + + test("Test CTAS #2") { + val s1 = + """ + |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view + |COMMENT 'This is the staging page view table' + |ROW FORMAT SERDE 'parquet.hive.serde.ParquetHiveSerDe' + | STORED AS + | INPUTFORMAT 'parquet.hive.DeprecatedParquetInputFormat' + | OUTPUTFORMAT 'parquet.hive.DeprecatedParquetOutputFormat' + |LOCATION '/user/external/page_view' + |TBLPROPERTIES ('p1'='v1', 'p2'='v2') + |AS SELECT * FROM src + """.stripMargin + + val s2 = + """ + |CREATE EXTERNAL TABLE IF NOT EXISTS mydb.page_view + |LOCATION '/user/external/page_view' + |TBLPROPERTIES ('p1'='v1', 'p2'='v2') + |ROW FORMAT SERDE 'parquet.hive.serde.ParquetHiveSerDe' + | STORED AS + | INPUTFORMAT 'parquet.hive.DeprecatedParquetInputFormat' + | OUTPUTFORMAT 'parquet.hive.DeprecatedParquetOutputFormat' + |COMMENT 'This is the staging page view table' + |AS SELECT * FROM src + """.stripMargin + + checkParsing(s1) + checkParsing(s2) + + def checkParsing(sql: String): Unit = { + val (desc, exists) = extractTableDesc(sql) + assert(exists) + assert(desc.identifier.database == Some("mydb")) + assert(desc.identifier.table == "page_view") + assert(desc.tableType == CatalogTableType.EXTERNAL) + assert(desc.storage.locationUri == Some(new URI("/user/external/page_view"))) + assert(desc.schema.isEmpty) // will be populated later when the table is actually created + // TODO will be SQLText + assert(desc.comment == Some("This is the staging page view table")) + assert(desc.viewText.isEmpty) + assert(desc.viewCatalogAndNamespace.isEmpty) + assert(desc.viewQueryColumnNames.isEmpty) + assert(desc.partitionColumnNames.isEmpty) + assert(desc.storage.properties == Map()) + assert(desc.storage.inputFormat == Some("parquet.hive.DeprecatedParquetInputFormat")) + assert(desc.storage.outputFormat == Some("parquet.hive.DeprecatedParquetOutputFormat")) + assert(desc.storage.serde == Some("parquet.hive.serde.ParquetHiveSerDe")) + assert(desc.properties == Map("p1" -> "v1", "p2" -> "v2")) + } + } + + test("Test CTAS #3") { + val s3 = """CREATE TABLE page_view STORED AS textfile AS SELECT * FROM src""" + 
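+    // (Editorial note: the statement has neither EXTERNAL nor a LOCATION clause, so the
+    //  extracted CatalogTable below is MANAGED with Hive's textfile defaults, and `exists`
+    //  is false because there is no IF NOT EXISTS.)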
val (desc, exists) = extractTableDesc(s3) + assert(exists == false) + assert(desc.identifier.database == Some("default")) + assert(desc.identifier.table == "page_view") + assert(desc.tableType == CatalogTableType.MANAGED) + assert(desc.storage.locationUri == None) + assert(desc.schema.isEmpty) + assert(desc.viewText == None) // TODO will be SQLText + assert(desc.viewQueryColumnNames.isEmpty) + assert(desc.storage.properties == Map()) + assert(desc.storage.inputFormat == Some("org.apache.hadoop.mapred.TextInputFormat")) + assert(desc.storage.outputFormat == + Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")) + assert(desc.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) + assert(desc.properties == Map()) + } + + test("Test CTAS #4") { + val s4 = + """CREATE TABLE page_view + |STORED BY 'storage.handler.class.name' AS SELECT * FROM src""".stripMargin + intercept[AnalysisException] { + extractTableDesc(s4) + } + } + + test("Test CTAS #5") { + val s5 = """CREATE TABLE ctas2 + | ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe" + | WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2") + | STORED AS RCFile + | TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22") + | AS + | SELECT key, value + | FROM src + | ORDER BY key, value""".stripMargin + val (desc, exists) = extractTableDesc(s5) + assert(exists == false) + assert(desc.identifier.database == Some("default")) + assert(desc.identifier.table == "ctas2") + assert(desc.tableType == CatalogTableType.MANAGED) + assert(desc.storage.locationUri == None) + assert(desc.schema.isEmpty) + assert(desc.viewText == None) // TODO will be SQLText + assert(desc.viewCatalogAndNamespace.isEmpty) + assert(desc.viewQueryColumnNames.isEmpty) + assert(desc.storage.properties == Map(("serde_p1" -> "p1"), ("serde_p2" -> "p2"))) + assert(desc.storage.inputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileInputFormat")) + assert(desc.storage.outputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileOutputFormat")) + assert(desc.storage.serde == Some("org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe")) + assert(desc.properties == Map(("tbl_p1" -> "p11"), ("tbl_p2" -> "p22"))) + } + + test("CTAS statement with a PARTITIONED BY clause is not allowed") { + assertUnsupported(s"CREATE TABLE ctas1 PARTITIONED BY (k int)" + + " AS SELECT key, value FROM (SELECT 1 as key, 2 as value) tmp") + } + + test("CTAS statement with schema") { + assertUnsupported(s"CREATE TABLE ctas1 (age INT, name STRING) AS SELECT * FROM src") + assertUnsupported(s"CREATE TABLE ctas1 (age INT, name STRING) AS SELECT 1, 'hello'") + } + + test("create table - basic") { + val query = "CREATE TABLE my_table (id int, name string)" + val (desc, allowExisting) = extractTableDesc(query) + assert(!allowExisting) + assert(desc.identifier.database == Some("default")) + assert(desc.identifier.table == "my_table") + assert(desc.tableType == CatalogTableType.MANAGED) + assert(desc.schema == new StructType().add("id", "int").add("name", "string")) + assert(desc.partitionColumnNames.isEmpty) + assert(desc.bucketSpec.isEmpty) + assert(desc.viewText.isEmpty) + assert(desc.viewQueryColumnNames.isEmpty) + assert(desc.storage.locationUri.isEmpty) + assert(desc.storage.inputFormat == + Some("org.apache.hadoop.mapred.TextInputFormat")) + assert(desc.storage.outputFormat == + Some("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat")) + assert(desc.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) + 
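+    // (Editorial note: TextInputFormat, HiveIgnoreKeyTextOutputFormat and LazySimpleSerDe are
+    //  Hive's textfile defaults, applied because the statement specifies neither STORED AS
+    //  nor ROW FORMAT.)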
assert(desc.storage.properties.isEmpty) + assert(desc.properties.isEmpty) + assert(desc.comment.isEmpty) + } + + test("create table - with database name") { + val query = "CREATE TABLE dbx.my_table (id int, name string)" + val (desc, _) = extractTableDesc(query) + assert(desc.identifier.database == Some("dbx")) + assert(desc.identifier.table == "my_table") + } + + test("create table - temporary") { + val query = "CREATE TEMPORARY TABLE tab1 (id int, name string)" + val e = intercept[ParseException] { parsePlan(query) } + assert(e.message.contains("Operation not allowed: CREATE TEMPORARY TABLE")) + } + + test("create table - external") { + val query = "CREATE EXTERNAL TABLE tab1 (id int, name string) LOCATION '/path/to/nowhere'" + val (desc, _) = extractTableDesc(query) + assert(desc.tableType == CatalogTableType.EXTERNAL) + assert(desc.storage.locationUri == Some(new URI("/path/to/nowhere"))) + } + + test("create table - if not exists") { + val query = "CREATE TABLE IF NOT EXISTS tab1 (id int, name string)" + val (_, allowExisting) = extractTableDesc(query) + assert(allowExisting) + } + + test("create table - comment") { + val query = "CREATE TABLE my_table (id int, name string) COMMENT 'its hot as hell below'" + val (desc, _) = extractTableDesc(query) + assert(desc.comment == Some("its hot as hell below")) + } + + test("create table - partitioned columns") { + val query = "CREATE TABLE my_table (id int, name string) PARTITIONED BY (month int)" + val (desc, _) = extractTableDesc(query) + assert(desc.schema == new StructType() + .add("id", "int") + .add("name", "string") + .add("month", "int")) + assert(desc.partitionColumnNames == Seq("month")) + } + + test("create table - clustered by") { + val numBuckets = 10 + val bucketedColumn = "id" + val sortColumn = "id" + val baseQuery = + s""" + CREATE TABLE my_table ( + $bucketedColumn int, + name string) + CLUSTERED BY($bucketedColumn) + """ + + val query1 = s"$baseQuery INTO $numBuckets BUCKETS" + val (desc1, _) = extractTableDesc(query1) + assert(desc1.bucketSpec.isDefined) + val bucketSpec1 = desc1.bucketSpec.get + assert(bucketSpec1.numBuckets == numBuckets) + assert(bucketSpec1.bucketColumnNames.head.equals(bucketedColumn)) + assert(bucketSpec1.sortColumnNames.isEmpty) + + val query2 = s"$baseQuery SORTED BY($sortColumn) INTO $numBuckets BUCKETS" + val (desc2, _) = extractTableDesc(query2) + assert(desc2.bucketSpec.isDefined) + val bucketSpec2 = desc2.bucketSpec.get + assert(bucketSpec2.numBuckets == numBuckets) + assert(bucketSpec2.bucketColumnNames.head.equals(bucketedColumn)) + assert(bucketSpec2.sortColumnNames.head.equals(sortColumn)) + } + + test("create table(hive) - skewed by") { + val baseQuery = "CREATE TABLE my_table (id int, name string) SKEWED BY" + val query1 = s"$baseQuery(id) ON (1, 10, 100)" + val query2 = s"$baseQuery(id, name) ON ((1, 'x'), (2, 'y'), (3, 'z'))" + val query3 = s"$baseQuery(id, name) ON ((1, 'x'), (2, 'y'), (3, 'z')) STORED AS DIRECTORIES" + val e1 = intercept[ParseException] { parsePlan(query1) } + val e2 = intercept[ParseException] { parsePlan(query2) } + val e3 = intercept[ParseException] { parsePlan(query3) } + assert(e1.getMessage.contains("Operation not allowed")) + assert(e2.getMessage.contains("Operation not allowed")) + assert(e3.getMessage.contains("Operation not allowed")) + } + + test("create table(hive) - row format") { + val baseQuery = "CREATE TABLE my_table (id int, name string) ROW FORMAT" + val query1 = s"$baseQuery SERDE 'org.apache.poof.serde.Baff'" + val query2 = s"$baseQuery SERDE 
'org.apache.poof.serde.Baff' WITH SERDEPROPERTIES ('k1'='v1')" + val query3 = + s""" + |$baseQuery DELIMITED FIELDS TERMINATED BY 'x' ESCAPED BY 'y' + |COLLECTION ITEMS TERMINATED BY 'a' + |MAP KEYS TERMINATED BY 'b' + |LINES TERMINATED BY '\n' + |NULL DEFINED AS 'c' + """.stripMargin + val (desc1, _) = extractTableDesc(query1) + val (desc2, _) = extractTableDesc(query2) + val (desc3, _) = extractTableDesc(query3) + assert(desc1.storage.serde == Some("org.apache.poof.serde.Baff")) + assert(desc1.storage.properties.isEmpty) + assert(desc2.storage.serde == Some("org.apache.poof.serde.Baff")) + assert(desc2.storage.properties == Map("k1" -> "v1")) + assert(desc3.storage.properties == Map( + "field.delim" -> "x", + "escape.delim" -> "y", + "serialization.format" -> "x", + "line.delim" -> "\n", + "colelction.delim" -> "a", // yes, it's a typo from Hive :) + "mapkey.delim" -> "b")) + } + + test("create table(hive) - file format") { + val baseQuery = "CREATE TABLE my_table (id int, name string) STORED AS" + val query1 = s"$baseQuery INPUTFORMAT 'winput' OUTPUTFORMAT 'wowput'" + val query2 = s"$baseQuery ORC" + val (desc1, _) = extractTableDesc(query1) + val (desc2, _) = extractTableDesc(query2) + assert(desc1.storage.inputFormat == Some("winput")) + assert(desc1.storage.outputFormat == Some("wowput")) + assert(desc1.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) + assert(desc2.storage.inputFormat == Some("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")) + assert(desc2.storage.outputFormat == Some("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")) + assert(desc2.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde")) + } + + test("create table(hive) - storage handler") { + val baseQuery = "CREATE TABLE my_table (id int, name string) STORED BY" + val query1 = s"$baseQuery 'org.papachi.StorageHandler'" + val query2 = s"$baseQuery 'org.mamachi.StorageHandler' WITH SERDEPROPERTIES ('k1'='v1')" + val e1 = intercept[ParseException] { parsePlan(query1) } + val e2 = intercept[ParseException] { parsePlan(query2) } + assert(e1.getMessage.contains("Operation not allowed")) + assert(e2.getMessage.contains("Operation not allowed")) + } + + test("create table - properties") { + val query = "CREATE TABLE my_table (id int, name string) TBLPROPERTIES ('k1'='v1', 'k2'='v2')" + parsePlan(query) match { + case state: CreateTableStatement => + assert(state.properties == Map("k1" -> "v1", "k2" -> "v2")) + } + } + + test("create table(hive) - everything!") { + val query = + """ + |CREATE EXTERNAL TABLE IF NOT EXISTS dbx.my_table (id int, name string) + |COMMENT 'no comment' + |PARTITIONED BY (month int) + |ROW FORMAT SERDE 'org.apache.poof.serde.Baff' WITH SERDEPROPERTIES ('k1'='v1') + |STORED AS INPUTFORMAT 'winput' OUTPUTFORMAT 'wowput' + |LOCATION '/path/to/mercury' + |TBLPROPERTIES ('k1'='v1', 'k2'='v2') + """.stripMargin + val (desc, allowExisting) = extractTableDesc(query) + assert(allowExisting) + assert(desc.identifier.database == Some("dbx")) + assert(desc.identifier.table == "my_table") + assert(desc.tableType == CatalogTableType.EXTERNAL) + assert(desc.schema == new StructType() + .add("id", "int") + .add("name", "string") + .add("month", "int")) + assert(desc.partitionColumnNames == Seq("month")) + assert(desc.bucketSpec.isEmpty) + assert(desc.viewText.isEmpty) + assert(desc.viewCatalogAndNamespace.isEmpty) + assert(desc.viewQueryColumnNames.isEmpty) + assert(desc.storage.locationUri == Some(new URI("/path/to/mercury"))) + assert(desc.storage.inputFormat == 
Some("winput")) + assert(desc.storage.outputFormat == Some("wowput")) + assert(desc.storage.serde == Some("org.apache.poof.serde.Baff")) + assert(desc.storage.properties == Map("k1" -> "v1")) + assert(desc.properties == Map("k1" -> "v1", "k2" -> "v2")) + assert(desc.comment == Some("no comment")) } // TODO: add tests for more commands. diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowNamespacesParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowNamespacesParserSuite.scala new file mode 100644 index 0000000000000..c9e5d33fea87a --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowNamespacesParserSuite.scala @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedNamespace} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.parser.ParseException +import org.apache.spark.sql.catalyst.plans.logical.ShowNamespaces +import org.apache.spark.sql.test.SharedSparkSession + +class ShowNamespacesParserSuite extends AnalysisTest with SharedSparkSession { + test("all namespaces") { + Seq("SHOW NAMESPACES", "SHOW DATABASES").foreach { sqlCmd => + comparePlans( + parsePlan(sqlCmd), + ShowNamespaces(UnresolvedNamespace(Seq.empty[String]), None)) + } + } + + test("basic pattern") { + Seq( + "SHOW DATABASES LIKE 'defau*'", + "SHOW NAMESPACES LIKE 'defau*'").foreach { sqlCmd => + comparePlans( + parsePlan(sqlCmd), + ShowNamespaces(UnresolvedNamespace(Seq.empty[String]), Some("defau*"))) + } + } + + test("FROM/IN operator is not allowed by SHOW DATABASES") { + Seq( + "SHOW DATABASES FROM testcat.ns1.ns2", + "SHOW DATABASES IN testcat.ns1.ns2").foreach { sqlCmd => + val errMsg = intercept[ParseException] { + parsePlan(sqlCmd) + }.getMessage + assert(errMsg.contains("FROM/IN operator is not allowed in SHOW DATABASES")) + } + } + + test("show namespaces in/from a namespace") { + comparePlans( + parsePlan("SHOW NAMESPACES FROM testcat.ns1.ns2"), + ShowNamespaces(UnresolvedNamespace(Seq("testcat", "ns1", "ns2")), None)) + comparePlans( + parsePlan("SHOW NAMESPACES IN testcat.ns1.ns2"), + ShowNamespaces(UnresolvedNamespace(Seq("testcat", "ns1", "ns2")), None)) + } + + test("namespaces by a pattern from another namespace") { + comparePlans( + parsePlan("SHOW NAMESPACES IN testcat.ns1 LIKE '*pattern*'"), + ShowNamespaces(UnresolvedNamespace(Seq("testcat", "ns1")), Some("*pattern*"))) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowNamespacesSuiteBase.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowNamespacesSuiteBase.scala new file mode 100644 index 0000000000000..790489e0d47ce --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowNamespacesSuiteBase.scala @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.{QueryTest, Row} +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.types.{StringType, StructType} + +/** + * This base suite contains unified tests for the `SHOW NAMESPACES` and `SHOW DATABASES` commands + * that check V1 and V2 table catalogs. The tests that cannot run for all supported catalogs are + * located in more specific test suites: + * + * - V2 table catalog tests: `org.apache.spark.sql.execution.command.v2.ShowNamespacesSuite` + * - V1 table catalog tests: `org.apache.spark.sql.execution.command.v1.ShowNamespacesSuiteBase` + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.ShowNamespacesSuite` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.ShowNamespacesSuite` + */ +trait ShowNamespacesSuiteBase extends QueryTest with DDLCommandTestUtils { + override val command = "SHOW NAMESPACES" + + protected def runShowNamespacesSql(sqlText: String, expected: Seq[String]): Unit = { + val df = spark.sql(sqlText) + assert(df.schema === new StructType().add("namespace", StringType, false)) + checkAnswer(df, expected.map(Row(_))) + } + + protected def builtinTopNamespaces: Seq[String] = Seq.empty + + test("default namespace") { + withSQLConf(SQLConf.DEFAULT_CATALOG.key -> catalog) { + runShowNamespacesSql("SHOW NAMESPACES", builtinTopNamespaces) + } + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog", builtinTopNamespaces) + } + + test("at the top level") { + withNamespace(s"$catalog.ns1", s"$catalog.ns2") { + sql(s"CREATE DATABASE $catalog.ns1") + sql(s"CREATE NAMESPACE $catalog.ns2") + + runShowNamespacesSql( + s"SHOW NAMESPACES IN $catalog", + Seq("ns1", "ns2") ++ builtinTopNamespaces) + } + } + + test("exact matching") { + withNamespace(s"$catalog.ns1", s"$catalog.ns2") { + sql(s"CREATE NAMESPACE $catalog.ns1") + sql(s"CREATE NAMESPACE $catalog.ns2") + Seq( + s"SHOW NAMESPACES IN $catalog LIKE 'ns2'", + s"SHOW NAMESPACES IN $catalog 'ns2'", + s"SHOW NAMESPACES FROM $catalog LIKE 'ns2'", + s"SHOW NAMESPACES FROM $catalog 'ns2'").foreach { sqlCmd => + withClue(sqlCmd) { + runShowNamespacesSql(sqlCmd, Seq("ns2")) + } + } + } + } + + test("does not match to any namespace") { + Seq( + "SHOW DATABASES LIKE 'non-existentdb'", + "SHOW NAMESPACES 'non-existentdb'").foreach { sqlCmd => + runShowNamespacesSql(sqlCmd, Seq.empty) + } + } + + test("show root namespaces with the 
default catalog") { + withSQLConf(SQLConf.DEFAULT_CATALOG.key -> catalog) { + runShowNamespacesSql("SHOW NAMESPACES", builtinTopNamespaces) + + withNamespace("ns1", "ns2") { + sql(s"CREATE NAMESPACE ns1") + sql(s"CREATE NAMESPACE ns2") + + runShowNamespacesSql("SHOW NAMESPACES", Seq("ns1", "ns2") ++ builtinTopNamespaces) + runShowNamespacesSql("SHOW NAMESPACES LIKE '*1*'", Seq("ns1")) + } + } + } + + test("complex namespace patterns") { + withNamespace(s"$catalog.showdb2b", s"$catalog.showdb1a") { + sql(s"CREATE NAMESPACE $catalog.showdb2b") + sql(s"CREATE NAMESPACE $catalog.showdb1a") + + Seq( + "'*db1A'" -> Seq("showdb1a"), + "'*2*'" -> Seq("showdb2b"), + "'*db1A|*db2B'" -> Seq("showdb1a", "showdb2b") + ).foreach { case (pattern, expected) => + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog LIKE $pattern", expected) + } + } + } + + test("change catalog and namespace with USE statements") { + try { + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + sql(s"USE $catalog") + runShowNamespacesSql("SHOW NAMESPACES", Seq("ns") ++ builtinTopNamespaces) + + sql("USE ns") + // 'SHOW NAMESPACES' is not affected by the current namespace and lists root namespaces. + runShowNamespacesSql("SHOW NAMESPACES", Seq("ns") ++ builtinTopNamespaces) + } + } finally { + spark.sessionState.catalogManager.reset() + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowPartitionsParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowPartitionsParserSuite.scala new file mode 100644 index 0000000000000..7b5cf8af4eead --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowPartitionsParserSuite.scala @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedPartitionSpec, UnresolvedTable} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.parser.ParseException +import org.apache.spark.sql.catalyst.plans.logical.ShowPartitions +import org.apache.spark.sql.execution.SparkSqlParser +import org.apache.spark.sql.test.SharedSparkSession + +class ShowPartitionsParserSuite extends AnalysisTest with SharedSparkSession { + test("SHOW PARTITIONS") { + val commandName = "SHOW PARTITIONS" + Seq( + "SHOW PARTITIONS t1" -> ShowPartitions(UnresolvedTable(Seq("t1"), commandName), None), + "SHOW PARTITIONS db1.t1" -> ShowPartitions( + UnresolvedTable(Seq("db1", "t1"), commandName), None), + "SHOW PARTITIONS t1 PARTITION(partcol1='partvalue', partcol2='partvalue')" -> + ShowPartitions( + UnresolvedTable(Seq("t1"), commandName), + Some(UnresolvedPartitionSpec(Map("partcol1" -> "partvalue", "partcol2" -> "partvalue")))), + "SHOW PARTITIONS a.b.c" -> ShowPartitions( + UnresolvedTable(Seq("a", "b", "c"), commandName), None), + "SHOW PARTITIONS a.b.c PARTITION(ds='2017-06-10')" -> + ShowPartitions( + UnresolvedTable(Seq("a", "b", "c"), commandName), + Some(UnresolvedPartitionSpec(Map("ds" -> "2017-06-10")))) + ).foreach { case (sql, expected) => + val parsed = parsePlan(sql) + comparePlans(parsed, expected) + } + } + + test("empty values in non-optional partition specs") { + val e = intercept[ParseException] { + new SparkSqlParser().parsePlan( + "SHOW PARTITIONS dbx.tab1 PARTITION (a='1', b)") + }.getMessage + assert(e.contains("Found an empty partition key 'b'")) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowPartitionsSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowPartitionsSuiteBase.scala new file mode 100644 index 0000000000000..29edb8fb51cf8 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowPartitionsSuiteBase.scala @@ -0,0 +1,157 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode} +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.types.{StringType, StructType} + +/** + * This base suite contains unified tests for the `SHOW PARTITIONS` command that check V1 and V2 + * table catalogs. 
The tests that cannot run for all supported catalogs are located in more + * specific test suites: + * + * - V2 table catalog tests: `org.apache.spark.sql.execution.command.v2.ShowPartitionsSuite` + * - V1 table catalog tests: `org.apache.spark.sql.execution.command.v1.ShowPartitionsSuiteBase` + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.ShowPartitionsSuite` + * - V1 Hive External catalog: + * `org.apache.spark.sql.hive.execution.command.ShowPartitionsSuite` + */ +trait ShowPartitionsSuiteBase extends QueryTest with DDLCommandTestUtils { + override val command = "SHOW PARTITIONS" + // Gets the schema of `SHOW PARTITIONS` + private val showSchema: StructType = new StructType().add("partition", StringType, false) + protected def runShowPartitionsSql(sqlText: String, expected: Seq[Row]): Unit = { + val df = spark.sql(sqlText) + assert(df.schema === showSchema) + checkAnswer(df, expected) + } + + protected def createDateTable(table: String): Unit = { + sql(s""" + |CREATE TABLE $table (price int, qty int, year int, month int) + |$defaultUsing + |partitioned by (year, month)""".stripMargin) + sql(s"INSERT INTO $table PARTITION(year = 2015, month = 1) SELECT 1, 1") + sql(s"INSERT INTO $table PARTITION(year = 2015, month = 2) SELECT 2, 2") + sql(s"ALTER TABLE $table ADD PARTITION(year = 2016, month = 2)") + sql(s"ALTER TABLE $table ADD PARTITION(year = 2016, month = 3)") + } + + protected def createNullPartTable(table: String, format: String): Unit = { + import testImplicits._ + val df = Seq((0, ""), (1, null)).toDF("a", "part") + df.write + .partitionBy("part") + .format(format) + .mode(SaveMode.Overwrite) + .saveAsTable(table) + } + + test("show partitions of non-partitioned table") { + withNamespaceAndTable("ns", "not_partitioned_table") { t => + sql(s"CREATE TABLE $t (col1 int) $defaultUsing") + val errMsg = intercept[AnalysisException] { + sql(s"SHOW PARTITIONS $t") + }.getMessage + assert(errMsg.contains("not allowed on a table that is not partitioned")) + } + } + + test("non-partitioning columns") { + withNamespaceAndTable("ns", "dateTable") { t => + createDateTable(t) + val errMsg = intercept[AnalysisException] { + sql(s"SHOW PARTITIONS $t PARTITION(abcd=2015, xyz=1)") + }.getMessage + assert(errMsg.contains("abcd is not a valid partition column")) + } + } + + test("show everything") { + withNamespaceAndTable("ns", "dateTable") { t => + createDateTable(t) + runShowPartitionsSql( + s"show partitions $t", + Row("year=2015/month=1") :: + Row("year=2015/month=2") :: + Row("year=2016/month=2") :: + Row("year=2016/month=3") :: Nil) + } + } + + test("filter by partitions") { + withNamespaceAndTable("ns", "dateTable") { t => + createDateTable(t) + runShowPartitionsSql( + s"show partitions $t PARTITION(year=2015)", + Row("year=2015/month=1") :: + Row("year=2015/month=2") :: Nil) + runShowPartitionsSql( + s"show partitions $t PARTITION(year=2015, month=1)", + Row("year=2015/month=1") :: Nil) + runShowPartitionsSql( + s"show partitions $t PARTITION(month=2)", + Row("year=2015/month=2") :: + Row("year=2016/month=2") :: Nil) + } + } + + test("show everything more than 5 part keys") { + withNamespaceAndTable("ns", "wideTable") { t => + createWideTable(t) + runShowPartitionsSql( + s"show partitions $t", + Row("year=2016/month=3/hour=10/minute=10/sec=10/extra=1") :: + Row("year=2016/month=4/hour=10/minute=10/sec=10/extra=1") :: Nil) + } + } + + test("SPARK-33667: case sensitivity of partition spec") { + withNamespaceAndTable("ns", "part_table") { t => + sql(s""" + |CREATE TABLE 
$t (price int, qty int, year int, month int) + |$defaultUsing + |PARTITIONED BY (year, month)""".stripMargin) + sql(s"INSERT INTO $t PARTITION(year = 2015, month = 1) SELECT 1, 1") + Seq( + true -> "PARTITION(year = 2015, month = 1)", + false -> "PARTITION(YEAR = 2015, Month = 1)" + ).foreach { case (caseSensitive, partitionSpec) => + withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { + runShowPartitionsSql( + s"SHOW PARTITIONS $t $partitionSpec", + Row("year=2015/month=1") :: Nil) + } + } + } + } + + test("SPARK-33777: sorted output") { + withNamespaceAndTable("ns", "dateTable") { t => + sql(s""" + |CREATE TABLE $t (id int, part string) + |$defaultUsing + |PARTITIONED BY (part)""".stripMargin) + sql(s"ALTER TABLE $t ADD PARTITION(part = 'b')") + sql(s"ALTER TABLE $t ADD PARTITION(part = 'a')") + val partitions = sql(s"show partitions $t") + assert(partitions.first().getString(0) === "part=a") + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesParserSuite.scala new file mode 100644 index 0000000000000..d68e1233f7ab2 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesParserSuite.scala @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
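
The SPARK-33667 test above checks that, with `spark.sql.caseSensitive=false`, a spec written as `PARTITION(YEAR = 2015, Month = 1)` still resolves against the table's `year`/`month` partition columns. A self-contained sketch of that resolution rule; this is an illustration, not Spark's actual resolver:

object NormalizePartitionSpecSketch extends App {
  // Resolve user-supplied partition column names against the table's partition
  // columns, case-insensitively unless caseSensitive is set.
  def normalize(
      spec: Map[String, String],
      partCols: Seq[String],
      caseSensitive: Boolean): Map[String, String] =
    spec.map { case (key, value) =>
      val resolved = partCols
        .find(col => if (caseSensitive) col == key else col.equalsIgnoreCase(key))
        .getOrElse(throw new IllegalArgumentException(s"$key is not a valid partition column"))
      resolved -> value
    }

  println(normalize(Map("YEAR" -> "2015", "Month" -> "1"), Seq("year", "month"), caseSensitive = false))
  // Map(year -> 2015, month -> 1)
}
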
+ */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedNamespace, UnresolvedPartitionSpec} +import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan +import org.apache.spark.sql.catalyst.plans.logical.{ShowTableExtended, ShowTables} +import org.apache.spark.sql.test.SharedSparkSession + +class ShowTablesParserSuite extends AnalysisTest with SharedSparkSession { + private val catalog = "test_catalog" + + test("show tables") { + comparePlans( + parsePlan("SHOW TABLES"), + ShowTables(UnresolvedNamespace(Seq.empty[String]), None)) + comparePlans( + parsePlan("SHOW TABLES '*test*'"), + ShowTables(UnresolvedNamespace(Seq.empty[String]), Some("*test*"))) + comparePlans( + parsePlan("SHOW TABLES LIKE '*test*'"), + ShowTables(UnresolvedNamespace(Seq.empty[String]), Some("*test*"))) + comparePlans( + parsePlan(s"SHOW TABLES FROM $catalog.ns1.ns2.tbl"), + ShowTables(UnresolvedNamespace(Seq(catalog, "ns1", "ns2", "tbl")), None)) + comparePlans( + parsePlan(s"SHOW TABLES IN $catalog.ns1.ns2.tbl"), + ShowTables(UnresolvedNamespace(Seq(catalog, "ns1", "ns2", "tbl")), None)) + comparePlans( + parsePlan("SHOW TABLES IN ns1 '*test*'"), + ShowTables(UnresolvedNamespace(Seq("ns1")), Some("*test*"))) + comparePlans( + parsePlan("SHOW TABLES IN ns1 LIKE '*test*'"), + ShowTables(UnresolvedNamespace(Seq("ns1")), Some("*test*"))) + } + + test("show table extended") { + comparePlans( + parsePlan("SHOW TABLE EXTENDED LIKE '*test*'"), + ShowTableExtended(UnresolvedNamespace(Seq.empty[String]), "*test*", None)) + comparePlans( + parsePlan(s"SHOW TABLE EXTENDED FROM $catalog.ns1.ns2 LIKE '*test*'"), + ShowTableExtended(UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), "*test*", None)) + comparePlans( + parsePlan(s"SHOW TABLE EXTENDED IN $catalog.ns1.ns2 LIKE '*test*'"), + ShowTableExtended(UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), "*test*", None)) + comparePlans( + parsePlan("SHOW TABLE EXTENDED LIKE '*test*' PARTITION(ds='2008-04-09', hr=11)"), + ShowTableExtended( + UnresolvedNamespace(Seq.empty[String]), + "*test*", + Some(UnresolvedPartitionSpec(Map("ds" -> "2008-04-09", "hr" -> "11"))))) + comparePlans( + parsePlan(s"SHOW TABLE EXTENDED FROM $catalog.ns1.ns2 LIKE '*test*' " + + "PARTITION(ds='2008-04-09')"), + ShowTableExtended( + UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), + "*test*", + Some(UnresolvedPartitionSpec(Map("ds" -> "2008-04-09"))))) + comparePlans( + parsePlan(s"SHOW TABLE EXTENDED IN $catalog.ns1.ns2 LIKE '*test*' " + + "PARTITION(ds='2008-04-09')"), + ShowTableExtended( + UnresolvedNamespace(Seq(catalog, "ns1", "ns2")), + "*test*", + Some(UnresolvedPartitionSpec(Map("ds" -> "2008-04-09"))))) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesSuiteBase.scala new file mode 100644 index 0000000000000..6a1337ef5ac8b --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowTablesSuiteBase.scala @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command + +import org.apache.spark.sql.{QueryTest, Row} +import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException +import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._ +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.types.StructType + +/** + * This base suite contains unified tests for the `SHOW TABLES` command that check V1 and V2 + * table catalogs. The tests that cannot run for all supported catalogs are located in more + * specific test suites: + * + * - V2 table catalog tests: `org.apache.spark.sql.execution.command.v2.ShowTablesSuite` + * - V1 table catalog tests: `org.apache.spark.sql.execution.command.v1.ShowTablesSuiteBase` + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.ShowTablesSuite` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.ShowTablesSuite` + */ +trait ShowTablesSuiteBase extends QueryTest with DDLCommandTestUtils { + override val command = "SHOW TABLES" + protected def defaultNamespace: Seq[String] + case class ShowRow(namespace: String, table: String, isTemporary: Boolean) + protected def getRows(showRows: Seq[ShowRow]): Seq[Row] + // Gets the schema of `SHOW TABLES` + protected def showSchema: StructType + + protected def runShowTablesSql(sqlText: String, expected: Seq[ShowRow]): Unit = { + val df = spark.sql(sqlText) + assert(df.schema === showSchema) + checkAnswer(df, getRows(expected)) + } + + test("show an existing table") { + withNamespaceAndTable("ns", "table") { t => + sql(s"CREATE TABLE $t (name STRING, id INT) $defaultUsing") + runShowTablesSql(s"SHOW TABLES IN $catalog.ns", Seq(ShowRow("ns", "table", false))) + } + } + + test("show table in a not existing namespace") { + val msg = intercept[NoSuchNamespaceException] { + runShowTablesSql(s"SHOW TABLES IN $catalog.unknown", Seq()) + }.getMessage + assert(msg.matches("(Database|Namespace) 'unknown' not found")) + } + + test("show tables with a pattern") { + withNamespace(s"$catalog.ns1", s"$catalog.ns2") { + sql(s"CREATE NAMESPACE $catalog.ns1") + sql(s"CREATE NAMESPACE $catalog.ns2") + withTable( + s"$catalog.ns1.table", + s"$catalog.ns1.table_name_1a", + s"$catalog.ns1.table_name_2b", + s"$catalog.ns2.table_name_2b") { + sql(s"CREATE TABLE $catalog.ns1.table (id bigint, data string) $defaultUsing") + sql(s"CREATE TABLE $catalog.ns1.table_name_1a (id bigint, data string) $defaultUsing") + sql(s"CREATE TABLE $catalog.ns1.table_name_2b (id bigint, data string) $defaultUsing") + sql(s"CREATE TABLE $catalog.ns2.table_name_2b (id bigint, data string) $defaultUsing") + + runShowTablesSql( + s"SHOW TABLES FROM $catalog.ns1", + Seq( + ShowRow("ns1", "table", false), + ShowRow("ns1", "table_name_1a", false), + ShowRow("ns1", "table_name_2b", false))) + + runShowTablesSql( + s"SHOW TABLES FROM $catalog.ns1 LIKE '*name*'", + Seq( + ShowRow("ns1", "table_name_1a", false), + ShowRow("ns1", "table_name_2b", false))) + + runShowTablesSql( + s"SHOW TABLES FROM $catalog.ns1 LIKE 'table_name_1*|table_name_2*'", + Seq( + ShowRow("ns1", "table_name_1a", false), + 
ShowRow("ns1", "table_name_2b", false))) + + runShowTablesSql( + s"SHOW TABLES FROM $catalog.ns1 LIKE '*2b'", + Seq(ShowRow("ns1", "table_name_2b", false))) + } + } + } + + test("show tables with current catalog and namespace") { + withSQLConf(SQLConf.DEFAULT_CATALOG.key -> catalog) { + val tblName = (catalog +: defaultNamespace :+ "table").quoted + withTable(tblName) { + sql(s"CREATE TABLE $tblName (name STRING, id INT) $defaultUsing") + val ns = defaultNamespace.mkString(".") + runShowTablesSql("SHOW TABLES", Seq(ShowRow(ns, "table", false))) + } + } + } + + test("change current catalog and namespace with USE statements") { + withNamespaceAndTable("ns", "table") { t => + sql(s"CREATE TABLE $t (name STRING, id INT) $defaultUsing") + + sql(s"USE $catalog") + // No table is matched since the current namespace is not ["ns"] + assert(defaultNamespace != Seq("ns")) + runShowTablesSql("SHOW TABLES", Seq()) + + // Update the current namespace to match "ns.tbl". + sql(s"USE $catalog.ns") + runShowTablesSql("SHOW TABLES", Seq(ShowRow("ns", "table", false))) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala new file mode 100644 index 0000000000000..b3c118def70b7 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableAddPartitionSuite.scala @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.execution.command + +/** + * This base suite contains unified tests for the `ALTER TABLE .. ADD PARTITION` command that + * check V1 table catalogs. The tests that cannot run for all V1 catalogs are located in more + * specific test suites: + * + * - V1 In-Memory catalog: + * `org.apache.spark.sql.execution.command.v1.AlterTableAddPartitionSuite` + * - V1 Hive External catalog: + * `org.apache.spark.sql.hive.execution.command.AlterTableAddPartitionSuite` + */ +trait AlterTableAddPartitionSuiteBase extends command.AlterTableAddPartitionSuiteBase { + test("empty string as partition value") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)") + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t ADD PARTITION (p1 = '')") + }.getMessage + assert(errMsg.contains("Partition spec is invalid. " + + "The spec ([p1=]) contains an empty partition column value")) + } + } +} + +/** + * The class contains tests for the `ALTER TABLE .. ADD PARTITION` command to check + * V1 In-Memory table catalog. 
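
The one-line class that follows is the whole unified-suite pattern in miniature: shared tests live in a base trait, catalog-specific settings live in `CommandSuiteBase`, and a concrete suite simply mixes the two. A self-contained sketch of that layering with invented names (none of these are Spark classes):

// Shared tests: everything that holds for every catalog.
trait ShowSomethingSuiteBase {
  def catalog: String                          // supplied by a settings trait
  def runSharedChecks(): Unit =
    println(s"running shared checks against catalog '$catalog'")
}

// Per-catalog settings, analogous to the v1/v2 CommandSuiteBase traits.
trait InMemoryV2Settings { def catalog: String = "test_catalog" }

// Concrete suite: no code of its own unless the catalog needs extra cases.
class ShowSomethingV2Suite extends ShowSomethingSuiteBase with InMemoryV2Settings

object UnifiedSuiteSketch extends App {
  new ShowSomethingV2Suite().runSharedChecks()  // prints the shared-check message for 'test_catalog'
}
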
+ */ +class AlterTableAddPartitionSuite extends AlterTableAddPartitionSuiteBase with CommandSuiteBase diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala new file mode 100644 index 0000000000000..509c0be28c26a --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableDropPartitionSuite.scala @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.execution.command + +/** + * This base suite contains unified tests for the `ALTER TABLE .. DROP PARTITION` command that + * check V1 table catalogs. The tests that cannot run for all V1 catalogs are located in more + * specific test suites: + * + * - V1 In-Memory catalog: + * `org.apache.spark.sql.execution.command.v1.AlterTableDropPartitionSuite` + * - V1 Hive External catalog: + * `org.apache.spark.sql.hive.execution.command.AlterTableDropPartitionSuite` + */ +trait AlterTableDropPartitionSuiteBase extends command.AlterTableDropPartitionSuiteBase { + override protected val notFullPartitionSpecErr = "The following partitions not found in table" + override protected def nullPartitionValue: String = "__HIVE_DEFAULT_PARTITION__" + + test("purge partition data") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"ALTER TABLE $t ADD PARTITION (id = 1)") + checkPartitions(t, Map("id" -> "1")) + sql(s"ALTER TABLE $t DROP PARTITION (id = 1) PURGE") + checkPartitions(t) // no partitions + } + } +} + +/** + * The class contains tests for the `ALTER TABLE .. DROP PARTITION` command to check + * V1 In-Memory table catalog. + */ +class AlterTableDropPartitionSuite + extends AlterTableDropPartitionSuiteBase + with CommandSuiteBase { + + test("empty string as partition value") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)") + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (p1 = '')") + }.getMessage + assert(errMsg.contains("Partition spec is invalid. 
" + + "The spec ([p1=]) contains an empty partition column value")) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenamePartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenamePartitionSuite.scala new file mode 100644 index 0000000000000..bde77106a3ab7 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenamePartitionSuite.scala @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.Row +import org.apache.spark.sql.execution.command + +/** + * This base suite contains unified tests for the `ALTER TABLE .. RENAME PARTITION` command that + * check V1 table catalogs. The tests that cannot run for all V1 catalogs are located in more + * specific test suites: + * + * - V1 In-Memory catalog: + * `org.apache.spark.sql.execution.command.v1.AlterTableRenamePartitionSuite` + * - V1 Hive External catalog: + * `org.apache.spark.sql.hive.execution.command.AlterTableRenamePartitionSuite` + */ +trait AlterTableRenamePartitionSuiteBase extends command.AlterTableRenamePartitionSuiteBase { + test("with location") { + withNamespaceAndTable("ns", "tbl") { t => + createSinglePartTable(t) + sql(s"ALTER TABLE $t ADD PARTITION (id = 2) LOCATION 'loc1'") + sql(s"INSERT INTO $t PARTITION (id = 2) SELECT 'def'") + checkPartitions(t, Map("id" -> "1"), Map("id" -> "2")) + checkLocation(t, Map("id" -> "2"), "loc1") + + sql(s"ALTER TABLE $t PARTITION (id = 2) RENAME TO PARTITION (id = 3)") + checkPartitions(t, Map("id" -> "1"), Map("id" -> "3")) + // V1 catalogs rename the partition location of managed tables + checkLocation(t, Map("id" -> "3"), "id=3") + checkAnswer(sql(s"SELECT id, data FROM $t WHERE id = 3"), Row(3, "def")) + } + } +} + +/** + * The class contains tests for the `ALTER TABLE .. RENAME PARTITION` command to check + * V1 In-Memory table catalog. + */ +class AlterTableRenamePartitionSuite + extends AlterTableRenamePartitionSuiteBase + with CommandSuiteBase diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/CommandSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/CommandSuiteBase.scala new file mode 100644 index 0000000000000..80c552de567ba --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/CommandSuiteBase.scala @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec +import org.apache.spark.sql.connector.catalog.CatalogManager +import org.apache.spark.sql.test.SharedSparkSession + +/** + * The trait contains settings and utility functions. It can be mixed to the test suites for + * datasource v1 In-Memory catalog. This trait complements the common trait + * `org.apache.spark.sql.execution.command.DDLCommandTestUtils` with utility functions and + * settings for all unified datasource V1 and V2 test suites. + */ +trait CommandSuiteBase extends SharedSparkSession { + def version: String = "V1" // The prefix is added to test names + def catalog: String = CatalogManager.SESSION_CATALOG_NAME + def defaultUsing: String = "USING parquet" // The clause is used in creating tables under testing + + // TODO(SPARK-33393): Move this to `DDLCommandTestUtils` + def checkLocation( + t: String, + spec: TablePartitionSpec, + expected: String): Unit = { + val tablePath = t.split('.') + val tableName = tablePath.last + val ns = tablePath.init.mkString(".") + val partSpec = spec.map { case (key, value) => s"$key = $value"}.mkString(", ") + val information = sql(s"SHOW TABLE EXTENDED IN $ns LIKE '$tableName' PARTITION($partSpec)") + .select("information") + .first().getString(0) + val location = information.split("\\r?\\n").filter(_.startsWith("Location:")).head + assert(location.endsWith(expected)) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/DropTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/DropTableSuite.scala new file mode 100644 index 0000000000000..497624f0a18de --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/DropTableSuite.scala @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.execution.command + +/** + * This base suite contains unified tests for the `DROP TABLE` command that check V1 + * table catalogs. 
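
The V1 `checkLocation` helper in `v1/CommandSuiteBase.scala` above does not query catalog APIs directly; it scrapes the multi-line `information` column returned by `SHOW TABLE EXTENDED ... PARTITION(...)` and keeps the line that starts with `Location:`. A self-contained sketch of that string handling; the sample text is illustrative, not real command output:

object CheckLocationSketch extends App {
  // The exact shape of the `information` column is assumed here;
  // only the "Location:" line matters for the helper.
  val information =
    """Partition Values: [id=2]
      |Location: file:/tmp/warehouse/ns.db/tbl/loc1
      |Created Time: ...""".stripMargin

  val location = information.split("\\r?\\n").filter(_.startsWith("Location:")).head
  assert(location.endsWith("loc1"))  // mirrors checkLocation(t, Map("id" -> "2"), "loc1")
  println(location)
}
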
The tests that cannot run for all V1 catalogs are located in more + * specific test suites: + * + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.DropTableSuite` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.DropTableSuite` + */ +trait DropTableSuiteBase extends command.DropTableSuiteBase { + test("purge option") { + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + + createTable(s"$catalog.ns.tbl") + checkTables("ns", "tbl") + + sql(s"DROP TABLE $catalog.ns.tbl PURGE") + checkTables("ns") // no tables + } + } +} + +/** + * The class contains tests for the `DROP TABLE` command to check V1 In-Memory table catalog. + */ +class DropTableSuite extends DropTableSuiteBase with CommandSuiteBase + diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowNamespacesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowNamespacesSuite.scala new file mode 100644 index 0000000000000..fd76ef2490f35 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowNamespacesSuite.scala @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.execution.command +import org.apache.spark.sql.internal.SQLConf + +/** + * This base suite contains unified tests for the `SHOW NAMESPACES` and `SHOW DATABASES` commands + * that check V1 table catalogs. 
The tests that cannot run for all V1 catalogs are located in more + * specific test suites: + * + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.ShowNamespacesSuite` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.ShowNamespacesSuite` + */ +trait ShowNamespacesSuiteBase extends command.ShowNamespacesSuiteBase { + override protected def builtinTopNamespaces: Seq[String] = Seq("default") + + test("IN namespace doesn't exist") { + val errMsg = intercept[AnalysisException] { + sql("SHOW NAMESPACES in dummy") + }.getMessage + assert(errMsg.contains("Namespace 'dummy' not found")) + } +} + +class ShowNamespacesSuite extends ShowNamespacesSuiteBase with CommandSuiteBase { + test("case sensitivity") { + Seq(true, false).foreach { caseSensitive => + withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { + withNamespace(s"$catalog.AAA", s"$catalog.bbb") { + sql(s"CREATE NAMESPACE $catalog.AAA") + sql(s"CREATE NAMESPACE $catalog.bbb") + val expected = if (caseSensitive) "AAA" else "aaa" + runShowNamespacesSql( + s"SHOW NAMESPACES IN $catalog", + Seq(expected, "bbb") ++ builtinTopNamespaces) + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog LIKE 'AAA'", Seq(expected)) + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog LIKE 'aaa'", Seq(expected)) + } + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowPartitionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowPartitionsSuite.scala new file mode 100644 index 0000000000000..a26e29706e147 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowPartitionsSuite.scala @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.{AnalysisException, Row, SaveMode} +import org.apache.spark.sql.execution.command + +/** + * This base suite contains unified tests for the `SHOW PARTITIONS` command that check V1 + * table catalogs. 
The tests that cannot run for all V1 catalogs are located in more + * specific test suites: + * + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.ShowPartitionsSuite` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.ShowPartitionsSuite` + */ +trait ShowPartitionsSuiteBase extends command.ShowPartitionsSuiteBase { + test("show everything in the default database") { + val table = "dateTable" + withTable(table) { + createDateTable(table) + runShowPartitionsSql( + s"show partitions default.$table", + Row("year=2015/month=1") :: + Row("year=2015/month=2") :: + Row("year=2016/month=2") :: + Row("year=2016/month=3") :: Nil) + } + } + + // The test fails for V2 Table Catalogs with the exception: + // org.apache.spark.sql.AnalysisException: CREATE VIEW is only supported with v1 tables. + test("show partitions of a view") { + val table = "dateTable" + withTable(table) { + createDateTable(table) + val view = "view1" + withView(view) { + sql(s"CREATE VIEW $view as select * from $table") + val errMsg = intercept[AnalysisException] { + sql(s"SHOW PARTITIONS $view") + }.getMessage + assert(errMsg.contains("'SHOW PARTITIONS' expects a table")) + } + } + } + + test("show partitions of a temporary view") { + val viewName = "test_view" + withTempView(viewName) { + spark.range(10).createTempView(viewName) + val errMsg = intercept[AnalysisException] { + sql(s"SHOW PARTITIONS $viewName") + }.getMessage + assert(errMsg.contains("'SHOW PARTITIONS' expects a table")) + } + } + + test("SPARK-33591: null as a partition value") { + val t = "part_table" + withTable(t) { + sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)") + sql(s"INSERT INTO TABLE $t PARTITION (p1 = null) SELECT 0") + checkAnswer(sql(s"SHOW PARTITIONS $t"), Row("p1=__HIVE_DEFAULT_PARTITION__")) + checkAnswer( + sql(s"SHOW PARTITIONS $t PARTITION (p1 = null)"), + Row("p1=__HIVE_DEFAULT_PARTITION__")) + } + } +} + +/** + * The class contains tests for the `SHOW PARTITIONS` command to check V1 In-Memory table catalog. 
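
The expected rows in the `SHOW PARTITIONS` tests above all share one shape: a single string per partition, rendered as `col=value` pairs joined with `/`, with a null value shown by the V1 catalogs as `__HIVE_DEFAULT_PARTITION__`. A self-contained sketch of that rendering (illustrative helper, not Spark's own code):

object PartitionRowSketch extends App {
  def render(spec: Seq[(String, Option[String])]): String =
    spec.map { case (col, value) =>
      s"$col=${value.getOrElse("__HIVE_DEFAULT_PARTITION__")}"
    }.mkString("/")

  println(render(Seq("year" -> Some("2015"), "month" -> Some("1"))))  // year=2015/month=1
  println(render(Seq("p1" -> None)))                                  // p1=__HIVE_DEFAULT_PARTITION__
}
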
+ */ +class ShowPartitionsSuite extends ShowPartitionsSuiteBase with CommandSuiteBase { + // The test is placed here because it fails with `USING HIVE`: + // org.apache.spark.sql.AnalysisException: + // Hive data source can only be used with tables, you can't use it with CREATE TEMP VIEW USING + test("issue exceptions on the temporary view") { + val viewName = "test_view" + withTempView(viewName) { + sql(s""" + |CREATE TEMPORARY VIEW $viewName (c1 INT, c2 STRING) + |$defaultUsing""".stripMargin) + val errMsg = intercept[AnalysisException] { + sql(s"SHOW PARTITIONS $viewName") + }.getMessage + assert(errMsg.contains("'SHOW PARTITIONS' expects a table")) + } + } + + test("show partitions from a datasource") { + import testImplicits._ + withTable("part_datasrc") { + val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c") + df.write + .partitionBy("a") + .format("parquet") + .mode(SaveMode.Overwrite) + .saveAsTable("part_datasrc") + + assert(sql("SHOW PARTITIONS part_datasrc").count() == 3) + } + } + + test("SPARK-33904: null and empty string as partition values") { + withNamespaceAndTable("ns", "tbl") { t => + createNullPartTable(t, "parquet") + runShowPartitionsSql( + s"SHOW PARTITIONS $t", + Row("part=__HIVE_DEFAULT_PARTITION__") :: Nil) + checkAnswer(spark.table(t), Row(0, null) :: Row(1, null) :: Nil) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala new file mode 100644 index 0000000000000..5f5bcc8170aa2 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowTablesSuite.scala @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v1 + +import org.apache.spark.sql.{AnalysisException, Row, SaveMode} +import org.apache.spark.sql.execution.command +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.types.{BooleanType, StringType, StructType} + +/** + * This base suite contains unified tests for the `SHOW TABLES` command that check V1 + * table catalogs. 
The tests that cannot run for all V1 catalogs are located in more + * specific test suites: + * + * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.ShowTablesSuite` + * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.ShowTablesSuite` + */ +trait ShowTablesSuiteBase extends command.ShowTablesSuiteBase { + override def defaultNamespace: Seq[String] = Seq("default") + override def showSchema: StructType = { + new StructType() + .add("database", StringType, nullable = false) + .add("tableName", StringType, nullable = false) + .add("isTemporary", BooleanType, nullable = false) + } + override def getRows(showRows: Seq[ShowRow]): Seq[Row] = { + showRows.map { + case ShowRow(namespace, table, isTemporary) => Row(namespace, table, isTemporary) + } + } + + private def withSourceViews(f: => Unit): Unit = { + withTable("source", "source2") { + val df = spark.createDataFrame(Seq((1L, "a"), (2L, "b"), (3L, "c"))).toDF("id", "data") + df.createOrReplaceTempView("source") + val df2 = spark.createDataFrame(Seq((4L, "d"), (5L, "e"), (6L, "f"))).toDF("id", "data") + df2.createOrReplaceTempView("source2") + f + } + } + + // `SHOW TABLES` from v2 catalog returns empty result. + test("v1 SHOW TABLES list the temp views") { + withSourceViews { + runShowTablesSql( + "SHOW TABLES FROM default", + Seq(ShowRow("", "source", true), ShowRow("", "source2", true))) + } + } + + test("only support single-level namespace") { + val errMsg = intercept[AnalysisException] { + runShowTablesSql("SHOW TABLES FROM a.b", Seq()) + }.getMessage + assert(errMsg.contains("Nested databases are not supported by v1 session catalog: a.b")) + } + + test("SHOW TABLE EXTENDED from default") { + withSourceViews { + val expected = Seq(Row("", "source", true), Row("", "source2", true)) + val schema = new StructType() + .add("database", StringType, nullable = false) + .add("tableName", StringType, nullable = false) + .add("isTemporary", BooleanType, nullable = false) + .add("information", StringType, nullable = false) + + val df = sql("SHOW TABLE EXTENDED FROM default LIKE '*source*'") + val result = df.collect() + val resultWithoutInfo = result.map { case Row(db, table, temp, _) => Row(db, table, temp) } + + assert(df.schema === schema) + assert(resultWithoutInfo === expected) + result.foreach { case Row(_, _, _, info: String) => assert(info.nonEmpty) } + } + } + + test("case sensitivity of partition spec") { + withNamespaceAndTable("ns", "part_table") { t => + sql(s""" + |CREATE TABLE $t (price int, qty int, year int, month int) + |$defaultUsing + |partitioned by (year, month)""".stripMargin) + sql(s"INSERT INTO $t PARTITION(year = 2015, month = 1) SELECT 1, 1") + Seq( + true -> "PARTITION(year = 2015, month = 1)", + false -> "PARTITION(YEAR = 2015, Month = 1)" + ).foreach { case (caseSensitive, partitionSpec) => + withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { + val df = sql(s"SHOW TABLE EXTENDED LIKE 'part_table' $partitionSpec") + val information = df.select("information").first().getString(0) + assert(information.contains("Partition Values: [year=2015, month=1]")) + } + } + } + } + + test("no database specified") { + Seq( + s"SHOW TABLES IN $catalog", + s"SHOW TABLE EXTENDED IN $catalog LIKE '*tbl'").foreach { showTableCmd => + val errMsg = intercept[AnalysisException] { + sql(showTableCmd) + }.getMessage + assert(errMsg.contains("Database from v1 session catalog is not specified")) + } + } +} + +/** + * The class contains tests for the `SHOW TABLES` command to check V1 
In-Memory table catalog. + */ +class ShowTablesSuite extends ShowTablesSuiteBase with CommandSuiteBase { + test("SPARK-33670: show partitions from a datasource table") { + import testImplicits._ + withNamespace(s"$catalog.ns") { + sql(s"CREATE NAMESPACE $catalog.ns") + sql(s"USE $catalog.ns") + val t = "part_datasrc" + withTable(t) { + val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c") + df.write.partitionBy("a").format("parquet").mode(SaveMode.Overwrite).saveAsTable(t) + assert(sql(s"SHOW TABLE EXTENDED LIKE '$t' PARTITION(a = 1)").count() === 1) + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableAddPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableAddPartitionSuite.scala new file mode 100644 index 0000000000000..65494a7266756 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableAddPartitionSuite.scala @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.execution.command + +/** + * The class contains tests for the `ALTER TABLE .. ADD PARTITION` command + * to check V2 table catalogs. + */ +class AlterTableAddPartitionSuite + extends command.AlterTableAddPartitionSuiteBase + with CommandSuiteBase { + test("SPARK-33650: add partition into a table which doesn't support partition management") { + withNamespaceAndTable("ns", "tbl", s"non_part_$catalog") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing") + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t ADD PARTITION (id=1)") + }.getMessage + assert(errMsg.contains(s"Table $t can not alter partitions")) + } + } + + test("empty string as partition value") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)") + sql(s"ALTER TABLE $t ADD PARTITION (p1 = '')") + checkPartitions(t, Map("p1" -> "")) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala new file mode 100644 index 0000000000000..3515fa3390206 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableDropPartitionSuite.scala @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
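
The SPARK-33650 test above (and its DROP PARTITION counterpart in the next file) relies on partition DDL being accepted only when the loaded V2 table supports partition management; otherwise the command is rejected with a "can not alter partitions" error. A minimal sketch of that capability check, assuming the `SupportsPartitionManagement` mix-in from the V2 connector API; the check itself is simplified:

import org.apache.spark.sql.connector.catalog.{SupportsPartitionManagement, Table}

object PartitionCapabilitySketch {
  // A table can be targeted by ALTER TABLE ... ADD/DROP PARTITION only if it
  // implements SupportsPartitionManagement.
  def canAlterPartitions(table: Table): Boolean = table match {
    case _: SupportsPartitionManagement => true
    case _ => false
  }
}
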
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.execution.command + +/** + * The class contains tests for the `ALTER TABLE .. DROP PARTITION` command + * to check V2 table catalogs. + */ +class AlterTableDropPartitionSuite + extends command.AlterTableDropPartitionSuiteBase + with CommandSuiteBase { + override protected val notFullPartitionSpecErr = "Partition spec is invalid" + override protected def nullPartitionValue: String = "null" + + test("SPARK-33650: drop partition into a table which doesn't support partition management") { + withNamespaceAndTable("ns", "tbl", s"non_part_$catalog") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing") + val errMsg = intercept[AnalysisException] { + sql(s"ALTER TABLE $t DROP PARTITION (id=1)") + }.getMessage + assert(errMsg.contains("can not alter partitions")) + } + } + + test("purge partition data") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (id bigint, data string) $defaultUsing PARTITIONED BY (id)") + sql(s"ALTER TABLE $t ADD PARTITION (id=1)") + try { + val errMsg = intercept[UnsupportedOperationException] { + sql(s"ALTER TABLE $t DROP PARTITION (id=1) PURGE") + }.getMessage + assert(errMsg.contains("purge is not supported")) + } finally { + sql(s"ALTER TABLE $t DROP PARTITION (id=1)") + } + } + } + + test("empty string as partition value") { + withNamespaceAndTable("ns", "tbl") { t => + sql(s"CREATE TABLE $t (col1 INT, p1 STRING) $defaultUsing PARTITIONED BY (p1)") + sql(s"ALTER TABLE $t ADD PARTITION (p1 = '')") + sql(s"ALTER TABLE $t DROP PARTITION (p1 = '')") + checkPartitions(t) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRenamePartitionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRenamePartitionSuite.scala new file mode 100644 index 0000000000000..bb06818da48b1 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/AlterTableRenamePartitionSuite.scala @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.sql.Row +import org.apache.spark.sql.execution.command + +/** + * The class contains tests for the `ALTER TABLE .. RENAME PARTITION` command + * to check V2 table catalogs. + */ +class AlterTableRenamePartitionSuite + extends command.AlterTableRenamePartitionSuiteBase + with CommandSuiteBase { + + test("with location") { + withNamespaceAndTable("ns", "tbl") { t => + createSinglePartTable(t) + val loc = "location1" + sql(s"ALTER TABLE $t ADD PARTITION (id = 2) LOCATION '$loc'") + sql(s"INSERT INTO $t PARTITION (id = 2) SELECT 'def'") + checkPartitions(t, Map("id" -> "1"), Map("id" -> "2")) + checkLocation(t, Map("id" -> "2"), loc) + + sql(s"ALTER TABLE $t PARTITION (id = 2) RENAME TO PARTITION (id = 3)") + checkPartitions(t, Map("id" -> "1"), Map("id" -> "3")) + // `InMemoryPartitionTableCatalog` should keep the original location + checkLocation(t, Map("id" -> "3"), loc) + checkAnswer(sql(s"SELECT id, data FROM $t WHERE id = 3"), Row(3, "def")) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/CommandSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/CommandSuiteBase.scala new file mode 100644 index 0000000000000..2dd80b7bb6a02 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/CommandSuiteBase.scala @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.SparkConf +import org.apache.spark.sql.catalyst.analysis.ResolvePartitionSpec +import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec +import org.apache.spark.sql.connector.{InMemoryPartitionTable, InMemoryPartitionTableCatalog, InMemoryTableCatalog} +import org.apache.spark.sql.connector.catalog.{CatalogV2Implicits, Identifier} +import org.apache.spark.sql.test.SharedSparkSession + +/** + * The trait contains settings and utility functions. It can be mixed to the test suites for + * datasource v2 catalogs (in-memory test catalogs). This trait complements the trait + * `org.apache.spark.sql.execution.command.DDLCommandTestUtils` with common utility functions + * for all unified datasource V1 and V2 test suites. 
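
The trait that follows makes its test catalogs visible to SQL purely through configuration: a `spark.sql.catalog.<name>` entry pointing at a catalog implementation class. A hedged, self-contained sketch of the same mechanism outside the test harness; the builder settings are illustrative, and `InMemoryPartitionTableCatalog` is a test-only class, so a real application would plug in its own `CatalogPlugin`:

import org.apache.spark.sql.SparkSession

object RegisterV2CatalogSketch {
  def build(): SparkSession = SparkSession.builder()
    .master("local[1]")
    .appName("v2-catalog-sketch")
    // Registers a V2 catalog named `test_catalog`; SQL can then reference it as
    // `test_catalog.ns.tbl`. The class name mirrors the one used in the suites.
    .config("spark.sql.catalog.test_catalog",
      "org.apache.spark.sql.connector.InMemoryPartitionTableCatalog")
    .getOrCreate()
}
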
+ */ +trait CommandSuiteBase extends SharedSparkSession { + def version: String = "V2" // The prefix is added to test names + def catalog: String = "test_catalog" // The default V2 catalog for testing + def defaultUsing: String = "USING _" // The clause is used in creating v2 tables under testing + + // V2 catalogs created and used especially for testing + override def sparkConf: SparkConf = super.sparkConf + .set(s"spark.sql.catalog.$catalog", classOf[InMemoryPartitionTableCatalog].getName) + .set(s"spark.sql.catalog.non_part_$catalog", classOf[InMemoryTableCatalog].getName) + + def checkLocation( + t: String, + spec: TablePartitionSpec, + expected: String): Unit = { + import CatalogV2Implicits._ + + val tablePath = t.split('.') + val catalogName = tablePath.head + val namespaceWithTable = tablePath.tail + val namespaces = namespaceWithTable.init + val tableName = namespaceWithTable.last + val catalogPlugin = spark.sessionState.catalogManager.catalog(catalogName) + val partTable = catalogPlugin.asTableCatalog + .loadTable(Identifier.of(namespaces, tableName)) + .asInstanceOf[InMemoryPartitionTable] + val ident = ResolvePartitionSpec.convertToPartIdent(spec, partTable.partitionSchema.fields) + val partMetadata = partTable.loadPartitionMetadata(ident) + + assert(partMetadata.containsKey("location")) + assert(partMetadata.get("location") === expected) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala new file mode 100644 index 0000000000000..9c9b7d3049c7a --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/DropTableSuite.scala @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.sql.Row +import org.apache.spark.sql.connector.InMemoryTableSessionCatalog +import org.apache.spark.sql.execution.command +import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION + +/** + * The class contains tests for the `DROP TABLE` command to check V2 table catalogs. + */ +class DropTableSuite extends command.DropTableSuiteBase with CommandSuiteBase { + test("purge option") { + withNamespaceAndTable("ns", "tbl") { t => + createTable(t) + val errMsg = intercept[UnsupportedOperationException] { + sql(s"DROP TABLE $catalog.ns.tbl PURGE") + }.getMessage + // The default TableCatalog.purgeTable implementation throws an exception. 
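      // A rough sketch of that default, continuing the note above; this is an
      // assumption about TableCatalog.purgeTable, not the actual Spark source:
      //
      //   def purgeTable(ident: Identifier): Boolean =
      //     throw new UnsupportedOperationException("Purge table is not supported")
      //
      // Hence DROP TABLE ... PURGE only succeeds for catalogs that override purgeTable.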
+ assert(errMsg.contains("Purge table is not supported")) + } + } + + test("table qualified with the session catalog name") { + withSQLConf( + V2_SESSION_CATALOG_IMPLEMENTATION.key -> classOf[InMemoryTableSessionCatalog].getName) { + + sql("CREATE TABLE tbl USING json AS SELECT 1 AS i") + checkAnswer( + sql("SHOW TABLES IN spark_catalog.default").select("tableName"), + Row("tbl")) + + sql("DROP TABLE spark_catalog.default.tbl") + checkAnswer( + sql("SHOW TABLES IN spark_catalog.default").select("tableName"), + Seq.empty) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowNamespacesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowNamespacesSuite.scala new file mode 100644 index 0000000000000..7a2c136eeada4 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowNamespacesSuite.scala @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.SparkConf +import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.connector.BasicInMemoryTableCatalog +import org.apache.spark.sql.execution.command +import org.apache.spark.sql.internal.SQLConf + +/** + * The class contains tests for the `SHOW NAMESPACES` command to check V2 table catalogs. 
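
The "table qualified with the session catalog name" test above leans on the fixed name of the built-in session catalog, `spark_catalog`: the same table can be addressed with a 1-, 2- or 3-part name. A hedged sketch, assuming the current database is `default` and a `SparkSession` is at hand:

import org.apache.spark.sql.SparkSession

object SessionCatalogNamesSketch {
  def demo(spark: SparkSession): Unit = {
    spark.sql("CREATE TABLE tbl USING json AS SELECT 1 AS i")
    spark.sql("SELECT i FROM tbl").show()                        // 1-part: table
    spark.sql("SELECT i FROM default.tbl").show()                // 2-part: database.table
    spark.sql("SELECT i FROM spark_catalog.default.tbl").show()  // 3-part: catalog.database.table
    spark.sql("DROP TABLE spark_catalog.default.tbl")
  }
}
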
+ */ +class ShowNamespacesSuite extends command.ShowNamespacesSuiteBase with CommandSuiteBase { + override def sparkConf: SparkConf = super.sparkConf + .set("spark.sql.catalog.testcat_no_namespace", classOf[BasicInMemoryTableCatalog].getName) + + test("IN namespace doesn't exist") { + withSQLConf(SQLConf.DEFAULT_CATALOG.key -> catalog) { + runShowNamespacesSql("SHOW NAMESPACES in dummy", Seq.empty) + } + runShowNamespacesSql(s"SHOW NAMESPACES in $catalog.ns1", Seq.empty) + runShowNamespacesSql(s"SHOW NAMESPACES in $catalog.ns1.ns3", Seq.empty) + } + + test("default v2 catalog doesn't support namespace") { + withSQLConf(SQLConf.DEFAULT_CATALOG.key -> "testcat_no_namespace") { + val errMsg = intercept[AnalysisException] { + sql("SHOW NAMESPACES") + }.getMessage + assert(errMsg.contains("does not support namespaces")) + } + } + + test("v2 catalog doesn't support namespace") { + val errMsg = intercept[AnalysisException] { + sql("SHOW NAMESPACES in testcat_no_namespace") + }.getMessage + assert(errMsg.contains("does not support namespaces")) + } + + test("case sensitivity") { + Seq(true, false).foreach { caseSensitive => + withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { + withNamespace(s"$catalog.AAA", s"$catalog.bbb") { + sql(s"CREATE NAMESPACE $catalog.AAA") + sql(s"CREATE NAMESPACE $catalog.bbb") + runShowNamespacesSql( + s"SHOW NAMESPACES IN $catalog", + Seq("AAA", "bbb") ++ builtinTopNamespaces) + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog LIKE 'AAA'", Seq("AAA")) + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog LIKE 'aaa'", Seq("AAA")) + } + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowPartitionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowPartitionsSuite.scala new file mode 100644 index 0000000000000..42f05ee55504a --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowPartitionsSuite.scala @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.sql.{AnalysisException, Row} +import org.apache.spark.sql.execution.command + +/** + * The class contains tests for the `SHOW PARTITIONS` command to check V2 table catalogs. 
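+ * + * For example, as an illustrative sketch only (`test_catalog` and the `USING _` clause come from `CommandSuiteBase`): + * {{{ + *   sql("CREATE TABLE test_catalog.ns.tbl (price INT, part INT) USING _ PARTITIONED BY (part)") + *   sql("SHOW PARTITIONS test_catalog.ns.tbl")   // one Row per partition, e.g. Row("part=0") + * }}}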
+ */ +class ShowPartitionsSuite extends command.ShowPartitionsSuiteBase with CommandSuiteBase { + test("a table does not support partitioning") { + val table = s"non_part_$catalog.tab1" + withTable(table) { + sql(s""" + |CREATE TABLE $table (price int, qty int, year int, month int) + |$defaultUsing""".stripMargin) + val errMsg = intercept[AnalysisException] { + sql(s"SHOW PARTITIONS $table") + }.getMessage + assert(errMsg.contains( + "SHOW PARTITIONS cannot run for a table which does not support partitioning")) + } + } + + test("SPARK-33889, SPARK-33904: null and empty string as partition values") { + withNamespaceAndTable("ns", "tbl") { t => + createNullPartTable(t, "parquet") + runShowPartitionsSql(s"SHOW PARTITIONS $t", Row("part=") :: Row("part=null") :: Nil) + checkAnswer(spark.table(t), Row(0, "") :: Row(1, null) :: Nil) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala new file mode 100644 index 0000000000000..6a9a9399b9563 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowTablesSuite.scala @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.v2 + +import org.apache.spark.sql.{AnalysisException, Row} +import org.apache.spark.sql.execution.command +import org.apache.spark.sql.types.{StringType, StructType} + +/** + * The class contains tests for the `SHOW TABLES` command to check V2 table catalogs. 
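+ * + * Unlike V1, the output checked here has only the `namespace` and `tableName` columns, e.g. (an illustrative sketch; `test_catalog` is the catalog configured in `CommandSuiteBase`): + * {{{ + *   sql("CREATE TABLE test_catalog.n1.n2.db.table_name (id bigint, data string) USING _") + *   sql("SHOW TABLES FROM test_catalog.n1.n2.db")   // Row("n1.n2.db", "table_name") + * }}}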
+ */ +class ShowTablesSuite extends command.ShowTablesSuiteBase with CommandSuiteBase { + override def defaultNamespace: Seq[String] = Nil + override def showSchema: StructType = { + new StructType() + .add("namespace", StringType, nullable = false) + .add("tableName", StringType, nullable = false) + } + override def getRows(showRows: Seq[ShowRow]): Seq[Row] = { + showRows.map { + case ShowRow(namespace, table, _) => Row(namespace, table) + } + } + + // The test fails for V1 catalog with the error: + // org.apache.spark.sql.AnalysisException: + // The namespace in session catalog must have exactly one name part: spark_catalog.n1.n2.db + test("show tables in nested namespaces") { + withTable(s"$catalog.n1.n2.db") { + spark.sql(s"CREATE TABLE $catalog.n1.n2.db.table_name (id bigint, data string) $defaultUsing") + runShowTablesSql( + s"SHOW TABLES FROM $catalog.n1.n2.db", + Seq(ShowRow("n1.n2.db", "table_name", false))) + } + } + + // The test fails for V1 catalog with the error: + // org.apache.spark.sql.AnalysisException: + // The namespace in session catalog must have exactly one name part: spark_catalog.table + test("using v2 catalog with empty namespace") { + withTable(s"$catalog.table") { + spark.sql(s"CREATE TABLE $catalog.table (id bigint, data string) $defaultUsing") + runShowTablesSql(s"SHOW TABLES FROM $catalog", Seq(ShowRow("", "table", false))) + } + } + + // The test fails for V1 catalog with the error: + // org.apache.spark.sql.AnalysisException: + // The namespace in session catalog must have exactly one name part: spark_catalog.ns1.ns2.tbl + test("SHOW TABLE EXTENDED not valid v1 database") { + def testV1CommandNamespace(sqlCommand: String, namespace: String): Unit = { + val e = intercept[AnalysisException] { + sql(sqlCommand) + } + assert(e.message.contains(s"SHOW TABLE EXTENDED is not supported for v2 tables")) + } + + val namespace = s"$catalog.ns1.ns2" + val table = "tbl" + withTable(s"$namespace.$table") { + sql(s"CREATE TABLE $namespace.$table (id bigint, data string) " + + s"$defaultUsing PARTITIONED BY (id)") + + testV1CommandNamespace(s"SHOW TABLE EXTENDED FROM $namespace LIKE 'tb*'", + namespace) + testV1CommandNamespace(s"SHOW TABLE EXTENDED IN $namespace LIKE 'tb*'", + namespace) + testV1CommandNamespace("SHOW TABLE EXTENDED " + + s"FROM $namespace LIKE 'tb*' PARTITION(id=1)", + namespace) + testV1CommandNamespace("SHOW TABLE EXTENDED " + + s"IN $namespace LIKE 'tb*' PARTITION(id=1)", + namespace) + } + } + + // TODO(SPARK-33393): Support SHOW TABLE EXTENDED in DSv2 + test("SHOW TABLE EXTENDED: an existing table") { + val table = "people" + withTable(s"$catalog.$table") { + sql(s"CREATE TABLE $catalog.$table (name STRING, id INT) $defaultUsing") + val errMsg = intercept[AnalysisException] { + sql(s"SHOW TABLE EXTENDED FROM $catalog LIKE '*$table*'").collect() + }.getMessage + assert(errMsg.contains("SHOW TABLE EXTENDED is not supported for v2 tables")) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/BasicWriteJobStatsTrackerMetricSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/BasicWriteJobStatsTrackerMetricSuite.scala new file mode 100644 index 0000000000000..3e58c225d8c7a --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/BasicWriteJobStatsTrackerMetricSuite.scala @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + *    http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources + +import org.apache.spark.SparkFunSuite +import org.apache.spark.sql.{LocalSparkSession, SparkSession} + +class BasicWriteJobStatsTrackerMetricSuite extends SparkFunSuite with LocalSparkSession { + + test("SPARK-32978: make sure the number of dynamic part metric is correct") { + try { + val partitions = "50" + spark = SparkSession.builder().master("local[4]").getOrCreate() + val statusStore = spark.sharedState.statusStore + + spark.sql("create table dynamic_partition(i bigint, part bigint) " + + "using parquet partitioned by (part)") + val oldExecutionsSize = statusStore.executionsList().size + spark.sql("insert overwrite table dynamic_partition partition(part) " + + s"select id, id % $partitions as part from range(10000)") + + // Wait for listener to finish computing the metrics for the executions. + while (statusStore.executionsList().size - oldExecutionsSize < 1 || + statusStore.executionsList().last.metricValues == null) { + Thread.sleep(100) + } + + // There should be 2 SQLExecutionUIData in executionsList and the 2nd item is what we need, + // but the executionId is indeterminate in Maven tests, + // so the `statusStore.execution(executionId)` API is not used. + assert(statusStore.executionsCount() == 2) + val executionData = statusStore.executionsList()(1) + val accumulatorIdOpt = + executionData.metrics.find(_.name == "number of dynamic part").map(_.accumulatorId) + assert(accumulatorIdOpt.isDefined) + val numPartsOpt = executionData.metricValues.get(accumulatorIdOpt.get) + assert(numPartsOpt.isDefined && numPartsOpt.get == partitions) + + } finally { + spark.sql("drop table if exists dynamic_partition") + spark.stop() + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/CommonFileDataSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/CommonFileDataSourceSuite.scala new file mode 100644 index 0000000000000..b7d0a7fc306ad --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/CommonFileDataSourceSuite.scala @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + *    http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources + +import org.scalatest.funsuite.AnyFunSuite + +import org.apache.spark.sql.{Dataset, Encoders, FakeFileSystemRequiringDSOption, SparkSession} +import org.apache.spark.sql.catalyst.plans.SQLHelper + +/** + * The trait contains tests for all file-based data sources. + * The tests that are not applicable to all file-based data sources should be placed to + * [[org.apache.spark.sql.FileBasedDataSourceSuite]]. + */ +trait CommonFileDataSourceSuite extends SQLHelper { self: AnyFunSuite => + + protected def spark: SparkSession + protected def dataSourceFormat: String + protected def inputDataset: Dataset[_] = spark.createDataset(Seq("abc"))(Encoders.STRING) + + test(s"Propagate Hadoop configs from $dataSourceFormat options to underlying file system") { + withSQLConf( + "fs.file.impl" -> classOf[FakeFileSystemRequiringDSOption].getName, + "fs.file.impl.disable.cache" -> "true") { + Seq(false, true).foreach { mergeSchema => + withTempPath { dir => + val path = dir.getAbsolutePath + val conf = Map("ds_option" -> "value", "mergeSchema" -> mergeSchema.toString) + inputDataset + .write + .options(conf) + .format(dataSourceFormat) + .save(path) + Seq(path, "file:" + path.stripPrefix("file:")).foreach { p => + val readback = spark + .read + .options(conf) + .format(dataSourceFormat) + .load(p) + // Checks that read doesn't throw the exception from `FakeFileSystemRequiringDSOption` + readback.write.mode("overwrite").format("noop").save() + } + } + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/DataSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/DataSourceSuite.scala index dc97b7a55ee9a..6ba3d2723412b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/DataSourceSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/DataSourceSuite.scala @@ -141,14 +141,14 @@ class DataSourceSuite extends SharedSparkSession with PrivateMethodTester { } test("Data source options should be propagated in method checkAndGlobPathIfNecessary") { - val dataSourceOptions = Map("fs.defaultFS" -> "nonexistsFs://nonexistsFs") + val dataSourceOptions = Map("fs.defaultFS" -> "nonexistentFs://nonexistentFs") val dataSource = DataSource(spark, "parquet", Seq("/path3"), options = dataSourceOptions) val checkAndGlobPathIfNecessary = PrivateMethod[Seq[Path]]('checkAndGlobPathIfNecessary) val message = intercept[java.io.IOException] { dataSource invokePrivate checkAndGlobPathIfNecessary(false, false) }.getMessage - val expectMessage = "No FileSystem for scheme nonexistsFs" + val expectMessage = "No FileSystem for scheme nonexistentFs" assert(message.filterNot(Set(':', '"').contains) == expectMessage) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala index ce511842e6356..f492fc653653e 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala @@ -61,4 +61,17 @@ class FileFormatWriterSuite checkAnswer(spark.table("t2").sort("id"), Seq(Row(0, null), Row(1, null), Row(2, null))) } } + + test("SPARK-33904: save and insert into a table in a 
namespace of spark_catalog") { + val ns = "spark_catalog.ns" + withNamespace(ns) { + spark.sql(s"CREATE NAMESPACE $ns") + val t = s"$ns.tbl" + withTable(t) { + spark.range(1).write.saveAsTable(t) + Seq(100).toDF().write.insertInto(t) + checkAnswer(spark.table(t), Seq(Row(0), Row(100))) + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala index a808546745817..50f32126e5dec 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala @@ -262,20 +262,22 @@ class FileSourceStrategySuite extends QueryTest with SharedSparkSession with Pre "p1=2/file7_0000" -> 1), buckets = 3) - // No partition pruning - checkScan(table) { partitions => - assert(partitions.size == 3) - assert(partitions(0).files.size == 5) - assert(partitions(1).files.size == 0) - assert(partitions(2).files.size == 2) - } + withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "false") { + // No partition pruning + checkScan(table) { partitions => + assert(partitions.size == 3) + assert(partitions(0).files.size == 5) + assert(partitions(1).files.size == 0) + assert(partitions(2).files.size == 2) + } - // With partition pruning - checkScan(table.where("p1=2")) { partitions => - assert(partitions.size == 3) - assert(partitions(0).files.size == 3) - assert(partitions(1).files.size == 0) - assert(partitions(2).files.size == 1) + // With partition pruning + checkScan(table.where("p1=2")) { partitions => + assert(partitions.size == 3) + assert(partitions(0).files.size == 3) + assert(partitions(1).files.size == 0) + assert(partitions(2).files.size == 1) + } } } @@ -549,17 +551,22 @@ class FileSourceStrategySuite extends QueryTest with SharedSparkSession with Pre assert(table.rdd.partitions.length == 3) } - withSQLConf(SQLConf.FILES_MIN_PARTITION_NUM.key -> "16") { - val partitions = (1 to 100).map(i => s"file$i" -> 128 * 1024 * 1024) - val table = createTable(files = partitions) - // partition is limited by filesMaxPartitionBytes(128MB) - assert(table.rdd.partitions.length == 100) - } + withSQLConf( + SQLConf.FILES_MAX_PARTITION_BYTES.key -> "2MB", + SQLConf.FILES_OPEN_COST_IN_BYTES.key -> String.valueOf(4 * 1024 * 1024)) { + + withSQLConf(SQLConf.FILES_MIN_PARTITION_NUM.key -> "8") { + val partitions = (1 to 12).map(i => s"file$i" -> 2 * 1024 * 1024) + val table = createTable(files = partitions) + // partition is limited by filesMaxPartitionBytes(2MB) + assert(table.rdd.partitions.length == 12) + } - withSQLConf(SQLConf.FILES_MIN_PARTITION_NUM.key -> "32") { - val partitions = (1 to 800).map(i => s"file$i" -> 4 * 1024 * 1024) - val table = createTable(files = partitions) - assert(table.rdd.partitions.length == 50) + withSQLConf(SQLConf.FILES_MIN_PARTITION_NUM.key -> "16") { + val partitions = (1 to 12).map(i => s"file$i" -> 4 * 1024 * 1024) + val table = createTable(files = partitions) + assert(table.rdd.partitions.length == 24) + } } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PathFilterStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PathFilterStrategySuite.scala new file mode 100644 index 0000000000000..b965a78c9eec0 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PathFilterStrategySuite.scala @@ -0,0 
+1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources + +import org.apache.spark.sql.QueryTest +import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap +import org.apache.spark.sql.test.SharedSparkSession + +class PathFilterStrategySuite extends QueryTest with SharedSparkSession { + + test("SPARK-31962: PathFilterStrategies - modifiedAfter option") { + val options = + CaseInsensitiveMap[String](Map("modifiedAfter" -> "2010-10-01T01:01:00")) + val strategy = PathFilterFactory.create(options) + assert(strategy.head.isInstanceOf[ModifiedAfterFilter]) + assert(strategy.size == 1) + } + + test("SPARK-31962: PathFilterStrategies - modifiedBefore option") { + val options = + CaseInsensitiveMap[String](Map("modifiedBefore" -> "2020-10-01T01:01:00")) + val strategy = PathFilterFactory.create(options) + assert(strategy.head.isInstanceOf[ModifiedBeforeFilter]) + assert(strategy.size == 1) + } + + test("SPARK-31962: PathFilterStrategies - pathGlobFilter option") { + val options = CaseInsensitiveMap[String](Map("pathGlobFilter" -> "*.txt")) + val strategy = PathFilterFactory.create(options) + assert(strategy.head.isInstanceOf[PathGlobFilter]) + assert(strategy.size == 1) + } + + test("SPARK-31962: PathFilterStrategies - no options") { + val options = CaseInsensitiveMap[String](Map.empty) + val strategy = PathFilterFactory.create(options) + assert(strategy.isEmpty) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PathFilterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PathFilterSuite.scala new file mode 100644 index 0000000000000..1af2adfd8640c --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/PathFilterSuite.scala @@ -0,0 +1,307 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.datasources + +import java.io.File +import java.time.{LocalDateTime, ZoneId, ZoneOffset} +import java.time.format.DateTimeFormatter + +import scala.util.Random + +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} +import org.apache.spark.sql.catalyst.util.{stringToFile, DateTimeUtils} +import org.apache.spark.sql.test.SharedSparkSession +import org.apache.spark.sql.types.{StringType, StructField, StructType} + +class PathFilterSuite extends QueryTest with SharedSparkSession { + import testImplicits._ + + test("SPARK-31962: modifiedBefore specified" + + " and sharing same timestamp with file last modified time.") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + executeTest(dir, Seq(curTime), 0, modifiedBefore = Some(formatTime(curTime))) + } + } + + test("SPARK-31962: modifiedAfter specified" + + " and sharing same timestamp with file last modified time.") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + executeTest(dir, Seq(curTime), 0, modifiedAfter = Some(formatTime(curTime))) + } + } + + test("SPARK-31962: modifiedBefore and modifiedAfter option" + + " share same timestamp with file last modified time.") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val formattedTime = formatTime(curTime) + executeTest(dir, Seq(curTime), 0, modifiedBefore = Some(formattedTime), + modifiedAfter = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedBefore and modifiedAfter option" + + " share same timestamp with earlier file last modified time.") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val fileTime = curTime.minusDays(3) + val formattedTime = formatTime(curTime) + executeTest(dir, Seq(fileTime), 0, modifiedBefore = Some(formattedTime), + modifiedAfter = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedBefore and modifiedAfter option" + + " share same timestamp with later file last modified time.") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val formattedTime = formatTime(curTime) + executeTest(dir, Seq(curTime), 0, modifiedBefore = Some(formattedTime), + modifiedAfter = Some(formattedTime)) + } + } + + test("SPARK-31962: when modifiedAfter specified with a past date") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val pastTime = curTime.minusYears(1) + val formattedTime = formatTime(pastTime) + executeTest(dir, Seq(curTime), 1, modifiedAfter = Some(formattedTime)) + } + } + + test("SPARK-31962: when modifiedBefore specified with a future date") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val futureTime = curTime.plusYears(1) + val formattedTime = formatTime(futureTime) + executeTest(dir, Seq(curTime), 1, modifiedBefore = Some(formattedTime)) + } + } + + test("SPARK-31962: with modifiedBefore option provided using a past date") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val pastTime = curTime.minusYears(1) + val formattedTime = formatTime(pastTime) + executeTest(dir, Seq(curTime), 0, modifiedBefore = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedAfter specified with a past date, multiple files, one valid") { + withTempDir { dir => + val fileTime1 = LocalDateTime.now(ZoneOffset.UTC) + val fileTime2 = LocalDateTime.ofEpochSecond(0, 0, ZoneOffset.UTC) + val pastTime = fileTime1.minusYears(1) + val formattedTime = formatTime(pastTime) + executeTest(dir, Seq(fileTime1, 
fileTime2), 1, modifiedAfter = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedAfter specified with a past date, multiple files, both valid") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val pastTime = curTime.minusYears(1) + val formattedTime = formatTime(pastTime) + executeTest(dir, Seq(curTime, curTime), 2, modifiedAfter = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedAfter specified with a past date, multiple files, none valid") { + withTempDir { dir => + val fileTime = LocalDateTime.ofEpochSecond(0, 0, ZoneOffset.UTC) + val pastTime = LocalDateTime.now(ZoneOffset.UTC).minusYears(1) + val formattedTime = formatTime(pastTime) + executeTest(dir, Seq(fileTime, fileTime), 0, modifiedAfter = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedBefore specified with a future date, multiple files, both valid") { + withTempDir { dir => + val fileTime = LocalDateTime.ofEpochSecond(0, 0, ZoneOffset.UTC) + val futureTime = LocalDateTime.now(ZoneOffset.UTC).plusYears(1) + val formattedTime = formatTime(futureTime) + executeTest(dir, Seq(fileTime, fileTime), 2, modifiedBefore = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedBefore specified with a future date, multiple files, one valid") { + withTempDir { dir => + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val fileTime1 = LocalDateTime.ofEpochSecond(0, 0, ZoneOffset.UTC) + val fileTime2 = curTime.plusDays(3) + val formattedTime = formatTime(curTime) + executeTest(dir, Seq(fileTime1, fileTime2), 1, modifiedBefore = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedBefore specified with a future date, multiple files, none valid") { + withTempDir { dir => + val fileTime = LocalDateTime.now(ZoneOffset.UTC).minusDays(1) + val formattedTime = formatTime(fileTime) + executeTest(dir, Seq(fileTime, fileTime), 0, modifiedBefore = Some(formattedTime)) + } + } + + test("SPARK-31962: modifiedBefore/modifiedAfter is specified with an invalid date") { + executeTestWithBadOption( + Map("modifiedBefore" -> "2024-05+1 01:00:00"), + Seq("The timestamp provided", "modifiedbefore", "2024-05+1 01:00:00")) + + executeTestWithBadOption( + Map("modifiedAfter" -> "2024-05+1 01:00:00"), + Seq("The timestamp provided", "modifiedafter", "2024-05+1 01:00:00")) + } + + test("SPARK-31962: modifiedBefore/modifiedAfter - empty option") { + executeTestWithBadOption( + Map("modifiedBefore" -> ""), + Seq("The timestamp provided", "modifiedbefore")) + + executeTestWithBadOption( + Map("modifiedAfter" -> ""), + Seq("The timestamp provided", "modifiedafter")) + } + + test("SPARK-31962: modifiedBefore/modifiedAfter filter takes into account local timezone " + + "when specified as an option.") { + Seq("modifiedbefore", "modifiedafter").foreach { filterName => + // CET = UTC + 1 hour, HST = UTC - 10 hours + Seq("CET", "HST").foreach { tzId => + testModifiedDateFilterWithTimezone(tzId, filterName) + } + } + } + + test("Option pathGlobFilter: filter files correctly") { + withTempPath { path => + val dataDir = path.getCanonicalPath + Seq("foo").toDS().write.text(dataDir) + Seq("bar").toDS().write.mode("append").orc(dataDir) + val df = spark.read.option("pathGlobFilter", "*.txt").text(dataDir) + checkAnswer(df, Row("foo")) + + // Both glob pattern in option and path should be effective to filter files. 
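+      // That is, the two filters compose: the glob in the load path selects candidate files first, and +      // the `pathGlobFilter` option narrows them further, which is why the mismatching pair below returns no rows.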
+      val df2 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*.orc") + checkAnswer(df2, Seq.empty) + + val df3 = spark.read.option("pathGlobFilter", "*.txt").text(dataDir + "/*xt") + checkAnswer(df3, Row("foo")) + } + } + + test("Option pathGlobFilter: simple extension filtering should contain partition info") { + withTempPath { path => + val input = Seq(("foo", 1), ("oof", 2)).toDF("a", "b") + input.write.partitionBy("b").text(path.getCanonicalPath) + Seq("bar").toDS().write.mode("append").orc(path.getCanonicalPath + "/b=1") + + // If we use glob pattern in the path, the partition column won't be shown in the result. + val df = spark.read.text(path.getCanonicalPath + "/*/*.txt") + checkAnswer(df, input.select("a")) + + val df2 = spark.read.option("pathGlobFilter", "*.txt").text(path.getCanonicalPath) + checkAnswer(df2, input) + } + } + + private def executeTest( + dir: File, + fileDates: Seq[LocalDateTime], + expectedCount: Long, + modifiedBefore: Option[String] = None, + modifiedAfter: Option[String] = None): Unit = { + fileDates.foreach { fileDate => + val file = createSingleFile(dir) + setFileTime(fileDate, file) + } + + val schema = StructType(Seq(StructField("a", StringType))) + + var dfReader = spark.read.format("csv").option("timeZone", "UTC").schema(schema) + modifiedBefore.foreach { opt => dfReader = dfReader.option("modifiedBefore", opt) } + modifiedAfter.foreach { opt => dfReader = dfReader.option("modifiedAfter", opt) } + + if (expectedCount > 0) { + // without pathGlobFilter + val df1 = dfReader.load(dir.getCanonicalPath) + assert(df1.count() === expectedCount) + + // pathGlobFilter matched + val df2 = dfReader.option("pathGlobFilter", "*.csv").load(dir.getCanonicalPath) + assert(df2.count() === expectedCount) + + // pathGlobFilter mismatched + val df3 = dfReader.option("pathGlobFilter", "*.txt").load(dir.getCanonicalPath) + assert(df3.count() === 0) + } else { + val df = dfReader.load(dir.getCanonicalPath) + assert(df.count() === 0) + } + } + + private def executeTestWithBadOption( + options: Map[String, String], + expectedMsgParts: Seq[String]): Unit = { + withTempDir { dir => + createSingleFile(dir) + val exc = intercept[AnalysisException] { + var dfReader = spark.read.format("csv") + options.foreach { case (key, value) => + dfReader = dfReader.option(key, value) + } + dfReader.load(dir.getCanonicalPath) + } + expectedMsgParts.foreach { msg => assert(exc.getMessage.contains(msg)) } + } + } + + private def testModifiedDateFilterWithTimezone( + timezoneId: String, + filterParamName: String): Unit = { + val curTime = LocalDateTime.now(ZoneOffset.UTC) + val zoneId: ZoneId = DateTimeUtils.getTimeZone(timezoneId).toZoneId + val strategyTimeInMicros = + ModifiedDateFilter.toThreshold( + curTime.toString, + timezoneId, + filterParamName) + val strategyTimeInSeconds = strategyTimeInMicros / 1000 / 1000 + + val curTimeAsSeconds = curTime.atZone(zoneId).toEpochSecond + withClue(s"timezone: $timezoneId / param: $filterParamName,") { + assert(strategyTimeInSeconds === curTimeAsSeconds) + } + } + + private def createSingleFile(dir: File): File = { + val file = new File(dir, "temp" + Random.nextInt(1000000) + ".csv") + stringToFile(file, "text") + } + + private def setFileTime(time: LocalDateTime, file: File): Boolean = { + val sameTime = time.toEpochSecond(ZoneOffset.UTC) + file.setLastModified(sameTime * 1000) + } + + private def formatTime(time: LocalDateTime): String = { + time.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss")) + } +} diff --git
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/ReadSchemaTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/ReadSchemaTest.scala index fd70b6529ff51..22db55afc27c9 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/ReadSchemaTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/ReadSchemaTest.scala @@ -21,7 +21,7 @@ import java.io.File import org.apache.spark.sql.{QueryTest, Row} import org.apache.spark.sql.functions._ -import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} +import org.apache.spark.sql.test.SharedSparkSession /** * The reader schema is said to be evolved (or projected) when it changed after the data is diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala index 6420081a9757b..3e8a4fe290502 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/RowDataSourceStrategySuite.scala @@ -22,15 +22,10 @@ import java.util.Properties import org.scalatest.BeforeAndAfter -import org.apache.spark.SparkFunSuite -import org.apache.spark.sql.{DataFrame, Row} -import org.apache.spark.sql.sources._ import org.apache.spark.sql.test.SharedSparkSession -import org.apache.spark.sql.types._ import org.apache.spark.util.Utils class RowDataSourceStrategySuite extends SharedSparkSession with BeforeAndAfter { - import testImplicits._ val url = "jdbc:h2:mem:testdb0" val urlWithUserAndPass = "jdbc:h2:mem:testdb0;user=testUser;password=testPass" diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SaveIntoDataSourceCommandSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SaveIntoDataSourceCommandSuite.scala index 233978289f068..e843d1d328425 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SaveIntoDataSourceCommandSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SaveIntoDataSourceCommandSuite.scala @@ -17,7 +17,6 @@ package org.apache.spark.sql.execution.datasources -import org.apache.spark.SparkConf import org.apache.spark.sql.SaveMode import org.apache.spark.sql.test.SharedSparkSession diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala index 2b5cb27d59ad9..c90732183cb7a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala @@ -623,9 +623,9 @@ abstract class SchemaPruningSuite spark.read.format(dataSourceName).schema(schema).load(path + "/contacts") .createOrReplaceTempView("contacts") - val departmentScahem = "`depId` INT,`depName` STRING,`contactId` INT, " + + val departmentSchema = "`depId` INT,`depName` STRING,`contactId` INT, " + "`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>" - spark.read.format(dataSourceName).schema(departmentScahem).load(path + "/departments") + spark.read.format(dataSourceName).schema(departmentSchema).load(path + "/departments") .createOrReplaceTempView("departments") testThunk @@ -651,9 +651,9 @@ abstract class 
SchemaPruningSuite spark.read.format(dataSourceName).schema(schema).load(path + "/contacts") .createOrReplaceTempView("contacts") - val departmentScahem = "`depId` INT,`depName` STRING,`contactId` INT, " + + val departmentSchema = "`depId` INT,`depName` STRING,`contactId` INT, " + "`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, `address`: STRING>>" - spark.read.format(dataSourceName).schema(departmentScahem).load(path + "/departments") + spark.read.format(dataSourceName).schema(departmentSchema).load(path + "/departments") .createOrReplaceTempView("departments") testThunk diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala index 8462916daaab8..86ff026d7b1e9 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala @@ -34,7 +34,7 @@ import org.apache.spark.sql.execution.datasources.PartitionedFile import org.apache.spark.sql.functions.col import org.apache.spark.sql.internal.SQLConf.SOURCES_BINARY_FILE_MAX_LENGTH import org.apache.spark.sql.sources._ -import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} +import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ import org.apache.spark.util.Utils diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala index 066259075d6bf..30f0e45d04eab 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala @@ -36,13 +36,21 @@ import org.apache.hadoop.io.compress.GzipCodec import org.apache.spark.{SparkConf, SparkException, TestUtils} import org.apache.spark.sql.{AnalysisException, Column, DataFrame, QueryTest, Row} import org.apache.spark.sql.catalyst.util.DateTimeUtils +import org.apache.spark.sql.execution.datasources.CommonFileDataSourceSuite import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ -abstract class CSVSuite extends QueryTest with SharedSparkSession with TestCsvData { +abstract class CSVSuite + extends QueryTest + with SharedSparkSession + with TestCsvData + with CommonFileDataSourceSuite { + + import testImplicits._ + + override protected def dataSourceFormat = "csv" + private val carsFile = "test-data/cars.csv" private val carsMalformedFile = "test-data/cars-malformed.csv" private val carsFile8859 = "test-data/cars_iso-8859-1.csv" @@ -2420,6 +2428,30 @@ abstract class CSVSuite extends QueryTest with SharedSparkSession with TestCsvDa assert(readback.collect sameElements Array(Row("0"), Row("1"), Row("2"))) } } + + test("SPARK-33566: configure UnescapedQuoteHandling to parse " + + "unescaped quotes and unescaped delimiter data correctly") { + withTempPath { path => + val dataPath = path.getCanonicalPath + val row1 = Row("""a,""b,c""", "xyz") + val row2 = Row("""a,b,c""", """x""yz""") + // Generate the test data using `,` as the delimiter and `"` as the quote character, without escaping them.
+      Seq( + """c1,c2""", + s""""${row1.getString(0)}","${row1.getString(1)}"""", + s""""${row2.getString(0)}","${row2.getString(1)}"""") + .toDF().repartition(1).write.text(dataPath) + // Without configuring UnescapedQuoteHandling to STOP_AT_CLOSING_QUOTE, + // the result will be Row(""""a,""b""", """c""""), Row("""a,b,c""", """"x""yz"""") + val result = spark.read + .option("inferSchema", "true") + .option("header", "true") + .option("unescapedQuoteHandling", "STOP_AT_CLOSING_QUOTE") + .csv(dataPath).collect() + val expectedResults = Array(row1, row2) + assert(result.sameElements(expectedResults)) + } + } } class CSVv1Suite extends CSVSuite { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/DriverRegistrySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/DriverRegistrySuite.scala new file mode 100644 index 0000000000000..51dbdacb5e0fe --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/DriverRegistrySuite.scala @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + *    http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.spark.sql.execution.datasources.jdbc + +import org.apache.spark.SparkFunSuite +import org.apache.spark.sql.execution.datasources.jdbc.connection.TestDriver + +class DriverRegistrySuite extends SparkFunSuite { + test("SPARK-32229: get must give back wrapped driver if wrapped") { + val className = classOf[TestDriver].getName + DriverRegistry.register(className) + assert(DriverRegistry.get(className).isInstanceOf[TestDriver]) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuite.scala index ff5fe4f620a1d..0e9498b2681e2 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuite.scala @@ -19,27 +19,55 @@ package org.apache.spark.sql.execution.datasources.jdbc.connection import javax.security.auth.login.Configuration -class ConnectionProviderSuite extends ConnectionProviderSuiteBase { +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.test.SharedSparkSession + +class ConnectionProviderSuite extends ConnectionProviderSuiteBase with SharedSparkSession { + test("All built-in providers must be loaded") { + IntentionallyFaultyConnectionProvider.constructed = false + val providers = ConnectionProvider.loadProviders() + assert(providers.exists(_.isInstanceOf[BasicConnectionProvider])) + assert(providers.exists(_.isInstanceOf[DB2ConnectionProvider])) + assert(providers.exists(_.isInstanceOf[MariaDBConnectionProvider])) + assert(providers.exists(_.isInstanceOf[MSSQLConnectionProvider])) + assert(providers.exists(_.isInstanceOf[PostgresConnectionProvider])) + assert(providers.exists(_.isInstanceOf[OracleConnectionProvider])) + assert(IntentionallyFaultyConnectionProvider.constructed) + assert(!providers.exists(_.isInstanceOf[IntentionallyFaultyConnectionProvider])) + assert(providers.size === 6) + } + + test("Disabled provider must not be loaded") { + withSQLConf(SQLConf.DISABLED_JDBC_CONN_PROVIDER_LIST.key -> "db2") { + val providers = ConnectionProvider.loadProviders() + assert(!providers.exists(_.isInstanceOf[DB2ConnectionProvider])) + assert(providers.size === 5) + } + } + test("Multiple security configs must be reachable") { Configuration.setConfiguration(null) - val postgresDriver = registerDriver(PostgresConnectionProvider.driverClass) - val postgresProvider = new PostgresConnectionProvider( - postgresDriver, options("jdbc:postgresql://localhost/postgres")) - val db2Driver = registerDriver(DB2ConnectionProvider.driverClass) - val db2Provider = new DB2ConnectionProvider(db2Driver, options("jdbc:db2://localhost/db2")) + val postgresProvider = new PostgresConnectionProvider() + val postgresDriver = registerDriver(postgresProvider.driverClass) + val postgresOptions = options("jdbc:postgresql://localhost/postgres") + val postgresAppEntry = postgresProvider.appEntry(postgresDriver, postgresOptions) + val db2Provider = new DB2ConnectionProvider() + val db2Driver = registerDriver(db2Provider.driverClass) + val db2Options = options("jdbc:db2://localhost/db2") + val db2AppEntry = db2Provider.appEntry(db2Driver, db2Options) // Make sure no authentication for the databases are set val oldConfig = Configuration.getConfiguration - assert(oldConfig.getAppConfigurationEntry(postgresProvider.appEntry) == 
null) - assert(oldConfig.getAppConfigurationEntry(db2Provider.appEntry) == null) + assert(oldConfig.getAppConfigurationEntry(postgresAppEntry) == null) + assert(oldConfig.getAppConfigurationEntry(db2AppEntry) == null) - postgresProvider.setAuthenticationConfigIfNeeded() - db2Provider.setAuthenticationConfigIfNeeded() + postgresProvider.setAuthenticationConfigIfNeeded(postgresDriver, postgresOptions) + db2Provider.setAuthenticationConfigIfNeeded(db2Driver, db2Options) // Make sure authentication for the databases are set val newConfig = Configuration.getConfiguration assert(oldConfig != newConfig) - assert(newConfig.getAppConfigurationEntry(postgresProvider.appEntry) != null) - assert(newConfig.getAppConfigurationEntry(db2Provider.appEntry) != null) + assert(newConfig.getAppConfigurationEntry(postgresAppEntry) != null) + assert(newConfig.getAppConfigurationEntry(db2AppEntry) != null) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuiteBase.scala index d18a3088c4f2f..a299841b3c149 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuiteBase.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/ConnectionProviderSuiteBase.scala @@ -42,7 +42,7 @@ abstract class ConnectionProviderSuiteBase extends SparkFunSuite with BeforeAndA JDBCOptions.JDBC_PRINCIPAL -> "principal" )) - override def afterEach(): Unit = { + protected override def afterEach(): Unit = { try { Configuration.setConfiguration(null) } finally { @@ -50,20 +50,25 @@ abstract class ConnectionProviderSuiteBase extends SparkFunSuite with BeforeAndA } } - protected def testSecureConnectionProvider(provider: SecureConnectionProvider): Unit = { + protected def testSecureConnectionProvider( + provider: SecureConnectionProvider, + driver: Driver, + options: JDBCOptions): Unit = { + val providerAppEntry = provider.appEntry(driver, options) + // Make sure no authentication for the database is set - assert(Configuration.getConfiguration.getAppConfigurationEntry(provider.appEntry) == null) + assert(Configuration.getConfiguration.getAppConfigurationEntry(providerAppEntry) == null) // Make sure the first call sets authentication properly val savedConfig = Configuration.getConfiguration - provider.setAuthenticationConfigIfNeeded() + provider.setAuthenticationConfigIfNeeded(driver, options) val config = Configuration.getConfiguration assert(savedConfig != config) - val appEntry = config.getAppConfigurationEntry(provider.appEntry) + val appEntry = config.getAppConfigurationEntry(providerAppEntry) assert(appEntry != null) // Make sure a second call is not modifying the existing authentication - provider.setAuthenticationConfigIfNeeded() - assert(config.getAppConfigurationEntry(provider.appEntry) === appEntry) + provider.setAuthenticationConfigIfNeeded(driver, options) + assert(config.getAppConfigurationEntry(providerAppEntry) === appEntry) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/DB2ConnectionProviderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/DB2ConnectionProviderSuite.scala index d656f83e2ebb9..5885af82532d4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/DB2ConnectionProviderSuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/DB2ConnectionProviderSuite.scala @@ -19,9 +19,9 @@ package org.apache.spark.sql.execution.datasources.jdbc.connection class DB2ConnectionProviderSuite extends ConnectionProviderSuiteBase { test("setAuthenticationConfigIfNeeded must set authentication if not set") { - val driver = registerDriver(DB2ConnectionProvider.driverClass) - val provider = new DB2ConnectionProvider(driver, options("jdbc:db2://localhost/db2")) + val provider = new DB2ConnectionProvider() + val driver = registerDriver(provider.driverClass) - testSecureConnectionProvider(provider) + testSecureConnectionProvider(provider, driver, options("jdbc:db2://localhost/db2")) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/IntentionallyFaultyConnectionProvider.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/IntentionallyFaultyConnectionProvider.scala new file mode 100644 index 0000000000000..329d79cae62e8 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/IntentionallyFaultyConnectionProvider.scala @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.execution.datasources.jdbc.connection + +import java.sql.{Connection, Driver} + +import org.apache.spark.sql.jdbc.JdbcConnectionProvider + +private class IntentionallyFaultyConnectionProvider extends JdbcConnectionProvider { + IntentionallyFaultyConnectionProvider.constructed = true + throw new IllegalArgumentException("Intentional Exception") + override val name: String = "IntentionallyFaultyConnectionProvider" + override def canHandle(driver: Driver, options: Map[String, String]): Boolean = true + override def getConnection(driver: Driver, options: Map[String, String]): Connection = null +} + +private object IntentionallyFaultyConnectionProvider { + var constructed = false +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MSSQLConnectionProviderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MSSQLConnectionProviderSuite.scala index 249f1e36347ed..a5704e842e018 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MSSQLConnectionProviderSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MSSQLConnectionProviderSuite.scala @@ -17,35 +17,35 @@ package org.apache.spark.sql.execution.datasources.jdbc.connection +import java.sql.Driver + +import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions + class MSSQLConnectionProviderSuite extends ConnectionProviderSuiteBase { test("setAuthenticationConfigIfNeeded default parser must set authentication if not set") { - val driver = registerDriver(MSSQLConnectionProvider.driverClass) - val defaultProvider = new MSSQLConnectionProvider( - driver, options("jdbc:sqlserver://localhost/mssql")) - val customProvider = new MSSQLConnectionProvider( - driver, options("jdbc:sqlserver://localhost/mssql;jaasConfigurationName=custommssql")) + val provider = new MSSQLConnectionProvider() + val driver = registerDriver(provider.driverClass) - testProviders(defaultProvider, customProvider) + testProviders(driver, provider, options("jdbc:sqlserver://localhost/mssql"), + options("jdbc:sqlserver://localhost/mssql;jaasConfigurationName=custommssql")) } test("setAuthenticationConfigIfNeeded custom parser must set authentication if not set") { - val parserMethod = "IntentionallyNotExistingMethod" - val driver = registerDriver(MSSQLConnectionProvider.driverClass) - val defaultProvider = new MSSQLConnectionProvider( - driver, options("jdbc:sqlserver://localhost/mssql"), parserMethod) - val customProvider = new MSSQLConnectionProvider( - driver, - options("jdbc:sqlserver://localhost/mssql;jaasConfigurationName=custommssql"), - parserMethod) - - testProviders(defaultProvider, customProvider) + val provider = new MSSQLConnectionProvider() { + override val parserMethod: String = "IntentionallyNotExistingMethod" + } + val driver = registerDriver(provider.driverClass) + + testProviders(driver, provider, options("jdbc:sqlserver://localhost/mssql"), + options("jdbc:sqlserver://localhost/mssql;jaasConfigurationName=custommssql")) } private def testProviders( - defaultProvider: SecureConnectionProvider, - customProvider: SecureConnectionProvider) = { - assert(defaultProvider.appEntry !== customProvider.appEntry) - testSecureConnectionProvider(defaultProvider) - testSecureConnectionProvider(customProvider) + driver: Driver, + provider: SecureConnectionProvider, + defaultOptions: JDBCOptions, + customOptions: JDBCOptions) = { + 
testSecureConnectionProvider(provider, driver, defaultOptions) + testSecureConnectionProvider(provider, driver, customOptions) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MariaDBConnectionProviderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MariaDBConnectionProviderSuite.scala index 70cad2097eb43..f450662fcbe74 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MariaDBConnectionProviderSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/MariaDBConnectionProviderSuite.scala @@ -19,9 +19,9 @@ package org.apache.spark.sql.execution.datasources.jdbc.connection class MariaDBConnectionProviderSuite extends ConnectionProviderSuiteBase { test("setAuthenticationConfigIfNeeded must set authentication if not set") { - val driver = registerDriver(MariaDBConnectionProvider.driverClass) - val provider = new MariaDBConnectionProvider(driver, options("jdbc:mysql://localhost/mysql")) + val provider = new MariaDBConnectionProvider() + val driver = registerDriver(provider.driverClass) - testSecureConnectionProvider(provider) + testSecureConnectionProvider(provider, driver, options("jdbc:mysql://localhost/mysql")) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/OracleConnectionProviderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/OracleConnectionProviderSuite.scala index 13cde32ddbe4e..40e7f1191dccc 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/OracleConnectionProviderSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/OracleConnectionProviderSuite.scala @@ -19,10 +19,9 @@ package org.apache.spark.sql.execution.datasources.jdbc.connection class OracleConnectionProviderSuite extends ConnectionProviderSuiteBase { test("setAuthenticationConfigIfNeeded must set authentication if not set") { - val driver = registerDriver(OracleConnectionProvider.driverClass) - val provider = new OracleConnectionProvider(driver, - options("jdbc:oracle:thin:@//localhost/xe")) + val provider = new OracleConnectionProvider() + val driver = registerDriver(provider.driverClass) - testSecureConnectionProvider(provider) + testSecureConnectionProvider(provider, driver, options("jdbc:oracle:thin:@//localhost/xe")) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/PostgresConnectionProviderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/PostgresConnectionProviderSuite.scala index 8cef7652f9c54..ee43a7d9708c5 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/PostgresConnectionProviderSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/PostgresConnectionProviderSuite.scala @@ -19,14 +19,14 @@ package org.apache.spark.sql.execution.datasources.jdbc.connection class PostgresConnectionProviderSuite extends ConnectionProviderSuiteBase { test("setAuthenticationConfigIfNeeded must set authentication if not set") { - val driver = registerDriver(PostgresConnectionProvider.driverClass) - val defaultProvider = new PostgresConnectionProvider( - driver, options("jdbc:postgresql://localhost/postgres")) - val customProvider = new PostgresConnectionProvider( - driver, 
options(s"jdbc:postgresql://localhost/postgres?jaasApplicationName=custompgjdbc")) + val provider = new PostgresConnectionProvider() + val defaultOptions = options("jdbc:postgresql://localhost/postgres") + val customOptions = + options(s"jdbc:postgresql://localhost/postgres?jaasApplicationName=custompgjdbc") + val driver = registerDriver(provider.driverClass) - assert(defaultProvider.appEntry !== customProvider.appEntry) - testSecureConnectionProvider(defaultProvider) - testSecureConnectionProvider(customProvider) + assert(provider.appEntry(driver, defaultOptions) !== provider.appEntry(driver, customOptions)) + testSecureConnectionProvider(provider, driver, defaultOptions) + testSecureConnectionProvider(provider, driver, customOptions) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/TestDriver.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/TestDriver.scala new file mode 100644 index 0000000000000..6b57a95ed458b --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/TestDriver.scala @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.datasources.jdbc.connection + +import java.sql.{Connection, Driver, DriverPropertyInfo} +import java.util.Properties +import java.util.logging.Logger + +private[jdbc] class TestDriver() extends Driver { + override def connect(url: String, info: Properties): Connection = null + override def acceptsURL(url: String): Boolean = false + override def getPropertyInfo(url: String, info: Properties): Array[DriverPropertyInfo] = + Array.empty + override def getMajorVersion: Int = 0 + override def getMinorVersion: Int = 0 + override def jdbcCompliant(): Boolean = false + override def getParentLogger: Logger = null +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala index 9ff35c0946cc9..ffe8e66f3368a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonBenchmark.scala @@ -39,16 +39,9 @@ import org.apache.spark.sql.types._ * Results will be written to "benchmarks/JSONBenchmark-results.txt". 
* }}} */ - object JsonBenchmark extends SqlBasedBenchmark { import spark.implicits._ - private def prepareDataInfo(benchmark: Benchmark): Unit = { - // scalastyle:off println - benchmark.out.println("Preparing data for benchmarking ...") - // scalastyle:on println - } - def schemaInferring(rowsNum: Int, numIters: Int): Unit = { val benchmark = new Benchmark("JSON schema inferring", rowsNum, output = output) @@ -128,18 +121,6 @@ object JsonBenchmark extends SqlBasedBenchmark { .add("z", StringType) } - def writeWideRow(path: String, rowsNum: Int): StructType = { - val colsNum = 1000 - val fields = Seq.tabulate(colsNum)(i => StructField(s"col$i", IntegerType)) - val schema = StructType(fields) - - spark.range(rowsNum) - .select(Seq.tabulate(colsNum)(i => lit(i).as(s"col$i")): _*) - .write.json(path) - - schema - } - def countWideColumn(rowsNum: Int, numIters: Int): Unit = { val benchmark = new Benchmark("count a wide column", rowsNum, output = output) @@ -171,7 +152,7 @@ object JsonBenchmark extends SqlBasedBenchmark { withTempPath { path => prepareDataInfo(benchmark) - val schema = writeWideRow(path.getAbsolutePath, rowsNum) + val schema = writeWideRow(path.getAbsolutePath, rowsNum, 1000) benchmark.addCase("No encoding", numIters) { _ => spark.read diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonParsingOptionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonParsingOptionsSuite.scala index d27b5c4737a11..e9fe79a0641b9 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonParsingOptionsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonParsingOptionsSuite.scala @@ -18,12 +18,11 @@ package org.apache.spark.sql.execution.datasources.json import org.apache.spark.sql.{QueryTest, Row} -import org.apache.spark.sql.catalyst.json.JSONOptions import org.apache.spark.sql.test.SharedSparkSession -import org.apache.spark.sql.types.{DoubleType, StringType, StructType} +import org.apache.spark.sql.types.{StringType, StructType} /** - * Test cases for various [[JSONOptions]]. + * Test cases for various [[org.apache.spark.sql.catalyst.json.JSONOptions]]. 
*/ class JsonParsingOptionsSuite extends QueryTest with SharedSparkSession { import testImplicits._ @@ -74,14 +73,14 @@ class JsonParsingOptionsSuite extends QueryTest with SharedSparkSession { } test("allowUnquotedControlChars off") { - val str = """{"name": "a\u0001b"}""" + val str = "{\"name\": \"a\u0001b\"}" val df = spark.read.json(Seq(str).toDS()) assert(df.schema.head.name == "_corrupt_record") } test("allowUnquotedControlChars on") { - val str = """{"name": "a\u0001b"}""" + val str = "{\"name\": \"a\u0001b\"}" val df = spark.read.option("allowUnquotedControlChars", "true").json(Seq(str).toDS()) assert(df.schema.head.name == "name") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala index d9270024d5b28..76e05a2ed6ed7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala @@ -35,7 +35,7 @@ import org.apache.spark.sql.{functions => F, _} import org.apache.spark.sql.catalyst.json._ import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.execution.ExternalRDD -import org.apache.spark.sql.execution.datasources.{DataSource, InMemoryFileIndex, NoopCache} +import org.apache.spark.sql.execution.datasources.{CommonFileDataSourceSuite, DataSource, InMemoryFileIndex, NoopCache} import org.apache.spark.sql.execution.datasources.v2.json.JsonScanBuilder import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession @@ -49,9 +49,16 @@ class TestFileFilter extends PathFilter { override def accept(path: Path): Boolean = path.getParent.getName != "p=2" } -abstract class JsonSuite extends QueryTest with SharedSparkSession with TestJsonData { +abstract class JsonSuite + extends QueryTest + with SharedSparkSession + with TestJsonData + with CommonFileDataSourceSuite { + import testImplicits._ + override protected def dataSourceFormat = "json" + test("Type promotion") { def checkTypePromotion(expected: Any, actual: Any): Unit = { assert(expected.getClass == actual.getClass, diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReaderSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReaderSuite.scala index 719bf91e1786b..bfcef46339908 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReaderSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcColumnarBatchReaderSuite.scala @@ -17,16 +17,29 @@ package org.apache.spark.sql.execution.datasources.orc +import java.io.File + +import org.apache.hadoop.fs.Path +import org.apache.hadoop.mapreduce.{JobID, TaskAttemptID, TaskID, TaskType} +import org.apache.hadoop.mapreduce.lib.input.FileSplit +import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl import org.apache.orc.TypeDescription import org.apache.spark.sql.QueryTest import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.expressions.GenericInternalRow +import org.apache.spark.sql.catalyst.util.DateTimeUtils +import org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase import org.apache.spark.sql.execution.vectorized.{OnHeapColumnVector, WritableColumnVector} import org.apache.spark.sql.test.SharedSparkSession -import 
org.apache.spark.sql.types.{StructField, StructType} +import org.apache.spark.sql.types._ +import org.apache.spark.unsafe.types.UTF8String import org.apache.spark.unsafe.types.UTF8String.fromString class OrcColumnarBatchReaderSuite extends QueryTest with SharedSparkSession { + + import testImplicits._ + private val dataSchema = StructType.fromDDL("col1 int, col2 int") private val partitionSchema = StructType.fromDDL("p1 string, p2 string") private val partitionValues = InternalRow(fromString("partValue1"), fromString("partValue2")) @@ -77,4 +90,66 @@ class OrcColumnarBatchReaderSuite extends QueryTest with SharedSparkSession { assert(p1.getUTF8String(0) === partitionValues.getUTF8String(0)) } } + + test("SPARK-33593: partition column types") { + withTempPath { dir => + Seq(1).toDF().repartition(1).write.orc(dir.getCanonicalPath) + + val dataTypes = + Seq(StringType, BooleanType, ByteType, BinaryType, ShortType, IntegerType, LongType, + FloatType, DoubleType, DecimalType(25, 5), DateType, TimestampType) + + val constantValues = + Seq( + UTF8String.fromString("a string"), + true, + 1.toByte, + "Spark SQL".getBytes, + 2.toShort, + 3, + Long.MaxValue, + 0.25.toFloat, + 0.75D, + Decimal("1234.23456"), + DateTimeUtils.fromJavaDate(java.sql.Date.valueOf("2015-01-01")), + DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"))) + + dataTypes.zip(constantValues).foreach { case (dt, v) => + val schema = StructType(StructField("col1", IntegerType) :: StructField("pcol", dt) :: Nil) + val partitionValues = new GenericInternalRow(Array(v)) + val file = new File(SpecificParquetRecordReaderBase.listDirectory(dir).get(0)) + val fileSplit = new FileSplit(new Path(file.getCanonicalPath), 0L, file.length, Array.empty) + val taskConf = sqlContext.sessionState.newHadoopConf() + val orcFileSchema = TypeDescription.fromString(schema.simpleString) + val vectorizedReader = new OrcColumnarBatchReader(4096) + val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0) + val taskAttemptContext = new TaskAttemptContextImpl(taskConf, attemptId) + + try { + vectorizedReader.initialize(fileSplit, taskAttemptContext) + vectorizedReader.initBatch( + orcFileSchema, + schema.toArray, + Array(0, -1), + Array(-1, 0), + partitionValues) + vectorizedReader.nextKeyValue() + val row = vectorizedReader.getCurrentValue.getRow(0) + + // Use `GenericMutableRow` by explicitly copying rather than `ColumnarBatch` + // in order to use get(...) method which is not implemented in `ColumnarBatch`. 
+ val actual = row.copy().get(1, dt) + val expected = v + if (dt.isInstanceOf[BinaryType]) { + assert(actual.asInstanceOf[Array[Byte]] + sameElements expected.asInstanceOf[Array[Byte]]) + } else { + assert(actual == expected) + } + } finally { + vectorizedReader.close() + } + } + } + } } diff --git a/sql/core/v2.3/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala similarity index 98% rename from sql/core/v2.3/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala rename to sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala index afc83d7c395f0..681ed91afaa12 100644 --- a/sql/core/v2.3/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala @@ -39,10 +39,6 @@ import org.apache.spark.sql.types._ /** * A test suite that tests Apache ORC filter API based filter pushdown optimization. - * OrcFilterSuite and HiveOrcFilterSuite is logically duplicated to provide the same test coverage. - * The difference are the packages containing 'Predicate' and 'SearchArgument' classes. - * - OrcFilterSuite uses 'org.apache.orc.storage.ql.io.sarg' package. - * - HiveOrcFilterSuite uses 'org.apache.hadoop.hive.ql.io.sarg' package. */ class OrcFilterSuite extends OrcTest with SharedSparkSession { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala index d2970ef1bb63d..ead2c2cf1b70f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala @@ -217,7 +217,6 @@ abstract class OrcQueryTest extends OrcTest { } } - // Hive supports zlib, snappy and none for Hive 1.2.1. 
test("Compression options for writing to an ORC file (SNAPPY, ZLIB and NONE)") { withTempPath { file => spark.range(0, 10).write diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala index b70fd7476ed98..c763f4c9428c8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala @@ -33,7 +33,7 @@ import org.scalatest.BeforeAndAfterAll import org.apache.spark.{SPARK_VERSION_SHORT, SparkException} import org.apache.spark.sql.{Row, SPARK_VERSION_METADATA_KEY} -import org.apache.spark.sql.execution.datasources.SchemaMergeUtils +import org.apache.spark.sql.execution.datasources.{CommonFileDataSourceSuite, SchemaMergeUtils} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.{LongType, StructField, StructType} @@ -41,9 +41,11 @@ import org.apache.spark.util.Utils case class OrcData(intField: Int, stringField: String) -abstract class OrcSuite extends OrcTest with BeforeAndAfterAll { +abstract class OrcSuite extends OrcTest with BeforeAndAfterAll with CommonFileDataSourceSuite { import testImplicits._ + override protected def dataSourceFormat = "orc" + var orcTableDir: File = null var orcTableAsDir: File = null @@ -120,8 +122,7 @@ abstract class OrcSuite extends OrcTest with BeforeAndAfterAll { } } - protected def testSelectiveDictionaryEncoding(isSelective: Boolean, - isHive23: Boolean = false): Unit = { + protected def testSelectiveDictionaryEncoding(isSelective: Boolean, isHiveOrc: Boolean): Unit = { val tableName = "orcTable" withTempDir { dir => @@ -174,7 +175,7 @@ abstract class OrcSuite extends OrcTest with BeforeAndAfterAll { // Hive 0.11 and RLE v2 is introduced in Hive 0.12 ORC with more improvements. 
// For more details, see https://orc.apache.org/specification/ assert(stripe.getColumns(1).getKind === DICTIONARY_V2) - if (isSelective || isHive23) { + if (isSelective || isHiveOrc) { assert(stripe.getColumns(2).getKind === DIRECT_V2) } else { assert(stripe.getColumns(2).getKind === DICTIONARY_V2) @@ -336,7 +337,7 @@ abstract class OrcSuite extends OrcTest with BeforeAndAfterAll { } // Test all the valid options of spark.sql.orc.compression.codec - Seq("NONE", "UNCOMPRESSED", "SNAPPY", "ZLIB", "LZO").foreach { c => + Seq("NONE", "UNCOMPRESSED", "SNAPPY", "ZLIB", "LZO", "ZSTD").foreach { c => withSQLConf(SQLConf.ORC_COMPRESSION.key -> c) { val expected = if (c == "UNCOMPRESSED") "NONE" else c assert(new OrcOptions(Map.empty[String, String], conf).compressionCodec == expected) @@ -581,7 +582,7 @@ class OrcSourceSuite extends OrcSuite with SharedSparkSession { } test("Enforce direct encoding column-wise selectively") { - testSelectiveDictionaryEncoding(isSelective = true) + testSelectiveDictionaryEncoding(isSelective = true, isHiveOrc = false) } test("SPARK-11412 read and merge orc schemas in parallel") { @@ -593,4 +594,12 @@ class OrcSourceSuite extends OrcSuite with SharedSparkSession { val df = readResourceOrcFile("test-data/TestStringDictionary.testRowIndex.orc") assert(df.where("str < 'row 001000'").count() === 1000) } + + test("SPARK-33978: Write and read a file with ZSTD compression") { + withTempPath { dir => + val path = dir.getAbsolutePath + spark.range(3).write.option("compression", "zstd").orc(path) + checkAnswer(spark.read.orc(path), Seq(Row(0), Row(1), Row(2))) + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcTest.scala index aec61acda5444..4243318ac1dd8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcTest.scala @@ -46,7 +46,6 @@ import org.apache.spark.sql.internal.SQLConf.ORC_IMPLEMENTATION * -> OrcPartitionDiscoverySuite * -> HiveOrcPartitionDiscoverySuite * -> OrcFilterSuite - * -> HiveOrcFilterSuite */ abstract class OrcTest extends QueryTest with FileBasedDataSourceTest with BeforeAndAfterAll { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcV2SchemaPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcV2SchemaPruningSuite.scala index 6c9bd32913178..378b52f9c6c8c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcV2SchemaPruningSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcV2SchemaPruningSuite.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.execution.datasources.orc import org.apache.spark.SparkConf -import org.apache.spark.sql.{DataFrame, Row} +import org.apache.spark.sql.DataFrame import org.apache.spark.sql.catalyst.parser.CatalystSqlParser import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper import org.apache.spark.sql.execution.datasources.SchemaPruningSuite diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetCommitterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetCommitterSuite.scala index 4b2437803d645..7f408dbba5099 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetCommitterSuite.scala 
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetCommitterSuite.scala @@ -17,8 +17,6 @@ package org.apache.spark.sql.execution.datasources.parquet -import java.io.FileNotFoundException - import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileStatus, Path} import org.apache.hadoop.mapreduce.{JobContext, TaskAttemptContext} @@ -149,7 +147,7 @@ private object MarkingFileOutput { * @param outputPath destination directory * @param conf configuration to create the FS with * @return the status of the marker - * @throws FileNotFoundException if the marker is absent + * @throws java.io.FileNotFoundException if the marker is absent */ def checkMarker(outputPath: Path, conf: Configuration): FileStatus = { outputPath.getFileSystem(conf).getFileStatus(new Path(outputPath, "marker")) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormatSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormatSuite.scala index e65f4d12bf7f2..c52b57eb31e4d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormatSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormatSuite.scala @@ -19,12 +19,19 @@ package org.apache.spark.sql.execution.datasources.parquet import org.apache.hadoop.fs.{FileSystem, Path} -import org.apache.spark.SparkException +import org.apache.spark.{SparkConf, SparkException} import org.apache.spark.sql.QueryTest +import org.apache.spark.sql.execution.datasources.CommonFileDataSourceSuite import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession -class ParquetFileFormatSuite extends QueryTest with ParquetTest with SharedSparkSession { +abstract class ParquetFileFormatSuite + extends QueryTest + with ParquetTest + with SharedSparkSession + with CommonFileDataSourceSuite { + + override protected def dataSourceFormat = "parquet" test("read parquet footers in parallel") { def testReadFooters(ignoreCorruptFiles: Boolean): Unit = { @@ -57,3 +64,17 @@ class ParquetFileFormatSuite extends QueryTest with ParquetTest with SharedSpark assert(exception.getMessage().contains("Could not read footer for file")) } } + +class ParquetFileFormatV1Suite extends ParquetFileFormatSuite { + override protected def sparkConf: SparkConf = + super + .sparkConf + .set(SQLConf.USE_V1_SOURCE_LIST, "parquet") +} + +class ParquetFileFormatV2Suite extends ParquetFileFormatSuite { + override protected def sparkConf: SparkConf = + super + .sparkConf + .set(SQLConf.USE_V1_SOURCE_LIST, "") +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala index 5689b9d05d7bb..24a1ba124e56b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala @@ -45,6 +45,7 @@ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf.ParquetOutputTimestampType import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ +import org.apache.spark.tags.ExtendedSQLTest import org.apache.spark.util.{AccumulatorContext, AccumulatorV2} /** @@ -585,7 +586,8 @@ abstract class ParquetFilterSuite 
extends QueryTest with ParquetTest with Shared Seq(true, false).foreach { java8Api => withSQLConf( SQLConf.DATETIME_JAVA8API_ENABLED.key -> java8Api.toString, - SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> "CORRECTED") { + SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> "CORRECTED", + SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key -> "CORRECTED") { // spark.sql.parquet.outputTimestampType = TIMESTAMP_MILLIS val millisData = Seq( "1000-06-14 08:28:53.123", @@ -1571,6 +1573,7 @@ abstract class ParquetFilterSuite extends QueryTest with ParquetTest with Shared } } +@ExtendedSQLTest class ParquetV1FilterSuite extends ParquetFilterSuite { override protected def sparkConf: SparkConf = super @@ -1650,6 +1653,7 @@ class ParquetV1FilterSuite extends ParquetFilterSuite { } } +@ExtendedSQLTest class ParquetV2FilterSuite extends ParquetFilterSuite { // TODO: enable Parquet V2 write path after file source V2 writers are workable. override protected def sparkConf: SparkConf = diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala index 2dc8a062bb73d..c69f2e6911ba3 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala @@ -19,7 +19,6 @@ package org.apache.spark.sql.execution.datasources.parquet import java.nio.file.{Files, Paths, StandardCopyOption} import java.sql.{Date, Timestamp} -import java.time._ import java.util.Locale import scala.collection.JavaConverters._ @@ -791,7 +790,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession Seq(1).toDF().repartition(1).write.parquet(dir.getCanonicalPath) val dataTypes = - Seq(StringType, BooleanType, ByteType, ShortType, IntegerType, LongType, + Seq(StringType, BooleanType, ByteType, BinaryType, ShortType, IntegerType, LongType, FloatType, DoubleType, DecimalType(25, 5), DateType, TimestampType) val constantValues = @@ -799,6 +798,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession UTF8String.fromString("a string"), true, 1.toByte, + "Spark SQL".getBytes, 2.toShort, 3, Long.MaxValue, @@ -826,7 +826,11 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession // in order to use get(...) method which is not implemented in `ColumnarBatch`. 
val actual = row.copy().get(1, dt) val expected = v - assert(actual == expected) + if (dt.isInstanceOf[BinaryType]) { + assert(actual.asInstanceOf[Array[Byte]] sameElements expected.asInstanceOf[Array[Byte]]) + } else { + assert(actual == expected) + } } finally { vectorizedReader.close() } @@ -859,20 +863,24 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession } } - test("Write Spark version into Parquet metadata") { - withTempPath { dir => - val path = dir.getAbsolutePath - spark.range(1).repartition(1).write.parquet(path) - val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0) - - val conf = new Configuration() - val hadoopInputFile = HadoopInputFile.fromPath(new Path(file), conf) - val parquetReadOptions = HadoopReadOptions.builder(conf).build() - val m = ParquetFileReader.open(hadoopInputFile, parquetReadOptions) - val metaData = m.getFileMetaData.getKeyValueMetaData + private def getMetaData(dir: java.io.File): Map[String, String] = { + val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0) + val conf = new Configuration() + val hadoopInputFile = HadoopInputFile.fromPath(new Path(file), conf) + val parquetReadOptions = HadoopReadOptions.builder(conf).build() + val m = ParquetFileReader.open(hadoopInputFile, parquetReadOptions) + val metadata = try { + m.getFileMetaData.getKeyValueMetaData + } finally { m.close() + } + metadata.asScala.toMap + } - assert(metaData.get(SPARK_VERSION_METADATA_KEY) === SPARK_VERSION_SHORT) + test("Write Spark version into Parquet metadata") { + withTempPath { dir => + spark.range(1).repartition(1).write.parquet(dir.getAbsolutePath) + assert(getMetaData(dir)(SPARK_VERSION_METADATA_KEY) === SPARK_VERSION_SHORT) } } @@ -947,7 +955,9 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession rowFunc: Int => (String, String), toJavaType: String => T, checkDefaultLegacyRead: String => Unit, - tsOutputType: String = "TIMESTAMP_MICROS"): Unit = { + tsOutputType: String = "TIMESTAMP_MICROS", + inWriteConf: String = SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key, + inReadConf: String = SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_READ.key): Unit = { withTempPaths(2) { paths => paths.foreach(_.delete()) val path2_4 = getResourceParquetFilePath("test-data/" + fileName) @@ -958,18 +968,20 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key -> tsOutputType) { checkDefaultLegacyRead(path2_4) // By default we should fail to write ancient datetime values. - val e = intercept[SparkException](df.write.parquet(path3_0)) - assert(e.getCause.getCause.getCause.isInstanceOf[SparkUpgradeException]) - withSQLConf(SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString) { + if (tsOutputType != "INT96") { + val e = intercept[SparkException](df.write.parquet(path3_0)) + assert(e.getCause.getCause.getCause.isInstanceOf[SparkUpgradeException]) + } + withSQLConf(inWriteConf -> CORRECTED.toString) { df.write.mode("overwrite").parquet(path3_0) } - withSQLConf(SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) { + withSQLConf(inWriteConf -> LEGACY.toString) { df.write.parquet(path3_0_rebase) } } // For Parquet files written by Spark 3.0, we know the writer info and don't need the // config to guide the rebase behavior. 
- withSQLConf(SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_READ.key -> LEGACY.toString) { + withSQLConf(inReadConf -> LEGACY.toString) { checkAnswer( spark.read.format("parquet").load(path2_4, path3_0, path3_0_rebase), (0 until N).flatMap { i => @@ -1011,15 +1023,22 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession java.sql.Timestamp.valueOf, checkDefaultRead, tsOutputType = "TIMESTAMP_MILLIS") - // INT96 is a legacy timestamp format and we always rebase the seconds for it. + } + } + Seq( + "2_4_5" -> failInRead _, + "2_4_6" -> successInRead _).foreach { case (version, checkDefaultRead) => + withAllParquetReaders { Seq("plain", "dict").foreach { enc => - checkAnswer(readResourceParquetFile( - s"test-data/before_1582_timestamp_int96_${enc}_v$version.snappy.parquet"), - Seq.tabulate(N) { i => - Row( - java.sql.Timestamp.valueOf("1001-01-01 01:02:03.123456"), - java.sql.Timestamp.valueOf(s"1001-01-0${i + 1} 01:02:03.123456")) - }) + checkReadMixedFiles( + s"before_1582_timestamp_int96_${enc}_v$version.snappy.parquet", + "timestamp", + (i: Int) => ("1001-01-01 01:02:03.123456", s"1001-01-0${i + 1} 01:02:03.123456"), + java.sql.Timestamp.valueOf, + checkDefaultRead, + tsOutputType = "INT96", + inWriteConf = SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key, + inReadConf = SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_READ.key) } } } @@ -1029,15 +1048,31 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession val N = 8 Seq(false, true).foreach { dictionaryEncoding => Seq( - ("TIMESTAMP_MILLIS", "1001-01-01 01:02:03.123", "1001-01-07 01:09:05.123"), - ("TIMESTAMP_MICROS", "1001-01-01 01:02:03.123456", "1001-01-07 01:09:05.123456"), - ("INT96", "1001-01-01 01:02:03.123456", "1001-01-01 01:02:03.123456") - ).foreach { case (outType, tsStr, nonRebased) => + ( + "TIMESTAMP_MILLIS", + "1001-01-01 01:02:03.123", + "1001-01-07 01:09:05.123", + SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key, + SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_READ.key), + ( + "TIMESTAMP_MICROS", + "1001-01-01 01:02:03.123456", + "1001-01-07 01:09:05.123456", + SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key, + SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_READ.key), + ( + "INT96", + "1001-01-01 01:02:03.123456", + "1001-01-07 01:09:05.123456", + SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key, + SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_READ.key + ) + ).foreach { case (outType, tsStr, nonRebased, inWriteConf, inReadConf) => withClue(s"output type $outType") { withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key -> outType) { withTempPath { dir => val path = dir.getAbsolutePath - withSQLConf(SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) { + withSQLConf(inWriteConf -> LEGACY.toString) { Seq.tabulate(N)(_ => tsStr).toDF("tsS") .select($"tsS".cast("timestamp").as("ts")) .repartition(1) @@ -1050,8 +1085,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession // The file metadata indicates if it needs rebase or not, so we can always get the // correct result regardless of the "rebase mode" config. 
Seq(LEGACY, CORRECTED, EXCEPTION).foreach { mode => - withSQLConf( - SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_READ.key -> mode.toString) { + withSQLConf(inReadConf -> mode.toString) { checkAnswer( spark.read.parquet(path), Seq.tabulate(N)(_ => Row(Timestamp.valueOf(tsStr)))) @@ -1109,6 +1143,59 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession } } } + + test("SPARK-33163: write the metadata key 'org.apache.spark.legacyDateTime'") { + def saveTs(dir: java.io.File): Unit = { + Seq(Timestamp.valueOf("2020-10-15 01:02:03")).toDF() + .repartition(1) + .write + .parquet(dir.getAbsolutePath) + } + withSQLConf(SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) { + withTempPath { dir => + saveTs(dir) + assert(getMetaData(dir)(SPARK_LEGACY_DATETIME) === "") + } + } + Seq(CORRECTED, EXCEPTION).foreach { mode => + withSQLConf(SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> mode.toString) { + withTempPath { dir => + saveTs(dir) + assert(getMetaData(dir).get(SPARK_LEGACY_DATETIME).isEmpty) + } + } + } + } + + test("SPARK-33160: write the metadata key 'org.apache.spark.legacyINT96'") { + def saveTs(dir: java.io.File, ts: String = "1000-01-01 01:02:03"): Unit = { + Seq(Timestamp.valueOf(ts)).toDF() + .repartition(1) + .write + .parquet(dir.getAbsolutePath) + } + withSQLConf(SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) { + withTempPath { dir => + saveTs(dir) + assert(getMetaData(dir)(SPARK_LEGACY_INT96) === "") + } + } + withSQLConf(SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString) { + withTempPath { dir => + saveTs(dir) + assert(getMetaData(dir).get(SPARK_LEGACY_INT96).isEmpty) + } + } + withSQLConf(SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key -> EXCEPTION.toString) { + withTempPath { dir => intercept[SparkException] { saveTs(dir) } } + } + withSQLConf(SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key -> EXCEPTION.toString) { + withTempPath { dir => + saveTs(dir, "2020-10-22 01:02:03") + assert(getMetaData(dir).get(SPARK_LEGACY_INT96).isEmpty) + } + } + } } class JobCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetInteroperabilitySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetInteroperabilitySuite.scala index 8c4eedfde76cd..2fe5953cbe12e 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetInteroperabilitySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetInteroperabilitySuite.scala @@ -21,7 +21,7 @@ import java.io.File import java.time.ZoneOffset import org.apache.commons.io.FileUtils -import org.apache.hadoop.fs.{FileSystem, Path, PathFilter} +import org.apache.hadoop.fs.{Path, PathFilter} import org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER import org.apache.parquet.hadoop.ParquetFileReader import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName @@ -183,7 +183,7 @@ class ParquetInteroperabilitySuite extends ParquetCompatibilityTest with SharedS val oneBlockColumnMeta = oneBlockMeta.getColumns().get(0) // This is the important assert. Column stats are written, but they are ignored // when the data is read back as mentioned above, b/c int96 is unsigned. This - // assert makes sure this holds even if we change parquet versions (if eg. 
there + // assert makes sure this holds even if we change parquet versions (if e.g. there // were ever statistics even on unsigned columns). assert(!oneBlockColumnMeta.getStatistics.hasNonNullValue) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala index accd04592bec5..400f4d8e1b156 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala @@ -23,8 +23,6 @@ import java.sql.{Date, Timestamp} import java.time.{ZoneId, ZoneOffset} import java.util.{Calendar, Locale} -import scala.collection.mutable.ArrayBuffer - import com.google.common.io.Files import org.apache.hadoop.fs.Path import org.apache.parquet.hadoop.ParquetOutputFormat @@ -1159,7 +1157,7 @@ class ParquetV1PartitionDiscoverySuite extends ParquetPartitionDiscoverySuite { test("SPARK-21463: MetadataLogFileIndex should respect userSpecifiedSchema for partition cols") { withTempDir { tempDir => val output = new File(tempDir, "output").toString - val checkpoint = new File(tempDir, "chkpoint").toString + val checkpoint = new File(tempDir, "checkpoint").toString try { val stream = MemoryStream[(String, Int)] val df = stream.toDS().toDF("time", "value") @@ -1305,7 +1303,7 @@ class ParquetV2PartitionDiscoverySuite extends ParquetPartitionDiscoverySuite { test("SPARK-21463: MetadataLogFileIndex should respect userSpecifiedSchema for partition cols") { withTempDir { tempDir => val output = new File(tempDir, "output").toString - val checkpoint = new File(tempDir, "chkpoint").toString + val checkpoint = new File(tempDir, "checkpoint").toString try { val stream = MemoryStream[(String, Int)] val df = stream.toDS().toDF("time", "value") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala index 05d305a9b52ba..8f85fe3c52583 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala @@ -857,7 +857,7 @@ class ParquetV1QuerySuite extends ParquetQuerySuite { val df = spark.range(10).select(Seq.tabulate(11) {i => ('id + i).as(s"c$i")} : _*) df.write.mode(SaveMode.Overwrite).parquet(path) - // donot return batch, because whole stage codegen is disabled for wide table (>200 columns) + // do not return batch - whole stage codegen is disabled for wide table (>200 columns) val df2 = spark.read.parquet(path) val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get assert(!fileScan2.asInstanceOf[FileSourceScanExec].supportsColumnar) @@ -890,7 +890,7 @@ class ParquetV2QuerySuite extends ParquetQuerySuite { val df = spark.range(10).select(Seq.tabulate(11) {i => ('id + i).as(s"c$i")} : _*) df.write.mode(SaveMode.Overwrite).parquet(path) - // donot return batch, because whole stage codegen is disabled for wide table (>200 columns) + // do not return batch - whole stage codegen is disabled for wide table (>200 columns) val df2 = spark.read.parquet(path) val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[BatchScanExec]).get val parquetScan2 
= fileScan2.asInstanceOf[BatchScanExec].scan.asInstanceOf[ParquetScan] diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaPruningSuite.scala index c64e95078e916..cab93bd96fff4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaPruningSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaPruningSuite.scala @@ -25,6 +25,7 @@ import org.apache.spark.sql.execution.datasources.SchemaPruningSuite import org.apache.spark.sql.execution.datasources.v2.BatchScanExec import org.apache.spark.sql.execution.datasources.v2.parquet.ParquetScan import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.tags.ExtendedSQLTest abstract class ParquetSchemaPruningSuite extends SchemaPruningSuite with AdaptiveSparkPlanHelper { override protected val dataSourceName: String = "parquet" @@ -33,6 +34,7 @@ abstract class ParquetSchemaPruningSuite extends SchemaPruningSuite with Adaptiv } +@ExtendedSQLTest class ParquetV1SchemaPruningSuite extends ParquetSchemaPruningSuite { override protected def sparkConf: SparkConf = super @@ -40,6 +42,7 @@ class ParquetV1SchemaPruningSuite extends ParquetSchemaPruningSuite { .set(SQLConf.USE_V1_SOURCE_LIST, "parquet") } +@ExtendedSQLTest class ParquetV2SchemaPruningSuite extends ParquetSchemaPruningSuite { // TODO: enable Parquet V2 write path after file source V2 writers are workable. override protected def sparkConf: SparkConf = diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala index 7990b1c27437a..e97c6cd29709c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala @@ -23,7 +23,7 @@ import scala.reflect.runtime.universe.TypeTag import org.apache.parquet.io.ParquetDecodingException import org.apache.parquet.schema.{MessageType, MessageTypeParser} -import org.apache.spark.{SparkConf, SparkException} +import org.apache.spark.SparkException import org.apache.spark.sql.catalyst.ScalaReflection import org.apache.spark.sql.execution.QueryExecutionException import org.apache.spark.sql.execution.datasources.SchemaColumnConvertNotSupportedException diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala index 7e97994476694..1eb32ed285799 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/text/TextSuite.scala @@ -26,14 +26,17 @@ import org.apache.hadoop.io.compress.GzipCodec import org.apache.spark.{SparkConf, TestUtils} import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row, SaveMode} +import org.apache.spark.sql.execution.datasources.CommonFileDataSourceSuite import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.{StringType, StructType} import org.apache.spark.util.Utils -abstract class TextSuite extends QueryTest with SharedSparkSession { +abstract 
class TextSuite extends QueryTest with SharedSparkSession with CommonFileDataSourceSuite { import testImplicits._ + override protected def dataSourceFormat = "text" + test("reading text file") { verifyFrame(spark.read.format("text").load(testFile)) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala index c3bcf86c1ed27..1a4f08418f8d3 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalogSuite.scala @@ -29,7 +29,7 @@ import org.scalatest.BeforeAndAfter import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.analysis.{NamespaceAlreadyExistsException, NoSuchNamespaceException, NoSuchTableException, TableAlreadyExistsException} import org.apache.spark.sql.catalyst.parser.CatalystSqlParser -import org.apache.spark.sql.connector.catalog.{CatalogV2Util, Identifier, NamespaceChange, SupportsNamespaces, TableCatalog, TableChange, V1Table} +import org.apache.spark.sql.connector.catalog.{CatalogV2Util, Identifier, NamespaceChange, TableCatalog, TableChange, V1Table} import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.{DoubleType, IntegerType, LongType, StringType, StructField, StructType, TimestampType} import org.apache.spark.sql.util.CaseInsensitiveStringMap @@ -46,7 +46,7 @@ abstract class V2SessionCatalogBaseSuite extends SharedSparkSession with BeforeA val testIdent: Identifier = Identifier.of(testNs, "test_table") def newCatalog(): V2SessionCatalog = { - val newCatalog = new V2SessionCatalog(spark.sessionState.catalog, spark.sessionState.conf) + val newCatalog = new V2SessionCatalog(spark.sessionState.catalog) newCatalog.initialize("test", CaseInsensitiveStringMap.empty()) newCatalog } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalogSuite.scala index b308934ba03c0..2fd976e0b9e17 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/v2/jdbc/JDBCTableCatalogSuite.scala @@ -19,8 +19,13 @@ package org.apache.spark.sql.execution.datasources.v2.jdbc import java.sql.{Connection, DriverManager} import java.util.Properties +import org.apache.log4j.Level + import org.apache.spark.SparkConf -import org.apache.spark.sql.{QueryTest, Row} +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} +import org.apache.spark.sql.catalyst.analysis.{NoSuchNamespaceException, TableAlreadyExistsException} +import org.apache.spark.sql.catalyst.parser.ParseException +import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ import org.apache.spark.util.Utils @@ -63,6 +68,8 @@ class JDBCTableCatalogSuite extends QueryTest with SharedSparkSession { test("show tables") { checkAnswer(sql("SHOW TABLES IN h2.test"), Seq(Row("test", "people"))) + // Check not existing namespace + checkAnswer(sql("SHOW TABLES IN h2.bad_test"), Seq()) } test("drop a table and test whether the table exists") { @@ -72,6 +79,17 @@ class JDBCTableCatalogSuite extends QueryTest with SharedSparkSession { 
checkAnswer(sql("SHOW TABLES IN h2.test"), Seq(Row("test", "to_drop"), Row("test", "people"))) sql("DROP TABLE h2.test.to_drop") checkAnswer(sql("SHOW TABLES IN h2.test"), Seq(Row("test", "people"))) + Seq( + "h2.test.not_existing_table" -> + "Table or view not found: h2.test.not_existing_table", + "h2.bad_test.not_existing_table" -> + "Table or view not found: h2.bad_test.not_existing_table" + ).foreach { case (table, expectedMsg) => + val msg = intercept[AnalysisException] { + sql(s"DROP TABLE $table") + }.getMessage + assert(msg.contains(expectedMsg)) + } } test("rename a table") { @@ -87,6 +105,34 @@ class JDBCTableCatalogSuite extends QueryTest with SharedSparkSession { sql("SHOW TABLES IN h2.test"), Seq(Row("test", "dst_table"), Row("test", "people"))) } + // Rename not existing table or namespace + val exp1 = intercept[AnalysisException] { + sql("ALTER TABLE h2.test.not_existing_table RENAME TO test.dst_table") + } + assert(exp1.getMessage.contains( + "Table or view not found: h2.test.not_existing_table")) + val exp2 = intercept[AnalysisException] { + sql("ALTER TABLE h2.bad_test.not_existing_table RENAME TO test.dst_table") + } + assert(exp2.getMessage.contains( + "Table or view not found: h2.bad_test.not_existing_table")) + // Rename to an existing table + withTable("h2.test.dst_table") { + withConnection { conn => + conn.prepareStatement("""CREATE TABLE "test"."dst_table" (id INTEGER)""").executeUpdate() + } + withTable("h2.test.src_table") { + withConnection { conn => + conn.prepareStatement("""CREATE TABLE "test"."src_table" (id INTEGER)""").executeUpdate() + } + val exp = intercept[TableAlreadyExistsException] { + sql("ALTER TABLE h2.test.src_table RENAME TO test.dst_table") + } + assert(exp.getMessage.contains( + "Failed table renaming from test.src_table to test.dst_table")) + assert(exp.cause.get.getMessage.contains("Table \"dst_table\" already exists")) + } + } } test("load a table") { @@ -95,82 +141,279 @@ class JDBCTableCatalogSuite extends QueryTest with SharedSparkSession { .add("NAME", StringType) .add("ID", IntegerType) assert(t.schema === expectedSchema) + Seq("h2.test.not_existing_table", "h2.bad_test.not_existing_table").foreach { table => + val msg = intercept[AnalysisException] { + spark.table(table).schema + }.getMessage + assert(msg.contains("Table or view not found")) + } } test("create a table") { withTable("h2.test.new_table") { - // TODO (SPARK-32427): Omit USING in CREATE TABLE - sql("CREATE TABLE h2.test.new_table(i INT, j STRING) USING _") + sql("CREATE TABLE h2.test.new_table(i INT, j STRING)") checkAnswer( sql("SHOW TABLES IN h2.test"), Seq(Row("test", "people"), Row("test", "new_table"))) } + withTable("h2.test.new_table") { + sql("CREATE TABLE h2.test.new_table(i INT, j STRING)") + val msg = intercept[AnalysisException] { + sql("CREATE TABLE h2.test.new_table(i INT, j STRING)") + }.getMessage + assert(msg.contains("Table test.new_table already exists")) + } + val exp = intercept[NoSuchNamespaceException] { + sql("CREATE TABLE h2.bad_test.new_table(i INT, j STRING)") + } + assert(exp.getMessage.contains("Failed table creation: bad_test.new_table")) + assert(exp.cause.get.getMessage.contains("Schema \"bad_test\" not found")) } - test("alter table ... add column") { - withTable("h2.test.alt_table") { - sql("CREATE TABLE h2.test.alt_table (ID INTEGER) USING _") - sql("ALTER TABLE h2.test.alt_table ADD COLUMNS (C1 INTEGER, C2 STRING)") - var t = spark.table("h2.test.alt_table") + test("ALTER TABLE ... 
add column") { + val tableName = "h2.test.alt_table" + withTable(tableName) { + sql(s"CREATE TABLE $tableName (ID INTEGER)") + sql(s"ALTER TABLE $tableName ADD COLUMNS (C1 INTEGER, C2 STRING)") + var t = spark.table(tableName) var expectedSchema = new StructType() .add("ID", IntegerType) .add("C1", IntegerType) .add("C2", StringType) assert(t.schema === expectedSchema) - sql("ALTER TABLE h2.test.alt_table ADD COLUMNS (C3 DOUBLE)") - t = spark.table("h2.test.alt_table") - expectedSchema = expectedSchema.add("C3", DoubleType) + sql(s"ALTER TABLE $tableName ADD COLUMNS (c3 DOUBLE)") + t = spark.table(tableName) + expectedSchema = expectedSchema.add("c3", DoubleType) assert(t.schema === expectedSchema) + // Add already existing column + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName ADD COLUMNS (c3 DOUBLE)") + }.getMessage + assert(msg.contains("Cannot add column, because c3 already exists")) + } + // Add a column to not existing table and namespace + Seq("h2.test.not_existing_table", "h2.bad_test.not_existing_table").foreach { table => + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $table ADD COLUMNS (C4 STRING)") + }.getMessage + assert(msg.contains("Table not found")) } } - test("alter table ... rename column") { - withTable("h2.test.alt_table") { - sql("CREATE TABLE h2.test.alt_table (ID INTEGER) USING _") - sql("ALTER TABLE h2.test.alt_table RENAME COLUMN ID TO C") - val t = spark.table("h2.test.alt_table") - val expectedSchema = new StructType().add("C", IntegerType) + test("ALTER TABLE ... rename column") { + val tableName = "h2.test.alt_table" + withTable(tableName) { + sql(s"CREATE TABLE $tableName (id INTEGER, C0 INTEGER)") + sql(s"ALTER TABLE $tableName RENAME COLUMN id TO C") + val t = spark.table(tableName) + val expectedSchema = new StructType() + .add("C", IntegerType) + .add("C0", IntegerType) assert(t.schema === expectedSchema) + // Rename to already existing column + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName RENAME COLUMN C TO C0") + }.getMessage + assert(msg.contains("Cannot rename column, because C0 already exists")) + } + // Rename a column in not existing table and namespace + Seq("h2.test.not_existing_table", "h2.bad_test.not_existing_table").foreach { table => + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $table RENAME COLUMN ID TO C") + }.getMessage + assert(msg.contains("Table not found")) } } - test("alter table ... drop column") { - withTable("h2.test.alt_table") { - sql("CREATE TABLE h2.test.alt_table (C1 INTEGER, C2 INTEGER) USING _") - sql("ALTER TABLE h2.test.alt_table DROP COLUMN C1") - val t = spark.table("h2.test.alt_table") + test("ALTER TABLE ... 
drop column") { + val tableName = "h2.test.alt_table" + withTable(tableName) { + sql(s"CREATE TABLE $tableName (C1 INTEGER, C2 INTEGER, c3 INTEGER)") + sql(s"ALTER TABLE $tableName DROP COLUMN C1") + sql(s"ALTER TABLE $tableName DROP COLUMN c3") + val t = spark.table(tableName) val expectedSchema = new StructType().add("C2", IntegerType) assert(t.schema === expectedSchema) + // Drop not existing column + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName DROP COLUMN bad_column") + }.getMessage + assert(msg.contains("Cannot delete missing field bad_column in test.alt_table schema")) + } + // Drop a column to not existing table and namespace + Seq("h2.test.not_existing_table", "h2.bad_test.not_existing_table").foreach { table => + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $table DROP COLUMN C1") + }.getMessage + assert(msg.contains("Table not found")) + } + } + + test("ALTER TABLE ... update column type") { + val tableName = "h2.test.alt_table" + withTable(tableName) { + sql(s"CREATE TABLE $tableName (ID INTEGER, deptno INTEGER)") + sql(s"ALTER TABLE $tableName ALTER COLUMN id TYPE DOUBLE") + sql(s"ALTER TABLE $tableName ALTER COLUMN deptno TYPE DOUBLE") + val t = spark.table(tableName) + val expectedSchema = new StructType().add("ID", DoubleType).add("deptno", DoubleType) + assert(t.schema === expectedSchema) + // Update not existing column + val msg1 = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName ALTER COLUMN bad_column TYPE DOUBLE") + }.getMessage + assert(msg1.contains("Cannot update missing field bad_column in test.alt_table schema")) + // Update column to wrong type + val msg2 = intercept[ParseException] { + sql(s"ALTER TABLE $tableName ALTER COLUMN id TYPE bad_type") + }.getMessage + assert(msg2.contains("DataType bad_type is not supported")) + } + // Update column type in not existing table and namespace + Seq("h2.test.not_existing_table", "h2.bad_test.not_existing_table").foreach { table => + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $table ALTER COLUMN id TYPE DOUBLE") + }.getMessage + assert(msg.contains("Table not found")) } } - test("alter table ... update column type") { - withTable("h2.test.alt_table") { - sql("CREATE TABLE h2.test.alt_table (ID INTEGER) USING _") - sql("ALTER TABLE h2.test.alt_table ALTER COLUMN id TYPE DOUBLE") - val t = spark.table("h2.test.alt_table") - val expectedSchema = new StructType().add("ID", DoubleType) + test("ALTER TABLE ... 
update column nullability") { + val tableName = "h2.test.alt_table" + withTable(tableName) { + sql(s"CREATE TABLE $tableName (ID INTEGER NOT NULL, deptno INTEGER NOT NULL)") + sql(s"ALTER TABLE $tableName ALTER COLUMN ID DROP NOT NULL") + sql(s"ALTER TABLE $tableName ALTER COLUMN deptno DROP NOT NULL") + val t = spark.table(tableName) + val expectedSchema = new StructType() + .add("ID", IntegerType, nullable = true).add("deptno", IntegerType, nullable = true) assert(t.schema === expectedSchema) + // Update nullability of not existing column + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName ALTER COLUMN bad_column DROP NOT NULL") + }.getMessage + assert(msg.contains("Cannot update missing field bad_column in test.alt_table")) + } + // Update column nullability in not existing table and namespace + Seq("h2.test.not_existing_table", "h2.bad_test.not_existing_table").foreach { table => + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $table ALTER COLUMN ID DROP NOT NULL") + }.getMessage + assert(msg.contains("Table not found")) + } + } + + test("ALTER TABLE ... update column comment not supported") { + val tableName = "h2.test.alt_table" + withTable(tableName) { + sql(s"CREATE TABLE $tableName (ID INTEGER)") + val exp = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName ALTER COLUMN ID COMMENT 'test'") + } + assert(exp.getMessage.contains("Failed table altering: test.alt_table")) + assert(exp.cause.get.getMessage.contains("Unsupported TableChange")) + // Update comment for not existing column + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName ALTER COLUMN bad_column COMMENT 'test'") + }.getMessage + assert(msg.contains("Cannot update missing field bad_column in test.alt_table")) + } + // Update column comments in not existing table and namespace + Seq("h2.test.not_existing_table", "h2.bad_test.not_existing_table").foreach { table => + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $table ALTER COLUMN ID COMMENT 'test'") + }.getMessage + assert(msg.contains("Table not found")) } } - test("alter table ... 
update column nullability") { - withTable("h2.test.alt_table") { - sql("CREATE TABLE h2.test.alt_table (ID INTEGER NOT NULL) USING _") - sql("ALTER TABLE h2.test.alt_table ALTER COLUMN ID DROP NOT NULL") - val t = spark.table("h2.test.alt_table") - val expectedSchema = new StructType().add("ID", IntegerType, nullable = true) + test("ALTER TABLE case sensitivity") { + val tableName = "h2.test.alt_table" + withTable(tableName) { + sql(s"CREATE TABLE $tableName (c1 INTEGER NOT NULL, c2 INTEGER)") + var t = spark.table(tableName) + var expectedSchema = new StructType().add("c1", IntegerType).add("c2", IntegerType) assert(t.schema === expectedSchema) + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName RENAME COLUMN C2 TO c3") + }.getMessage + assert(msg.contains("Cannot rename missing field C2 in test.alt_table schema")) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + sql(s"ALTER TABLE $tableName RENAME COLUMN C2 TO c3") + expectedSchema = new StructType().add("c1", IntegerType).add("c3", IntegerType) + t = spark.table(tableName) + assert(t.schema === expectedSchema) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName DROP COLUMN C3") + }.getMessage + assert(msg.contains("Cannot delete missing field C3 in test.alt_table schema")) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + sql(s"ALTER TABLE $tableName DROP COLUMN C3") + expectedSchema = new StructType().add("c1", IntegerType) + t = spark.table(tableName) + assert(t.schema === expectedSchema) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName ALTER COLUMN C1 TYPE DOUBLE") + }.getMessage + assert(msg.contains("Cannot update missing field C1 in test.alt_table schema")) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + sql(s"ALTER TABLE $tableName ALTER COLUMN C1 TYPE DOUBLE") + expectedSchema = new StructType().add("c1", DoubleType) + t = spark.table(tableName) + assert(t.schema === expectedSchema) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { + val msg = intercept[AnalysisException] { + sql(s"ALTER TABLE $tableName ALTER COLUMN C1 DROP NOT NULL") + }.getMessage + assert(msg.contains("Cannot update missing field C1 in test.alt_table schema")) + } + + withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { + sql(s"ALTER TABLE $tableName ALTER COLUMN C1 DROP NOT NULL") + expectedSchema = new StructType().add("c1", DoubleType, nullable = true) + t = spark.table(tableName) + assert(t.schema === expectedSchema) + } } } - test("alter table ... 
update column comment not supported") { - withTable("h2.test.alt_table") { - sql("CREATE TABLE h2.test.alt_table (ID INTEGER) USING _") - val thrown = intercept[java.sql.SQLFeatureNotSupportedException] { - sql("ALTER TABLE h2.test.alt_table ALTER COLUMN ID COMMENT 'test'") + test("CREATE TABLE with table comment") { + withTable("h2.test.new_table") { + val logAppender = new LogAppender("table comment") + withLogAppender(logAppender) { + sql("CREATE TABLE h2.test.new_table(i INT, j STRING) COMMENT 'this is a comment'") } - assert(thrown.getMessage.contains("Unsupported TableChange")) + val createCommentWarning = logAppender.loggingEvents + .filter(_.getLevel == Level.WARN) + .map(_.getRenderedMessage) + .exists(_.contains("Cannot create JDBC table comment")) + assert(createCommentWarning === false) + } + } + + test("CREATE TABLE with table property") { + withTable("h2.test.new_table") { + val m = intercept[AnalysisException] { + sql("CREATE TABLE h2.test.new_table(i INT, j STRING)" + + " TBLPROPERTIES('ENGINE'='tableEngineName')") + }.cause.get.getMessage + assert(m.contains("\"TABLEENGINENAME\" not found")) } } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/exchange/EnsureRequirementsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/exchange/EnsureRequirementsSuite.scala new file mode 100644 index 0000000000000..061799f439e5b --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/exchange/EnsureRequirementsSuite.scala @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.exchange + +import org.apache.spark.sql.catalyst.expressions.Literal +import org.apache.spark.sql.catalyst.plans.Inner +import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, PartitioningCollection} +import org.apache.spark.sql.execution.{DummySparkPlan, SortExec} +import org.apache.spark.sql.execution.joins.SortMergeJoinExec +import org.apache.spark.sql.test.SharedSparkSession + +class EnsureRequirementsSuite extends SharedSparkSession { + private val exprA = Literal(1) + private val exprB = Literal(2) + private val exprC = Literal(3) + + test("reorder should handle PartitioningCollection") { + val plan1 = DummySparkPlan( + outputPartitioning = PartitioningCollection(Seq( + HashPartitioning(exprA :: exprB :: Nil, 5), + HashPartitioning(exprA :: Nil, 5)))) + val plan2 = DummySparkPlan() + + // Test PartitioningCollection on the left side of join. 
+ val smjExec1 = SortMergeJoinExec( + exprB :: exprA :: Nil, exprA :: exprB :: Nil, Inner, None, plan1, plan2) + EnsureRequirements.apply(smjExec1) match { + case SortMergeJoinExec(leftKeys, rightKeys, _, _, + SortExec(_, _, DummySparkPlan(_, _, _: PartitioningCollection, _, _), _), + SortExec(_, _, ShuffleExchangeExec(_: HashPartitioning, _, _), _), _) => + assert(leftKeys === Seq(exprA, exprB)) + assert(rightKeys === Seq(exprB, exprA)) + case other => fail(other.toString) + } + + // Test PartitioningCollection on the right side of join. + val smjExec2 = SortMergeJoinExec( + exprA :: exprB :: Nil, exprB :: exprA :: Nil, Inner, None, plan2, plan1) + EnsureRequirements.apply(smjExec2) match { + case SortMergeJoinExec(leftKeys, rightKeys, _, _, + SortExec(_, _, ShuffleExchangeExec(_: HashPartitioning, _, _), _), + SortExec(_, _, DummySparkPlan(_, _, _: PartitioningCollection, _, _), _), _) => + assert(leftKeys === Seq(exprB, exprA)) + assert(rightKeys === Seq(exprA, exprB)) + case other => fail(other.toString) + } + + // Both sides are PartitioningCollection, but left side cannot be reordered to match + // and it should fall back to the right side. + val smjExec3 = SortMergeJoinExec( + exprA :: exprC :: Nil, exprB :: exprA :: Nil, Inner, None, plan1, plan1) + EnsureRequirements.apply(smjExec3) match { + case SortMergeJoinExec(leftKeys, rightKeys, _, _, + SortExec(_, _, ShuffleExchangeExec(_: HashPartitioning, _, _), _), + SortExec(_, _, DummySparkPlan(_, _, _: PartitioningCollection, _, _), _), _) => + assert(leftKeys === Seq(exprC, exprA)) + assert(rightKeys === Seq(exprA, exprB)) + case other => fail(other.toString) + } + } + + test("reorder should fallback to the other side partitioning") { + val plan1 = DummySparkPlan( + outputPartitioning = HashPartitioning(exprA :: exprB :: exprC :: Nil, 5)) + val plan2 = DummySparkPlan( + outputPartitioning = HashPartitioning(exprB :: exprC :: Nil, 5)) + + // Test fallback to the right side, which has HashPartitioning. + val smjExec1 = SortMergeJoinExec( + exprA :: exprB :: Nil, exprC :: exprB :: Nil, Inner, None, plan1, plan2) + EnsureRequirements.apply(smjExec1) match { + case SortMergeJoinExec(leftKeys, rightKeys, _, _, + SortExec(_, _, ShuffleExchangeExec(_: HashPartitioning, _, _), _), + SortExec(_, _, DummySparkPlan(_, _, _: HashPartitioning, _, _), _), _) => + assert(leftKeys === Seq(exprB, exprA)) + assert(rightKeys === Seq(exprB, exprC)) + case other => fail(other.toString) + } + + // Test fallback to the right side, which has PartitioningCollection. + val plan3 = DummySparkPlan( + outputPartitioning = PartitioningCollection(Seq(HashPartitioning(exprB :: exprC :: Nil, 5)))) + val smjExec2 = SortMergeJoinExec( + exprA :: exprB :: Nil, exprC :: exprB :: Nil, Inner, None, plan1, plan3) + EnsureRequirements.apply(smjExec2) match { + case SortMergeJoinExec(leftKeys, rightKeys, _, _, + SortExec(_, _, ShuffleExchangeExec(_: HashPartitioning, _, _), _), + SortExec(_, _, DummySparkPlan(_, _, _: PartitioningCollection, _, _), _), _) => + assert(leftKeys === Seq(exprB, exprA)) + assert(rightKeys === Seq(exprB, exprC)) + case other => fail(other.toString) + } + + // The right side has HashPartitioning, so it is matched first, but no reordering match is + // found, and it should fall back to the left side, which has a PartitioningCollection. 
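For intuition, the reordering these cases exercise can be sketched in a few lines of plain Scala. This models only the idea, not Spark's EnsureRequirements internals; the helper below and its argument names are hypothetical.

  // Illustrative only: positionally paired equi-join keys can be permuted so that the
  // keys of one side line up with an existing HashPartitioning, avoiding a shuffle there.
  def reorder[A, B](leftKeys: Seq[A], rightKeys: Seq[B], partExprs: Seq[A]): Option[(Seq[A], Seq[B])] = {
    val remaining = scala.collection.mutable.ArrayBuffer(leftKeys.zip(rightKeys): _*)
    val picked = scala.collection.mutable.ArrayBuffer.empty[(A, B)]
    // For each partitioning expression, in order, consume a still-unused key pair whose
    // left key matches it; if every expression is matched, the join keys can be reordered.
    val matched = partExprs.forall { e =>
      remaining.indexWhere(_._1 == e) match {
        case -1 => false
        case i => picked += remaining.remove(i); true
      }
    }
    if (matched) Some(((picked ++ remaining).map(_._1).toSeq, (picked ++ remaining).map(_._2).toSeq))
    else None
  }

  // reorder(Seq("b", "a"), Seq("a", "b"), partExprs = Seq("a", "b"))
  //   == Some((Seq("a", "b"), Seq("b", "a")))  // the shape asserted for smjExec1 above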
+ val smjExec3 = SortMergeJoinExec( + exprC :: exprB :: Nil, exprA :: exprB :: Nil, Inner, None, plan3, plan1) + EnsureRequirements.apply(smjExec3) match { + case SortMergeJoinExec(leftKeys, rightKeys, _, _, + SortExec(_, _, DummySparkPlan(_, _, _: PartitioningCollection, _, _), _), + SortExec(_, _, ShuffleExchangeExec(_: HashPartitioning, _, _), _), _) => + assert(leftKeys === Seq(exprB, exprC)) + assert(rightKeys === Seq(exprB, exprA)) + case other => fail(other.toString) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala index 7ff945f5cbfb4..98a1089709b92 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/BroadcastJoinSuite.scala @@ -91,7 +91,7 @@ abstract class BroadcastJoinSuiteBase extends QueryTest with SQLTestUtils } else { df1.join(df2, joinExpression, joinType) } - val plan = EnsureRequirements(spark.sessionState.conf).apply(df3.queryExecution.sparkPlan) + val plan = EnsureRequirements.apply(df3.queryExecution.sparkPlan) assert(plan.collect { case p: T => p }.size === 1) plan } @@ -171,7 +171,7 @@ abstract class BroadcastJoinSuiteBase extends QueryTest with SQLTestUtils val df4 = Seq((1, "5"), (2, "5")).toDF("key", "value") val df5 = df4.join(df3, Seq("key"), "inner") - val plan = EnsureRequirements(spark.sessionState.conf).apply(df5.queryExecution.sparkPlan) + val plan = EnsureRequirements.apply(df5.queryExecution.sparkPlan) assert(plan.collect { case p: BroadcastHashJoinExec => p }.size === 1) assert(plan.collect { case p: SortMergeJoinExec => p }.size === 1) @@ -182,7 +182,7 @@ abstract class BroadcastJoinSuiteBase extends QueryTest with SQLTestUtils val df1 = Seq((1, "4"), (2, "2")).toDF("key", "value") val joined = df1.join(df, Seq("key"), "inner") - val plan = EnsureRequirements(spark.sessionState.conf).apply(joined.queryExecution.sparkPlan) + val plan = EnsureRequirements.apply(joined.queryExecution.sparkPlan) assert(plan.collect { case p: BroadcastHashJoinExec => p }.size === 1) } @@ -242,33 +242,40 @@ abstract class BroadcastJoinSuiteBase extends QueryTest with SQLTestUtils assert(HashJoin.rewriteKeyExpr(l :: l :: Nil) === l :: l :: Nil) assert(HashJoin.rewriteKeyExpr(l :: i :: Nil) === l :: i :: Nil) - assert(HashJoin.rewriteKeyExpr(i :: Nil) === Cast(i, LongType) :: Nil) + assert(HashJoin.rewriteKeyExpr(i :: Nil) === + Cast(i, LongType, Some(conf.sessionLocalTimeZone)) :: Nil) assert(HashJoin.rewriteKeyExpr(i :: l :: Nil) === i :: l :: Nil) assert(HashJoin.rewriteKeyExpr(i :: i :: Nil) === - BitwiseOr(ShiftLeft(Cast(i, LongType), Literal(32)), - BitwiseAnd(Cast(i, LongType), Literal((1L << 32) - 1))) :: Nil) + BitwiseOr(ShiftLeft(Cast(i, LongType, Some(conf.sessionLocalTimeZone)), Literal(32)), + BitwiseAnd(Cast(i, LongType, Some(conf.sessionLocalTimeZone)), Literal((1L << 32) - 1))) :: + Nil) assert(HashJoin.rewriteKeyExpr(i :: i :: i :: Nil) === i :: i :: i :: Nil) - assert(HashJoin.rewriteKeyExpr(s :: Nil) === Cast(s, LongType) :: Nil) + assert(HashJoin.rewriteKeyExpr(s :: Nil) === + Cast(s, LongType, Some(conf.sessionLocalTimeZone)) :: Nil) assert(HashJoin.rewriteKeyExpr(s :: l :: Nil) === s :: l :: Nil) assert(HashJoin.rewriteKeyExpr(s :: s :: Nil) === - BitwiseOr(ShiftLeft(Cast(s, LongType), Literal(16)), - BitwiseAnd(Cast(s, LongType), Literal((1L << 16) - 1))) :: Nil) + BitwiseOr(ShiftLeft(Cast(s, 
LongType, Some(conf.sessionLocalTimeZone)), Literal(16)), + BitwiseAnd(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), Literal((1L << 16) - 1))) :: + Nil) assert(HashJoin.rewriteKeyExpr(s :: s :: s :: Nil) === BitwiseOr(ShiftLeft( - BitwiseOr(ShiftLeft(Cast(s, LongType), Literal(16)), - BitwiseAnd(Cast(s, LongType), Literal((1L << 16) - 1))), + BitwiseOr(ShiftLeft(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), Literal(16)), + BitwiseAnd(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), Literal((1L << 16) - 1))), Literal(16)), - BitwiseAnd(Cast(s, LongType), Literal((1L << 16) - 1))) :: Nil) + BitwiseAnd(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), Literal((1L << 16) - 1))) :: + Nil) assert(HashJoin.rewriteKeyExpr(s :: s :: s :: s :: Nil) === BitwiseOr(ShiftLeft( BitwiseOr(ShiftLeft( - BitwiseOr(ShiftLeft(Cast(s, LongType), Literal(16)), - BitwiseAnd(Cast(s, LongType), Literal((1L << 16) - 1))), + BitwiseOr(ShiftLeft(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), Literal(16)), + BitwiseAnd(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), + Literal((1L << 16) - 1))), Literal(16)), - BitwiseAnd(Cast(s, LongType), Literal((1L << 16) - 1))), + BitwiseAnd(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), Literal((1L << 16) - 1))), Literal(16)), - BitwiseAnd(Cast(s, LongType), Literal((1L << 16) - 1))) :: Nil) + BitwiseAnd(Cast(s, LongType, Some(conf.sessionLocalTimeZone)), Literal((1L << 16) - 1))) :: + Nil) assert(HashJoin.rewriteKeyExpr(s :: s :: s :: s :: s :: Nil) === s :: s :: s :: s :: s :: Nil) @@ -432,22 +439,24 @@ abstract class BroadcastJoinSuiteBase extends QueryTest with SQLTestUtils // join1 is a broadcast join where df2 is broadcasted. Note that output partitioning on the // streamed side (t1) is HashPartitioning (bucketed files). val join1 = t1.join(df2, t1("i1") === df2("i2") && t1("j1") === df2("j2")) - val plan1 = join1.queryExecution.executedPlan - assert(collect(plan1) { case e: ShuffleExchangeExec => e }.isEmpty) - val broadcastJoins = collect(plan1) { case b: BroadcastHashJoinExec => b } - assert(broadcastJoins.size == 1) - assert(broadcastJoins(0).outputPartitioning.isInstanceOf[PartitioningCollection]) - val p = broadcastJoins(0).outputPartitioning.asInstanceOf[PartitioningCollection] - assert(p.partitionings.size == 4) - // Verify all the combinations of output partitioning. - Seq(Seq(t1("i1"), t1("j1")), - Seq(t1("i1"), df2("j2")), - Seq(df2("i2"), t1("j1")), - Seq(df2("i2"), df2("j2"))).foreach { expected => - val expectedExpressions = expected.map(_.expr) - assert(p.partitionings.exists { - case h: HashPartitioning => expressionsEqual(h.expressions, expectedExpressions) - }) + withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "false") { + val plan1 = join1.queryExecution.executedPlan + assert(collect(plan1) { case e: ShuffleExchangeExec => e }.isEmpty) + val broadcastJoins = collect(plan1) { case b: BroadcastHashJoinExec => b } + assert(broadcastJoins.size == 1) + assert(broadcastJoins(0).outputPartitioning.isInstanceOf[PartitioningCollection]) + val p = broadcastJoins(0).outputPartitioning.asInstanceOf[PartitioningCollection] + assert(p.partitionings.size == 4) + // Verify all the combinations of output partitioning. 
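For intuition, the four expected partitionings come from choosing one expression per equi-join pair: with t1.i1 = df2.i2 and t1.j1 = df2.j2, either column of each pair is a valid partitioning key after the broadcast join. A minimal, self-contained sketch of that expansion (the helper is hypothetical, plain Scala):

  // Illustrative only: each inner Seq holds the equivalent expressions of one join-key pair.
  def partitioningCombinations[A](pairs: Seq[Seq[A]]): Seq[Seq[A]] =
    pairs.foldLeft(Seq(Seq.empty[A])) { (acc, choices) =>
      for (prefix <- acc; c <- choices) yield prefix :+ c
    }

  // partitioningCombinations(Seq(Seq("t1.i1", "df2.i2"), Seq("t1.j1", "df2.j2"))).size == 4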
+ Seq(Seq(t1("i1"), t1("j1")), + Seq(t1("i1"), df2("j2")), + Seq(df2("i2"), t1("j1")), + Seq(df2("i2"), df2("j2"))).foreach { expected => + val expectedExpressions = expected.map(_.expr) + assert(p.partitionings.exists { + case h: HashPartitioning => expressionsEqual(h.expressions, expectedExpressions) + }) + } } // Join on the column from the broadcasted side (i2, j2) and make sure output partitioning diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/ExistenceJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/ExistenceJoinSuite.scala index e8ac09fdb634e..fcbc0da9d5551 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/ExistenceJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/ExistenceJoinSuite.scala @@ -107,13 +107,13 @@ class ExistenceJoinSuite extends SparkPlanTest with SharedSparkSession { extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) => withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( ShuffledHashJoinExec( leftKeys, rightKeys, joinType, BuildRight, boundCondition, left, right)), expectedAnswer, sortAnswers = true) checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( createLeftSemiPlusJoin(ShuffledHashJoinExec( leftKeys, rightKeys, leftSemiPlus, BuildRight, boundCondition, left, right))), expectedAnswer, @@ -126,13 +126,13 @@ class ExistenceJoinSuite extends SparkPlanTest with SharedSparkSession { extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) => withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( BroadcastHashJoinExec( leftKeys, rightKeys, joinType, BuildRight, boundCondition, left, right)), expectedAnswer, sortAnswers = true) checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( createLeftSemiPlusJoin(BroadcastHashJoinExec( leftKeys, rightKeys, leftSemiPlus, BuildRight, boundCondition, left, right))), expectedAnswer, @@ -145,12 +145,12 @@ class ExistenceJoinSuite extends SparkPlanTest with SharedSparkSession { extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) => withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( SortMergeJoinExec(leftKeys, rightKeys, joinType, boundCondition, left, right)), expectedAnswer, sortAnswers = true) checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( createLeftSemiPlusJoin(SortMergeJoinExec( leftKeys, rightKeys, leftSemiPlus, boundCondition, left, right))), expectedAnswer, @@ -162,12 +162,12 @@ class ExistenceJoinSuite extends SparkPlanTest with SharedSparkSession { test(s"$testName using BroadcastNestedLoopJoin build left") { withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { checkAnswer2(leftRows, rightRows, (left: 
SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( BroadcastNestedLoopJoinExec(left, right, BuildLeft, joinType, Some(condition))), expectedAnswer, sortAnswers = true) checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( createLeftSemiPlusJoin(BroadcastNestedLoopJoinExec( left, right, BuildLeft, leftSemiPlus, Some(condition)))), expectedAnswer, @@ -178,12 +178,12 @@ class ExistenceJoinSuite extends SparkPlanTest with SharedSparkSession { test(s"$testName using BroadcastNestedLoopJoin build right") { withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( BroadcastNestedLoopJoinExec(left, right, BuildRight, joinType, Some(condition))), expectedAnswer, sortAnswers = true) checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(left.sqlContext.sessionState.conf).apply( + EnsureRequirements.apply( createLeftSemiPlusJoin(BroadcastNestedLoopJoinExec( left, right, BuildRight, leftSemiPlus, Some(condition)))), expectedAnswer, diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala index 44ab3f7d023d3..f476c15f59983 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala @@ -101,7 +101,7 @@ class InnerJoinSuite extends SparkPlanTest with SharedSparkSession { boundCondition, leftPlan, rightPlan) - EnsureRequirements(spark.sessionState.conf).apply(broadcastJoin) + EnsureRequirements.apply(broadcastJoin) } def makeShuffledHashJoin( @@ -115,7 +115,7 @@ class InnerJoinSuite extends SparkPlanTest with SharedSparkSession { side, None, leftPlan, rightPlan) val filteredJoin = boundCondition.map(FilterExec(_, shuffledHashJoin)).getOrElse(shuffledHashJoin) - EnsureRequirements(spark.sessionState.conf).apply(filteredJoin) + EnsureRequirements.apply(filteredJoin) } def makeSortMergeJoin( @@ -126,7 +126,7 @@ class InnerJoinSuite extends SparkPlanTest with SharedSparkSession { rightPlan: SparkPlan) = { val sortMergeJoin = joins.SortMergeJoinExec(leftKeys, rightKeys, Inner, boundCondition, leftPlan, rightPlan) - EnsureRequirements(spark.sessionState.conf).apply(sortMergeJoin) + EnsureRequirements.apply(sortMergeJoin) } testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastHashJoin (build=left)") { _ => diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala index a466e05816ad8..9f7e0a14f6a5c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/joins/OuterJoinSuite.scala @@ -110,7 +110,7 @@ class OuterJoinSuite extends SparkPlanTest with SharedSparkSession { withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { val buildSide = if (joinType == LeftOuter) BuildRight else BuildLeft checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(spark.sessionState.conf).apply( + EnsureRequirements.apply( ShuffledHashJoinExec( leftKeys, 
rightKeys, joinType, buildSide, boundCondition, left, right)), expectedAnswer.map(Row.fromTuple), @@ -143,7 +143,7 @@ class OuterJoinSuite extends SparkPlanTest with SharedSparkSession { extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) => withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") { checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) => - EnsureRequirements(spark.sessionState.conf).apply( + EnsureRequirements.apply( SortMergeJoinExec(leftKeys, rightKeys, joinType, boundCondition, left, right)), expectedAnswer.map(Row.fromTuple), sortAnswers = true) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala index 4e10c27edb0e9..21d17f40abb34 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala @@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.plans.logical.LocalRelation import org.apache.spark.sql.execution.{FilterExec, RangeExec, SparkPlan, WholeStageCodegenExec} import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecutionSuite import org.apache.spark.sql.execution.aggregate.HashAggregateExec -import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec +import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ShuffleExchangeExec} import org.apache.spark.sql.execution.joins.ShuffledHashJoinExec import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf @@ -181,7 +181,7 @@ class SQLMetricsSuite extends SharedSparkSession with SQLMetricsTestUtils assert(probes.toDouble > 1.0) } else { val mainValue = probes.split("\n").apply(1).stripPrefix("(").stripSuffix(")") - // Extract min, med, max from the string and strip off everthing else. + // Extract min, med, max from the string and strip off everything else. 
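The extraction in this branch assumes the rendered metric carries min/med/max on its second line, roughly shaped like "(2, 2, 3 (stage 2.0: task 7))". A small self-contained sketch of that parsing; the sample value is hypothetical and only its shape matters:

  // Illustrative only: pull min, med, max out of a rendered SQL metric string.
  val rendered = "avg hash probes per key\n(2, 2, 3 (stage 2.0: task 7))"  // hypothetical sample
  val main = rendered.split("\n")(1).stripPrefix("(").stripSuffix(")")
  val stats = main.slice(0, main.indexOf(" (")).split(", ").map(_.toDouble)
  // stats.toSeq == Seq(2.0, 2.0, 3.0); each value is expected to exceed 1.0 in the check that follows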
val index = mainValue.indexOf(" (", 0) mainValue.slice(0, index).split(", ").foreach { probe => assert(probe.toDouble > 1.0) @@ -705,7 +705,7 @@ class SQLMetricsSuite extends SharedSparkSession with SQLMetricsTestUtils sql("CREATE TEMPORARY VIEW inMemoryTable AS SELECT 1 AS c1") sql("CACHE TABLE inMemoryTable") testSparkPlanMetrics(spark.table("inMemoryTable"), 1, - Map(1L -> (("Scan In-memory table `inMemoryTable`", Map.empty))) + Map(1L -> (("Scan In-memory table inMemoryTable", Map.empty))) ) sql("CREATE TEMPORARY VIEW ```a``b``` AS SELECT 2 AS c1") @@ -736,4 +736,23 @@ class SQLMetricsSuite extends SharedSparkSession with SQLMetricsTestUtils Map("dataSize" -> 3200, "shuffleRecordsWritten" -> 100)) testMetricsInSparkPlanOperator(exchanges(1), Map("dataSize" -> 0, "shuffleRecordsWritten" -> 0)) } + + test("Add numRows to metric of BroadcastExchangeExec") { + withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> "true") { + withTable("t1", "t2") { + spark.range(2).write.saveAsTable("t1") + spark.range(2).write.saveAsTable("t2") + val df = sql("SELECT t1.* FROM t1 JOIN t2 ON t1.id = t2.id") + df.collect() + val plan = df.queryExecution.executedPlan + + val exchanges = plan.collect { + case s: BroadcastExchangeExec => s + } + + assert(exchanges.size === 1) + testMetricsInSparkPlanOperator(exchanges.head, Map("numOutputRows" -> 2)) + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExecSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExecSuite.scala index 5fe3d6a71167e..cb5e23e0534d0 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExecSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExecSuite.scala @@ -137,6 +137,13 @@ class MyDummyPythonUDF extends UserDefinedPythonFunction( pythonEvalType = PythonEvalType.SQL_BATCHED_UDF, udfDeterministic = true) +class MyDummyNondeterministicPythonUDF extends UserDefinedPythonFunction( + name = "dummyNondeterministicUDF", + func = new DummyUDF, + dataType = BooleanType, + pythonEvalType = PythonEvalType.SQL_BATCHED_UDF, + udfDeterministic = false) + class MyDummyGroupedAggPandasUDF extends UserDefinedPythonFunction( name = "dummyGroupedAggPandasUDF", func = new DummyUDF, diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala index 87d541d2d22b0..325f4923bd6c6 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFsSuite.scala @@ -28,6 +28,7 @@ class ExtractPythonUDFsSuite extends SparkPlanTest with SharedSparkSession { import testImplicits._ val batchedPythonUDF = new MyDummyPythonUDF + val batchedNondeterministicPythonUDF = new MyDummyNondeterministicPythonUDF val scalarPandasUDF = new MyDummyScalarPandasUDF private def collectBatchExec(plan: SparkPlan): Seq[BatchEvalPythonExec] = plan.collect { @@ -166,5 +167,31 @@ class ExtractPythonUDFsSuite extends SparkPlanTest with SharedSparkSession { } } + test("SPARK-33303: Deterministic UDF calls are deduplicated") { + val df = Seq("Hello").toDF("a") + + val df2 = df.withColumn("c", batchedPythonUDF(col("a"))).withColumn("d", col("c")) + val pythonEvalNodes2 = collectBatchExec(df2.queryExecution.executedPlan) + assert(pythonEvalNodes2.size == 1) + 
assert(pythonEvalNodes2.head.udfs.size == 1) + + val df3 = df.withColumns(Seq("c", "d"), + Seq(batchedPythonUDF(col("a")), batchedPythonUDF(col("a")))) + val pythonEvalNodes3 = collectBatchExec(df3.queryExecution.executedPlan) + assert(pythonEvalNodes3.size == 1) + assert(pythonEvalNodes3.head.udfs.size == 1) + + val df4 = df.withColumn("c", batchedNondeterministicPythonUDF(col("a"))) + .withColumn("d", col("c")) + val pythonEvalNodes4 = collectBatchExec(df4.queryExecution.executedPlan) + assert(pythonEvalNodes4.size == 1) + assert(pythonEvalNodes4.head.udfs.size == 1) + + val df5 = df.withColumns(Seq("c", "d"), + Seq(batchedNondeterministicPythonUDF(col("a")), batchedNondeterministicPythonUDF(col("a")))) + val pythonEvalNodes5 = collectBatchExec(df5.queryExecution.executedPlan) + assert(pythonEvalNodes5.size == 1) + assert(pythonEvalNodes5.head.udfs.size == 2) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala index c53617b40e09d..d6707e7be71fc 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/FileStreamSinkLogSuite.scala @@ -22,11 +22,10 @@ import java.lang.{Long => JLong} import java.net.URI import java.nio.charset.StandardCharsets.UTF_8 import java.util.concurrent.ConcurrentHashMap -import java.util.concurrent.atomic.AtomicLong import scala.util.Random -import org.apache.hadoop.fs.{FSDataInputStream, Path, RawLocalFileSystem} +import org.apache.hadoop.fs.{FileSystem, FSDataInputStream, Path, RawLocalFileSystem} import org.apache.spark.SparkFunSuite import org.apache.spark.sql.internal.SQLConf @@ -40,7 +39,7 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSparkSession { test("shouldRetain") { withFileStreamSinkLog { sinkLog => val log = newFakeSinkFileStatus("/a/b/x", FileStreamSinkLog.ADD_ACTION) - assert(sinkLog.shouldRetain(log)) + assert(sinkLog.shouldRetain(log, System.currentTimeMillis())) } } @@ -130,6 +129,17 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSparkSession { } } + private def listBatchFiles(fs: FileSystem, sinkLog: FileStreamSinkLog): Set[String] = { + fs.listStatus(sinkLog.metadataPath).map(_.getPath.getName).filter { fileName => + try { + getBatchIdFromFileName(fileName) + true + } catch { + case _: NumberFormatException => false + } + }.toSet + } + test("delete expired file") { // Set FILE_SINK_LOG_CLEANUP_DELAY to 0 so that we can detect the deleting behaviour // deterministically and one min batches to retain @@ -139,18 +149,7 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSparkSession { SQLConf.MIN_BATCHES_TO_RETAIN.key -> "1") { withFileStreamSinkLog { sinkLog => val fs = sinkLog.metadataPath.getFileSystem(spark.sessionState.newHadoopConf()) - - def listBatchFiles(): Set[String] = { - fs.listStatus(sinkLog.metadataPath).map(_.getPath.getName).filter { fileName => - try { - getBatchIdFromFileName(fileName) - true - } catch { - case _: NumberFormatException => false - } - }.toSet - } - + def listBatchFiles(): Set[String] = this.listBatchFiles(fs, sinkLog) sinkLog.add(0, Array(newFakeSinkFileStatus("/a/b/0", FileStreamSinkLog.ADD_ACTION))) assert(Set("0") === listBatchFiles()) sinkLog.add(1, Array(newFakeSinkFileStatus("/a/b/1", FileStreamSinkLog.ADD_ACTION))) @@ -174,18 +173,7 @@ class FileStreamSinkLogSuite extends SparkFunSuite 
with SharedSparkSession { SQLConf.MIN_BATCHES_TO_RETAIN.key -> "2") { withFileStreamSinkLog { sinkLog => val fs = sinkLog.metadataPath.getFileSystem(spark.sessionState.newHadoopConf()) - - def listBatchFiles(): Set[String] = { - fs.listStatus(sinkLog.metadataPath).map(_.getPath.getName).filter { fileName => - try { - getBatchIdFromFileName(fileName) - true - } catch { - case _: NumberFormatException => false - } - }.toSet - } - + def listBatchFiles(): Set[String] = this.listBatchFiles(fs, sinkLog) sinkLog.add(0, Array(newFakeSinkFileStatus("/a/b/0", FileStreamSinkLog.ADD_ACTION))) assert(Set("0") === listBatchFiles()) sinkLog.add(1, Array(newFakeSinkFileStatus("/a/b/1", FileStreamSinkLog.ADD_ACTION))) @@ -206,6 +194,24 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSparkSession { } } + test("filter out outdated entries when compacting") { + val curTime = System.currentTimeMillis() + withFileStreamSinkLog(sinkLog => { + val logs = Seq( + newFakeSinkFileStatus("/a/b/x", FileStreamSinkLog.ADD_ACTION, curTime), + newFakeSinkFileStatus("/a/b/y", FileStreamSinkLog.ADD_ACTION, curTime), + newFakeSinkFileStatus("/a/b/z", FileStreamSinkLog.ADD_ACTION, curTime)) + logs.foreach { log => assert(sinkLog.shouldRetain(log, curTime)) } + + val logs2 = Seq( + newFakeSinkFileStatus("/a/b/m", FileStreamSinkLog.ADD_ACTION, curTime - 80000), + newFakeSinkFileStatus("/a/b/n", FileStreamSinkLog.ADD_ACTION, curTime - 120000)) + logs2.foreach { log => + assert(!sinkLog.shouldRetain(log, curTime)) + } + }, Some(60000)) + } + test("read Spark 2.1.0 log format") { assert(readFromResource("file-sink-log-version-2.1.0") === Seq( SinkFileStatus("/a/b/0", 1, false, 1, 1, 100, FileStreamSinkLog.ADD_ACTION), @@ -260,23 +266,29 @@ class FileStreamSinkLogSuite extends SparkFunSuite with SharedSparkSession { } /** - * Create a fake SinkFileStatus using path and action. Most of tests don't care about other fields - * in SinkFileStatus. + * Create a fake SinkFileStatus using path and action, and optionally modification time. + * Most of tests don't care about other fields in SinkFileStatus. 
*/ - private def newFakeSinkFileStatus(path: String, action: String): SinkFileStatus = { + private def newFakeSinkFileStatus( + path: String, + action: String, + modificationTime: Long = Long.MaxValue): SinkFileStatus = { SinkFileStatus( path = path, size = 100L, isDir = false, - modificationTime = 100L, + modificationTime = modificationTime, blockReplication = 1, blockSize = 100L, action = action) } - private def withFileStreamSinkLog(f: FileStreamSinkLog => Unit): Unit = { + private def withFileStreamSinkLog( + f: FileStreamSinkLog => Unit, + ttl: Option[Long] = None): Unit = { withTempDir { file => - val sinkLog = new FileStreamSinkLog(FileStreamSinkLog.VERSION, spark, file.getCanonicalPath) + val sinkLog = new FileStreamSinkLog(FileStreamSinkLog.VERSION, spark, file.getCanonicalPath, + ttl) f(sinkLog) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala index 67dd88cbab63b..980d532dd4779 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/HDFSMetadataLogSuite.scala @@ -199,7 +199,7 @@ class HDFSMetadataLogSuite extends SharedSparkSession { intercept[IllegalStateException](verifyBatchIds(Seq(2, 3, 4), Some(1L), Some(5L))) intercept[IllegalStateException](verifyBatchIds(Seq(1, 2, 4, 5), Some(1L), Some(5L))) - // Related to SPARK-26629, this capatures the behavior for verifyBatchIds when startId > endId + // Related to SPARK-26629, this captures the behavior for verifyBatchIds when startId > endId intercept[IllegalStateException](verifyBatchIds(Seq(), Some(2L), Some(1L))) intercept[AssertionError](verifyBatchIds(Seq(2), Some(2L), Some(1L))) intercept[AssertionError](verifyBatchIds(Seq(1), Some(2L), Some(1L))) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/MemorySinkSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/MemorySinkSuite.scala index 3ead91fcf712a..014840d758c0c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/MemorySinkSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/MemorySinkSuite.scala @@ -24,7 +24,7 @@ import org.scalatest.BeforeAndAfter import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.execution.streaming.sources._ -import org.apache.spark.sql.streaming.{OutputMode, StreamTest} +import org.apache.spark.sql.streaming.StreamTest import org.apache.spark.sql.types.{IntegerType, StructField, StructType} import org.apache.spark.util.Utils diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/FlatMapGroupsWithStateExecHelperSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/FlatMapGroupsWithStateExecHelperSuite.scala index dec30fd01f7e2..ea6fd8ab312c9 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/FlatMapGroupsWithStateExecHelperSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/FlatMapGroupsWithStateExecHelperSuite.scala @@ -23,7 +23,6 @@ import org.apache.spark.sql.Encoder import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow} import org.apache.spark.sql.execution.streaming.GroupStateImpl._ 
-import org.apache.spark.sql.streaming.FlatMapGroupsWithStateSuite._ import org.apache.spark.sql.streaming.StreamTest import org.apache.spark.sql.types._ diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateSchemaCompatibilityCheckerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateSchemaCompatibilityCheckerSuite.scala new file mode 100644 index 0000000000000..4eb7603b316aa --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateSchemaCompatibilityCheckerSuite.scala @@ -0,0 +1,230 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.streaming.state + +import java.util.UUID + +import scala.util.Random + +import org.apache.hadoop.conf.Configuration + +import org.apache.spark.sql.execution.streaming.state.StateStoreTestsHelper.newDir +import org.apache.spark.sql.test.SharedSparkSession +import org.apache.spark.sql.types._ + +class StateSchemaCompatibilityCheckerSuite extends SharedSparkSession { + + private val hadoopConf: Configuration = new Configuration() + private val opId = Random.nextInt(100000) + private val partitionId = StateStore.PARTITION_ID_TO_CHECK_SCHEMA + + private val structSchema = new StructType() + .add(StructField("nested1", IntegerType, nullable = true)) + .add(StructField("nested2", StringType, nullable = true)) + + private val keySchema = new StructType() + .add(StructField("key1", IntegerType, nullable = true)) + .add(StructField("key2", StringType, nullable = true)) + .add(StructField("key3", structSchema, nullable = true)) + + private val valueSchema = new StructType() + .add(StructField("value1", IntegerType, nullable = true)) + .add(StructField("value2", StringType, nullable = true)) + .add(StructField("value3", structSchema, nullable = true)) + + test("adding field to key should fail") { + val fieldAddedKeySchema = keySchema.add(StructField("newKey", IntegerType)) + verifyException(keySchema, valueSchema, fieldAddedKeySchema, valueSchema) + } + + test("adding field to value should fail") { + val fieldAddedValueSchema = valueSchema.add(StructField("newValue", IntegerType)) + verifyException(keySchema, valueSchema, keySchema, fieldAddedValueSchema) + } + + test("adding nested field in key should fail") { + val fieldAddedNestedSchema = structSchema.add(StructField("newNested", IntegerType)) + val newKeySchema = applyNewSchemaToNestedFieldInKey(fieldAddedNestedSchema) + verifyException(keySchema, valueSchema, newKeySchema, valueSchema) + } + + test("adding nested field in value should fail") { + val fieldAddedNestedSchema = structSchema.add(StructField("newNested", IntegerType)) + val newValueSchema = applyNewSchemaToNestedFieldInValue(fieldAddedNestedSchema) + 
verifyException(keySchema, valueSchema, keySchema, newValueSchema) + } + + test("removing field from key should fail") { + val fieldRemovedKeySchema = StructType(keySchema.dropRight(1)) + verifyException(keySchema, valueSchema, fieldRemovedKeySchema, valueSchema) + } + + test("removing field from value should fail") { + val fieldRemovedValueSchema = StructType(valueSchema.drop(1)) + verifyException(keySchema, valueSchema, keySchema, fieldRemovedValueSchema) + } + + test("removing nested field from key should fail") { + val fieldRemovedNestedSchema = StructType(structSchema.dropRight(1)) + val newKeySchema = applyNewSchemaToNestedFieldInKey(fieldRemovedNestedSchema) + verifyException(keySchema, valueSchema, newKeySchema, valueSchema) + } + + test("removing nested field from value should fail") { + val fieldRemovedNestedSchema = StructType(structSchema.drop(1)) + val newValueSchema = applyNewSchemaToNestedFieldInValue(fieldRemovedNestedSchema) + verifyException(keySchema, valueSchema, keySchema, newValueSchema) + } + + test("changing the type of field in key should fail") { + val typeChangedKeySchema = StructType(keySchema.map(_.copy(dataType = TimestampType))) + verifyException(keySchema, valueSchema, typeChangedKeySchema, valueSchema) + } + + test("changing the type of field in value should fail") { + val typeChangedValueSchema = StructType(valueSchema.map(_.copy(dataType = TimestampType))) + verifyException(keySchema, valueSchema, keySchema, typeChangedValueSchema) + } + + test("changing the type of nested field in key should fail") { + val typeChangedNestedSchema = StructType(structSchema.map(_.copy(dataType = TimestampType))) + val newKeySchema = applyNewSchemaToNestedFieldInKey(typeChangedNestedSchema) + verifyException(keySchema, valueSchema, newKeySchema, valueSchema) + } + + test("changing the type of nested field in value should fail") { + val typeChangedNestedSchema = StructType(structSchema.map(_.copy(dataType = TimestampType))) + val newValueSchema = applyNewSchemaToNestedFieldInValue(typeChangedNestedSchema) + verifyException(keySchema, valueSchema, keySchema, newValueSchema) + } + + test("changing the nullability of nullable to non-nullable in key should fail") { + val nonNullChangedKeySchema = StructType(keySchema.map(_.copy(nullable = false))) + verifyException(keySchema, valueSchema, nonNullChangedKeySchema, valueSchema) + } + + test("changing the nullability of nullable to non-nullable in value should fail") { + val nonNullChangedValueSchema = StructType(valueSchema.map(_.copy(nullable = false))) + verifyException(keySchema, valueSchema, keySchema, nonNullChangedValueSchema) + } + + test("changing the nullability of nullable to nonnullable in nested field in key should fail") { + val typeChangedNestedSchema = StructType(structSchema.map(_.copy(nullable = false))) + val newKeySchema = applyNewSchemaToNestedFieldInKey(typeChangedNestedSchema) + verifyException(keySchema, valueSchema, newKeySchema, valueSchema) + } + + test("changing the nullability of nullable to nonnullable in nested field in value should fail") { + val typeChangedNestedSchema = StructType(structSchema.map(_.copy(nullable = false))) + val newValueSchema = applyNewSchemaToNestedFieldInValue(typeChangedNestedSchema) + verifyException(keySchema, valueSchema, keySchema, newValueSchema) + } + + test("changing the name of field in key should be allowed") { + val newName: StructField => StructField = f => f.copy(name = f.name + "_new") + val fieldNameChangedKeySchema = StructType(keySchema.map(newName)) + 
verifySuccess(keySchema, valueSchema, fieldNameChangedKeySchema, valueSchema) + } + + test("changing the name of field in value should be allowed") { + val newName: StructField => StructField = f => f.copy(name = f.name + "_new") + val fieldNameChangedValueSchema = StructType(valueSchema.map(newName)) + verifySuccess(keySchema, valueSchema, keySchema, fieldNameChangedValueSchema) + } + + test("changing the name of nested field in key should be allowed") { + val newName: StructField => StructField = f => f.copy(name = f.name + "_new") + val newNestedFieldsSchema = StructType(structSchema.map(newName)) + val fieldNameChangedKeySchema = applyNewSchemaToNestedFieldInKey(newNestedFieldsSchema) + verifySuccess(keySchema, valueSchema, fieldNameChangedKeySchema, valueSchema) + } + + test("changing the name of nested field in value should be allowed") { + val newName: StructField => StructField = f => f.copy(name = f.name + "_new") + val newNestedFieldsSchema = StructType(structSchema.map(newName)) + val fieldNameChangedValueSchema = applyNewSchemaToNestedFieldInValue(newNestedFieldsSchema) + verifySuccess(keySchema, valueSchema, keySchema, fieldNameChangedValueSchema) + } + + private def applyNewSchemaToNestedFieldInKey(newNestedSchema: StructType): StructType = { + applyNewSchemaToNestedField(keySchema, newNestedSchema, "key3") + } + + private def applyNewSchemaToNestedFieldInValue(newNestedSchema: StructType): StructType = { + applyNewSchemaToNestedField(valueSchema, newNestedSchema, "value3") + } + + private def applyNewSchemaToNestedField( + originSchema: StructType, + newNestedSchema: StructType, + fieldName: String): StructType = { + val newFields = originSchema.map { field => + if (field.name == fieldName) { + field.copy(dataType = newNestedSchema) + } else { + field + } + } + StructType(newFields) + } + + private def runSchemaChecker( + dir: String, + queryId: UUID, + newKeySchema: StructType, + newValueSchema: StructType): Unit = { + // Spark doesn't support online state schema changes, so the schema needs to be checked + // only once per JVM run + val providerId = StateStoreProviderId( + StateStoreId(dir, opId, partitionId), queryId) + + new StateSchemaCompatibilityChecker(providerId, hadoopConf) + .check(newKeySchema, newValueSchema) + } + + private def verifyException( + oldKeySchema: StructType, + oldValueSchema: StructType, + newKeySchema: StructType, + newValueSchema: StructType): Unit = { + val dir = newDir() + val queryId = UUID.randomUUID() + runSchemaChecker(dir, queryId, oldKeySchema, oldValueSchema) + + val e = intercept[StateSchemaNotCompatible] { + runSchemaChecker(dir, queryId, newKeySchema, newValueSchema) + } + + assert(e.getMessage.contains("Provided schema doesn't match to the schema for existing state!")) + assert(e.getMessage.contains(newKeySchema.json)) + assert(e.getMessage.contains(newValueSchema.json)) + assert(e.getMessage.contains(oldKeySchema.json)) + assert(e.getMessage.contains(oldValueSchema.json)) + } + + private def verifySuccess( + oldKeySchema: StructType, + oldValueSchema: StructType, + newKeySchema: StructType, + newValueSchema: StructType): Unit = { + val dir = newDir() + val queryId = UUID.randomUUID() + runSchemaChecker(dir, queryId, oldKeySchema, oldValueSchema) + runSchemaChecker(dir, queryId, newKeySchema, newValueSchema) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCompatibilitySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCompatibilitySuite.scala new file mode 100644
index 0000000000000..b189de8d2a21e --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCompatibilitySuite.scala @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.streaming.state + +import java.io.File + +import org.apache.commons.io.FileUtils + +import org.apache.spark.SparkFunSuite +import org.apache.spark.io.CompressionCodec +import org.apache.spark.sql.catalyst.plans.PlanTestBase +import org.apache.spark.sql.catalyst.streaming.InternalOutputModes.Update +import org.apache.spark.sql.execution.streaming.MemoryStream +import org.apache.spark.sql.functions.count +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.streaming.StreamTest +import org.apache.spark.util.Utils + +class StateStoreCompatibilitySuite extends StreamTest with StateStoreCodecsTest { + testWithAllCodec( + "SPARK-33263: Recovery from checkpoint before codec config introduced") { + val resourceUri = this.getClass.getResource( + "/structured-streaming/checkpoint-version-3.0.0-streaming-statestore-codec/").toURI + val checkpointDir = Utils.createTempDir().getCanonicalFile + FileUtils.copyDirectory(new File(resourceUri), checkpointDir) + + import testImplicits._ + + val inputData = MemoryStream[Int] + val aggregated = inputData.toDF().groupBy("value").agg(count("*")) + inputData.addData(1, 2, 3) + + /** + * Note: The checkpoint was generated using the following input in Spark version 3.0.0: + * AddData(inputData, 1, 2, 3) + */ + + testStream(aggregated, Update)( + StartStream( + checkpointLocation = checkpointDir.getAbsolutePath, + additionalConfs = Map(SQLConf.SHUFFLE_PARTITIONS.key -> "1")), + AddData(inputData, 1, 2), + CheckNewAnswer((1, 2), (2, 2)) + ) + } +} + +trait StateStoreCodecsTest extends SparkFunSuite with PlanTestBase { + private val codecsInShortName = + CompressionCodec.ALL_COMPRESSION_CODECS.map { c => CompressionCodec.getShortName(c) } + + protected def testWithAllCodec(name: String)(func: => Any): Unit = { + codecsInShortName.foreach { codecShortName => + test(s"$name - with codec $codecShortName") { + withSQLConf(SQLConf.STATE_STORE_COMPRESSION_CODEC.key -> codecShortName) { + func + } + } + } + + CompressionCodec.ALL_COMPRESSION_CODECS.foreach { codecShortName => + test(s"$name - with codec $codecShortName") { + withSQLConf(SQLConf.STATE_STORE_COMPRESSION_CODEC.key -> codecShortName) { + func + } + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinatorSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinatorSuite.scala index 7bca225dfdd8f..d039c72bb7d18 100644 --- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinatorSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreCoordinatorSuite.scala @@ -41,7 +41,7 @@ class StateStoreCoordinatorSuite extends SparkFunSuite with SharedSparkContext { assert(coordinatorRef.verifyIfInstanceActive(id, "exec1") === false) assert(coordinatorRef.getLocation(id) === None) - coordinatorRef.reportActiveInstance(id, "hostX", "exec1") + coordinatorRef.reportActiveInstance(id, "hostX", "exec1", Seq.empty) eventually(timeout(5.seconds)) { assert(coordinatorRef.verifyIfInstanceActive(id, "exec1")) assert( @@ -49,7 +49,7 @@ class StateStoreCoordinatorSuite extends SparkFunSuite with SharedSparkContext { Some(ExecutorCacheTaskLocation("hostX", "exec1").toString)) } - coordinatorRef.reportActiveInstance(id, "hostX", "exec2") + coordinatorRef.reportActiveInstance(id, "hostX", "exec2", Seq.empty) eventually(timeout(5.seconds)) { assert(coordinatorRef.verifyIfInstanceActive(id, "exec1") === false) @@ -72,9 +72,9 @@ class StateStoreCoordinatorSuite extends SparkFunSuite with SharedSparkContext { val host = "hostX" val exec = "exec1" - coordinatorRef.reportActiveInstance(id1, host, exec) - coordinatorRef.reportActiveInstance(id2, host, exec) - coordinatorRef.reportActiveInstance(id3, host, exec) + coordinatorRef.reportActiveInstance(id1, host, exec, Seq.empty) + coordinatorRef.reportActiveInstance(id2, host, exec, Seq.empty) + coordinatorRef.reportActiveInstance(id3, host, exec, Seq.empty) eventually(timeout(5.seconds)) { assert(coordinatorRef.verifyIfInstanceActive(id1, exec)) @@ -106,7 +106,7 @@ class StateStoreCoordinatorSuite extends SparkFunSuite with SharedSparkContext { val id = StateStoreProviderId(StateStoreId("x", 0, 0), UUID.randomUUID) - coordRef1.reportActiveInstance(id, "hostX", "exec1") + coordRef1.reportActiveInstance(id, "hostX", "exec1", Seq.empty) eventually(timeout(5.seconds)) { assert(coordRef2.verifyIfInstanceActive(id, "exec1")) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreRDDSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreRDDSuite.scala index 015415a534ff5..378aa1dca139f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreRDDSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreRDDSuite.scala @@ -159,8 +159,8 @@ class StateStoreRDDSuite extends SparkFunSuite with BeforeAndAfter with BeforeAn val coordinatorRef = sqlContext.streams.stateStoreCoordinator val storeProviderId1 = StateStoreProviderId(StateStoreId(path, opId, 0), queryRunId) val storeProviderId2 = StateStoreProviderId(StateStoreId(path, opId, 1), queryRunId) - coordinatorRef.reportActiveInstance(storeProviderId1, "host1", "exec1") - coordinatorRef.reportActiveInstance(storeProviderId2, "host2", "exec2") + coordinatorRef.reportActiveInstance(storeProviderId1, "host1", "exec1", Seq.empty) + coordinatorRef.reportActiveInstance(storeProviderId2, "host2", "exec2", Seq.empty) require( coordinatorRef.getLocation(storeProviderId1) === diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala index 488879938339d..291c05fb9078d 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala @@ -390,25 +390,27 @@ class StateStoreSuite extends StateStoreSuiteBase[HDFSBackedStateStoreProvider] val conf = new SparkConf() .setMaster("local") .setAppName("test") - // Make maintenance thread do snapshots and cleanups very fast - .set(StateStore.MAINTENANCE_INTERVAL_CONFIG, "10ms") // Make sure that when SparkContext stops, the StateStore maintenance thread 'quickly' // fails to talk to the StateStoreCoordinator and unloads all the StateStores .set(RPC_NUM_RETRIES, 1) val opId = 0 - val dir = newDir() - val storeProviderId = StateStoreProviderId(StateStoreId(dir, opId, 0), UUID.randomUUID) + val dir1 = newDir() + val storeProviderId1 = StateStoreProviderId(StateStoreId(dir1, opId, 0), UUID.randomUUID) + val dir2 = newDir() + val storeProviderId2 = StateStoreProviderId(StateStoreId(dir2, opId, 1), UUID.randomUUID) val sqlConf = new SQLConf() sqlConf.setConf(SQLConf.MIN_BATCHES_TO_RETAIN, 2) + // Make maintenance thread do snapshots and cleanups very fast + sqlConf.setConf(SQLConf.STREAMING_MAINTENANCE_INTERVAL, 10L) val storeConf = StateStoreConf(sqlConf) val hadoopConf = new Configuration() - val provider = newStoreProvider(storeProviderId.storeId) + val provider = newStoreProvider(storeProviderId1.storeId) var latestStoreVersion = 0 def generateStoreVersions(): Unit = { for (i <- 1 to 20) { - val store = StateStore.get(storeProviderId, keySchema, valueSchema, None, + val store = StateStore.get(storeProviderId1, keySchema, valueSchema, None, latestStoreVersion, storeConf, hadoopConf) put(store, "a", i) store.commit() @@ -428,7 +430,7 @@ class StateStoreSuite extends StateStoreSuiteBase[HDFSBackedStateStoreProvider] eventually(timeout(timeoutDuration)) { // Store should have been reported to the coordinator - assert(coordinatorRef.getLocation(storeProviderId).nonEmpty, + assert(coordinatorRef.getLocation(storeProviderId1).nonEmpty, "active instance was not reported") // Background maintenance should clean up and generate snapshots @@ -452,33 +454,44 @@ class StateStoreSuite extends StateStoreSuiteBase[HDFSBackedStateStoreProvider] // If driver decides to deactivate all stores related to a query run, // then this instance should be unloaded - coordinatorRef.deactivateInstances(storeProviderId.queryRunId) + coordinatorRef.deactivateInstances(storeProviderId1.queryRunId) eventually(timeout(timeoutDuration)) { - assert(!StateStore.isLoaded(storeProviderId)) + assert(!StateStore.isLoaded(storeProviderId1)) } // Reload the store and verify - StateStore.get(storeProviderId, keySchema, valueSchema, indexOrdinal = None, + StateStore.get(storeProviderId1, keySchema, valueSchema, indexOrdinal = None, latestStoreVersion, storeConf, hadoopConf) - assert(StateStore.isLoaded(storeProviderId)) + assert(StateStore.isLoaded(storeProviderId1)) // If some other executor loads the store, then this instance should be unloaded - coordinatorRef.reportActiveInstance(storeProviderId, "other-host", "other-exec") + coordinatorRef + .reportActiveInstance(storeProviderId1, "other-host", "other-exec", Seq.empty) eventually(timeout(timeoutDuration)) { - assert(!StateStore.isLoaded(storeProviderId)) + assert(!StateStore.isLoaded(storeProviderId1)) } // Reload the store and verify - StateStore.get(storeProviderId, keySchema, valueSchema, indexOrdinal = None, + StateStore.get(storeProviderId1, keySchema, valueSchema, indexOrdinal = None, latestStoreVersion, storeConf, hadoopConf) - assert(StateStore.isLoaded(storeProviderId)) 
+ assert(StateStore.isLoaded(storeProviderId1)) + + // If some other executor loads the store, and when this executor loads other store, + // then this executor should unload inactive instances immediately. + coordinatorRef + .reportActiveInstance(storeProviderId1, "other-host", "other-exec", Seq.empty) + StateStore.get(storeProviderId2, keySchema, valueSchema, indexOrdinal = None, + 0, storeConf, hadoopConf) + assert(!StateStore.isLoaded(storeProviderId1)) + assert(StateStore.isLoaded(storeProviderId2)) } } // Verify if instance is unloaded if SparkContext is stopped eventually(timeout(timeoutDuration)) { require(SparkEnv.get === null) - assert(!StateStore.isLoaded(storeProviderId)) + assert(!StateStore.isLoaded(storeProviderId1)) + assert(!StateStore.isLoaded(storeProviderId2)) assert(!StateStore.isMaintenanceRunning) } } @@ -767,6 +780,7 @@ class StateStoreSuite extends StateStoreSuiteBase[HDFSBackedStateStoreProvider] sqlConf.setConf(SQLConf.STATE_STORE_MIN_DELTAS_FOR_SNAPSHOT, minDeltasForSnapshot) sqlConf.setConf(SQLConf.MAX_BATCHES_TO_RETAIN_IN_MEMORY, numOfVersToRetainInMemory) sqlConf.setConf(SQLConf.MIN_BATCHES_TO_RETAIN, 2) + sqlConf.setConf(SQLConf.STATE_STORE_COMPRESSION_CODEC, SQLConf.get.stateStoreCompressionCodec) val provider = new HDFSBackedStateStoreProvider() provider.init( StateStoreId(dir, opId, partition), @@ -815,10 +829,10 @@ class StateStoreSuite extends StateStoreSuiteBase[HDFSBackedStateStoreProvider] } abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider] - extends SparkFunSuite { + extends StateStoreCodecsTest { import StateStoreTestsHelper._ - test("get, put, remove, commit, and all data iterator") { + testWithAllCodec("get, put, remove, commit, and all data iterator") { val provider = newStoreProvider() // Verify state before starting a new set of updates @@ -870,7 +884,7 @@ abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider] assert(getData(provider, version = 1) === Set("b" -> 2)) } - test("removing while iterating") { + testWithAllCodec("removing while iterating") { val provider = newStoreProvider() // Verify state before starting a new set of updates @@ -892,7 +906,7 @@ abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider] assert(get(store, "b") === None) } - test("abort") { + testWithAllCodec("abort") { val provider = newStoreProvider() val store = provider.getStore(0) put(store, "a", 1) @@ -905,7 +919,7 @@ abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider] store1.abort() } - test("getStore with invalid versions") { + testWithAllCodec("getStore with invalid versions") { val provider = newStoreProvider() def checkInvalidVersion(version: Int): Unit = { @@ -939,7 +953,7 @@ abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider] checkInvalidVersion(3) } - test("two concurrent StateStores - one for read-only and one for read-write") { + testWithAllCodec("two concurrent StateStores - one for read-only and one for read-write") { // During Streaming Aggregation, we have two StateStores per task, one used as read-only in // `StateStoreRestoreExec`, and one read-write used in `StateStoreSaveExec`. `StateStore.abort` // will be called for these StateStores if they haven't committed their results. 
We need to @@ -957,7 +971,7 @@ abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider] // two state stores val provider1 = newStoreProvider(storeId) - val restoreStore = provider1.getStore(1) + val restoreStore = provider1.getReadStore(1) val saveStore = provider1.getStore(1) put(saveStore, key, get(restoreStore, key).get + 1) @@ -1033,7 +1047,7 @@ object StateStoreTestsHelper { store.put(stringToRow(key), intToRow(value)) } - def get(store: StateStore, key: String): Option[Int] = { + def get(store: ReadStateStore, key: String): Option[Int] = { Option(store.get(stringToRow(key))).map(rowToInt) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala index f49a3a384b450..00f23718a0e9e 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala @@ -680,7 +680,7 @@ class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTestUtils assert(sparkPlanInfo.nodeName === "WholeStageCodegen (2)") } - test("SPARK-32615: SQLMetrics validation after sparkPlanInfo updated in AQE") { + test("SPARK-32615,SPARK-33016: SQLMetrics validation after sparkPlanInfo updated in AQE") { val statusStore = createStatusStore() val listener = statusStore.listener.get @@ -755,7 +755,7 @@ class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTestUtils .allNodes.flatMap(_.metrics.map(_.accumulatorId)) // Assume that AQE update sparkPlanInfo with newPlan - // ExecutionMetrics will be replaced using newPlan's SQLMetrics + // ExecutionMetrics will be appended using newPlan's SQLMetrics listener.onOtherEvent(SparkListenerSQLAdaptiveExecutionUpdate( executionId, "test", @@ -770,8 +770,8 @@ class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTestUtils listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(1, 0))) listener.onTaskStart(SparkListenerTaskStart(1, 0, createTaskInfo(0, 0))) - // live metrics will be override, and ExecutionMetrics should be empty as the newPlan updated. - assert(statusStore.executionMetrics(executionId).isEmpty) + // historical metrics will be kept even after the newPlan update. + assert(statusStore.executionMetrics(executionId).size == 2) // update new metrics with Id 4 & 5, since 3 is timing metrics, // timing metrics has a complicated string presentation so we don't test it here.
@@ -780,9 +780,9 @@ class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTestUtils (0L, 1, 0, createAccumulatorInfos(newMetricsValueMap)) ))) - assert(statusStore.executionMetrics(executionId).size == 2) + assert(statusStore.executionMetrics(executionId).size == 4) statusStore.executionMetrics(executionId).foreach { m => - assert(m._2 == "500") + assert(m._2 == "100" || m._2 == "500") } listener.onTaskEnd(SparkListenerTaskEnd( @@ -802,10 +802,10 @@ class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTestUtils JobSucceeded )) - // aggregateMetrics should ignore metrics from job 0 + // aggregateMetrics should contain all metrics from job 0 and job 1 val aggregateMetrics = listener.liveExecutionMetrics(executionId) if (aggregateMetrics.isDefined) { - oldAccumulatorIds.foreach(id => assert(!aggregateMetrics.get.contains(id))) + assert(aggregateMetrics.get.keySet.size == 4) } listener.onOtherEvent(SparkListenerSQLExecutionEnd( diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala index a702e00ff9f92..dfc64a41d9f86 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala @@ -24,10 +24,10 @@ class SparkPlanInfoSuite extends SharedSparkSession{ import testImplicits._ - def vaidateSparkPlanInfo(sparkPlanInfo: SparkPlanInfo): Unit = { + def validateSparkPlanInfo(sparkPlanInfo: SparkPlanInfo): Unit = { sparkPlanInfo.nodeName match { case "InMemoryTableScan" => assert(sparkPlanInfo.children.length == 1) - case _ => sparkPlanInfo.children.foreach(vaidateSparkPlanInfo) + case _ => sparkPlanInfo.children.foreach(validateSparkPlanInfo) } } @@ -39,6 +39,6 @@ class SparkPlanInfoSuite extends SharedSparkSession{ val planInfoResult = SparkPlanInfo.fromSparkPlan(dfWithCache.queryExecution.executedPlan) - vaidateSparkPlanInfo(planInfoResult) + validateSparkPlanInfo(planInfoResult) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala index 53f9757750735..438fd2351ab9f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/expressions/ExpressionInfoSuite.scala @@ -43,6 +43,10 @@ class ExpressionInfoSuite extends SparkFunSuite with SharedSparkSession { test("group info in ExpressionInfo") { val info = spark.sessionState.catalog.lookupFunctionInfo(FunctionIdentifier("sum")) assert(info.getGroup === "agg_funcs") + Seq("agg_funcs", "array_funcs", "binary_funcs", "bitwise_funcs", "collection_funcs", + "predicate_funcs", "conditional_funcs", "conversion_funcs", "csv_funcs", "datetime_funcs", + "generator_funcs", "hash_funcs", "json_funcs", "lambda_funcs", "map_funcs", "math_funcs", + "misc_funcs", "string_funcs", "struct_funcs", "window_funcs", "xml_funcs") Seq("agg_funcs", "array_funcs", "datetime_funcs", "json_funcs", "map_funcs", "window_funcs") .foreach { groupName => @@ -105,11 +109,38 @@ class ExpressionInfoSuite extends SparkFunSuite with SharedSparkSession { } } + test("SPARK-32870: Default expressions in FunctionRegistry should have their " + + "usage, examples, since, and group filled") { + val ignoreSet = Set( + // Explicitly inherits NonSQLExpression, and has no ExpressionDescription + 
"org.apache.spark.sql.catalyst.expressions.TimeWindow", + // Cast aliases do not need examples + "org.apache.spark.sql.catalyst.expressions.Cast") + + spark.sessionState.functionRegistry.listFunction().foreach { funcId => + val info = spark.sessionState.catalog.lookupFunctionInfo(funcId) + if (!ignoreSet.contains(info.getClassName)) { + withClue(s"Function '${info.getName}', Expression class '${info.getClassName}'") { + assert(info.getUsage.nonEmpty) + assert(info.getExamples.startsWith("\n Examples:\n")) + assert(info.getExamples.endsWith("\n ")) + assert(info.getSince.matches("[0-9]+\\.[0-9]+\\.[0-9]+")) + assert(info.getGroup.nonEmpty) + + if (info.getArguments.nonEmpty) { + assert(info.getArguments.startsWith("\n Arguments:\n")) + assert(info.getArguments.endsWith("\n ")) + } + } + } + } + } + test("check outputs of expression examples") { def unindentAndTrim(s: String): String = { s.replaceAll("\n\\s+", "\n").trim } - val beginSqlStmtRe = " > ".r + val beginSqlStmtRe = "\n > ".r val endSqlStmtRe = ";\n".r def checkExampleSyntax(example: String): Unit = { val beginStmtNum = beginSqlStmtRe.findAllIn(example).length @@ -123,14 +154,24 @@ class ExpressionInfoSuite extends SparkFunSuite with SharedSparkSession { "org.apache.spark.sql.catalyst.expressions.UnixTimestamp", "org.apache.spark.sql.catalyst.expressions.CurrentDate", "org.apache.spark.sql.catalyst.expressions.CurrentTimestamp", + "org.apache.spark.sql.catalyst.expressions.CurrentTimeZone", "org.apache.spark.sql.catalyst.expressions.Now", // Random output without a seed "org.apache.spark.sql.catalyst.expressions.Rand", "org.apache.spark.sql.catalyst.expressions.Randn", "org.apache.spark.sql.catalyst.expressions.Shuffle", "org.apache.spark.sql.catalyst.expressions.Uuid", + // Other nondeterministic expressions + "org.apache.spark.sql.catalyst.expressions.MonotonicallyIncreasingID", + "org.apache.spark.sql.catalyst.expressions.SparkPartitionID", + "org.apache.spark.sql.catalyst.expressions.InputFileName", + "org.apache.spark.sql.catalyst.expressions.InputFileBlockStart", + "org.apache.spark.sql.catalyst.expressions.InputFileBlockLength", // The example calls methods that return unstable results. - "org.apache.spark.sql.catalyst.expressions.CallMethodViaReflection") + "org.apache.spark.sql.catalyst.expressions.CallMethodViaReflection", + "org.apache.spark.sql.catalyst.expressions.SparkVersion", + // Throws an error + "org.apache.spark.sql.catalyst.expressions.RaiseError") val parFuncs = new ParVector(spark.sessionState.functionRegistry.listFunction().toVector) parFuncs.foreach { funcId => @@ -164,9 +205,16 @@ class ExpressionInfoSuite extends SparkFunSuite with SharedSparkSession { val exprTypesToCheck = Seq(classOf[UnaryExpression], classOf[BinaryExpression], classOf[TernaryExpression], classOf[QuaternaryExpression], classOf[SeptenaryExpression]) - // Do not check these expressions, because these expressions extend NullIntolerant - // and override the eval method to avoid evaluating input1 if input2 is 0. 
- val ignoreSet = Set(classOf[IntegralDivide], classOf[Divide], classOf[Remainder], classOf[Pmod]) + // Do not check these expressions, because these expressions override the eval method + val ignoreSet = Set( + // Extend NullIntolerant and avoid evaluating input1 if input2 is 0 + classOf[IntegralDivide], + classOf[Divide], + classOf[Remainder], + classOf[Pmod], + // Throws an exception, even if input is null + classOf[RaiseError] + ) val candidateExprsToCheck = spark.sessionState.functionRegistry.listFunction() .map(spark.sessionState.catalog.lookupFunctionInfo).map(_.getClassName) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala index 298820349b683..6eb070138c3b8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/internal/CatalogSuite.scala @@ -23,7 +23,7 @@ import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalog.{Column, Database, Function, Table} import org.apache.spark.sql.catalyst.{FunctionIdentifier, ScalaReflection, TableIdentifier} import org.apache.spark.sql.catalyst.catalog._ -import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo} +import org.apache.spark.sql.catalyst.expressions.Expression import org.apache.spark.sql.catalyst.plans.logical.Range import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types.StructType diff --git a/sql/core/src/test/scala/org/apache/spark/sql/internal/ExecutorSideSQLConfSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/internal/ExecutorSideSQLConfSuite.scala index 567524ac75c2e..13b22dba1168b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/internal/ExecutorSideSQLConfSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/internal/ExecutorSideSQLConfSuite.scala @@ -108,7 +108,7 @@ class ExecutorSideSQLConfSuite extends SparkFunSuite with SQLTestUtils { .queryExecution.executedPlan) assert(res.length == 2) assert(res.forall { case (_, code, _) => - (code.contains("* Codegend pipeline") == flag) && + (code.contains("* Codegened pipeline") == flag) && (code.contains("// input[") == flag) }) } @@ -175,7 +175,7 @@ class ExecutorSideSQLConfSuite extends SparkFunSuite with SQLTestUtils { df.hint("broadcast") } - // set local propert and assert + // set local property and assert val df2 = generateBroadcastDataFrame(confKey, confValue1) spark.sparkContext.setLocalProperty(confKey, confValue1) val checks = df1.join(df2).collect() diff --git a/sql/core/src/test/scala/org/apache/spark/sql/internal/SQLConfSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/internal/SQLConfSuite.scala index 77a5d12cd8c95..e699c972268a9 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/internal/SQLConfSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/internal/SQLConfSuite.scala @@ -19,8 +19,6 @@ package org.apache.spark.sql.internal import java.util.TimeZone -import scala.language.reflectiveCalls - import org.apache.hadoop.fs.Path import org.apache.log4j.Level @@ -192,7 +190,7 @@ class SQLConfSuite extends QueryTest with SharedSparkSession { assert(spark.conf.get("spark.app.id") === appId, "Should not change spark core ones") // spark core conf w/ entry registered val e1 = intercept[AnalysisException](sql("RESET spark.executor.cores")) - assert(e1.getMessage === "Cannot modify the value of a Spark config: spark.executor.cores;") + assert(e1.getMessage === 
"Cannot modify the value of a Spark config: spark.executor.cores") // user defined settings sql("SET spark.abc=xyz") @@ -219,7 +217,7 @@ class SQLConfSuite extends QueryTest with SharedSparkSession { // static sql configs val e2 = intercept[AnalysisException](sql(s"RESET ${StaticSQLConf.WAREHOUSE_PATH.key}")) assert(e2.getMessage === - s"Cannot modify the value of a static config: ${StaticSQLConf.WAREHOUSE_PATH.key};") + s"Cannot modify the value of a static config: ${StaticSQLConf.WAREHOUSE_PATH.key}") } @@ -284,23 +282,23 @@ class SQLConfSuite extends QueryTest with SharedSparkSession { } test("static SQL conf comes from SparkConf") { - val previousValue = sparkContext.conf.get(SCHEMA_STRING_LENGTH_THRESHOLD) + val previousValue = sparkContext.conf.get(GLOBAL_TEMP_DATABASE) try { - sparkContext.conf.set(SCHEMA_STRING_LENGTH_THRESHOLD, 2000) + sparkContext.conf.set(GLOBAL_TEMP_DATABASE, "a") val newSession = new SparkSession(sparkContext) - assert(newSession.conf.get(SCHEMA_STRING_LENGTH_THRESHOLD) == 2000) + assert(newSession.conf.get(GLOBAL_TEMP_DATABASE) == "a") checkAnswer( - newSession.sql(s"SET ${SCHEMA_STRING_LENGTH_THRESHOLD.key}"), - Row(SCHEMA_STRING_LENGTH_THRESHOLD.key, "2000")) + newSession.sql(s"SET ${GLOBAL_TEMP_DATABASE.key}"), + Row(GLOBAL_TEMP_DATABASE.key, "a")) } finally { - sparkContext.conf.set(SCHEMA_STRING_LENGTH_THRESHOLD, previousValue) + sparkContext.conf.set(GLOBAL_TEMP_DATABASE, previousValue) } } test("cannot set/unset static SQL conf") { - val e1 = intercept[AnalysisException](sql(s"SET ${SCHEMA_STRING_LENGTH_THRESHOLD.key}=10")) + val e1 = intercept[AnalysisException](sql(s"SET ${GLOBAL_TEMP_DATABASE.key}=10")) assert(e1.message.contains("Cannot modify the value of a static config")) - val e2 = intercept[AnalysisException](spark.conf.unset(SCHEMA_STRING_LENGTH_THRESHOLD.key)) + val e2 = intercept[AnalysisException](spark.conf.unset(GLOBAL_TEMP_DATABASE.key)) assert(e2.message.contains("Cannot modify the value of a static config")) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/internal/SharedStateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/internal/SharedStateSuite.scala index 81bf15342423c..60a899b89e731 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/internal/SharedStateSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/internal/SharedStateSuite.scala @@ -52,4 +52,15 @@ class SharedStateSuite extends SharedSparkSession { assert(conf.isInstanceOf[Configuration]) assert(conf.asInstanceOf[Configuration].get("fs.defaultFS") == "file:///") } + + test("SPARK-33740: hadoop configs in hive-site.xml can overrides pre-existing hadoop ones") { + val conf = new SparkConf() + val hadoopConf = new Configuration() + SharedState.loadHiveConfFile(conf, hadoopConf, Map.empty) + assert(hadoopConf.get("hadoop.tmp.dir") === "/tmp/hive_one") + hadoopConf.clear() + SharedState.loadHiveConfFile( + conf.set("spark.hadoop.hadoop.tmp.dir", "noop"), hadoopConf, Map.empty) + assert(hadoopConf.get("hadoop.tmp.dir") === null) + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/internal/VariableSubstitutionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/internal/VariableSubstitutionSuite.scala index d5a946aeaac31..d5da2553c7186 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/internal/VariableSubstitutionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/internal/VariableSubstitutionSuite.scala @@ -18,12 +18,11 @@ package org.apache.spark.sql.internal import org.apache.spark.SparkFunSuite 
-import org.apache.spark.sql.AnalysisException +import org.apache.spark.sql.catalyst.plans.SQLHelper -class VariableSubstitutionSuite extends SparkFunSuite { +class VariableSubstitutionSuite extends SparkFunSuite with SQLHelper { - private lazy val conf = new SQLConf - private lazy val sub = new VariableSubstitution(conf) + private lazy val sub = new VariableSubstitution() test("system property") { System.setProperty("varSubSuite.var", "abcd") @@ -35,26 +34,26 @@ class VariableSubstitutionSuite extends SparkFunSuite { } test("Spark configuration variable") { - conf.setConfString("some-random-string-abcd", "1234abcd") - assert(sub.substitute("${hiveconf:some-random-string-abcd}") == "1234abcd") - assert(sub.substitute("${sparkconf:some-random-string-abcd}") == "1234abcd") - assert(sub.substitute("${spark:some-random-string-abcd}") == "1234abcd") - assert(sub.substitute("${some-random-string-abcd}") == "1234abcd") + withSQLConf("some-random-string-abcd" -> "1234abcd") { + assert(sub.substitute("${hiveconf:some-random-string-abcd}") == "1234abcd") + assert(sub.substitute("${sparkconf:some-random-string-abcd}") == "1234abcd") + assert(sub.substitute("${spark:some-random-string-abcd}") == "1234abcd") + assert(sub.substitute("${some-random-string-abcd}") == "1234abcd") + } } test("multiple substitutes") { val q = "select ${bar} ${foo} ${doo} this is great" - conf.setConfString("bar", "1") - conf.setConfString("foo", "2") - conf.setConfString("doo", "3") - assert(sub.substitute(q) == "select 1 2 3 this is great") + withSQLConf("bar"-> "1", "foo"-> "2", "doo" -> "3") { + assert(sub.substitute(q) == "select 1 2 3 this is great") + } } test("test nested substitutes") { val q = "select ${bar} ${foo} this is great" - conf.setConfString("bar", "1") - conf.setConfString("foo", "${bar}") - assert(sub.substitute(q) == "select 1 1 this is great") + withSQLConf("bar"-> "1", "foo"-> "${bar}") { + assert(sub.substitute(q) == "select 1 1 this is great") + } } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala index 7af55550a7736..639fd0e6fd0f4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala @@ -20,6 +20,7 @@ package org.apache.spark.sql.jdbc import java.math.BigDecimal import java.sql.{Date, DriverManager, SQLException, Timestamp} import java.util.{Calendar, GregorianCalendar, Properties} +import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ @@ -610,7 +611,13 @@ class JDBCSuite extends QueryTest test("H2 time types") { val rows = sql("SELECT * FROM timetypes").collect() val cal = new GregorianCalendar(java.util.Locale.ROOT) - cal.setTime(rows(0).getAs[java.sql.Timestamp](0)) + val epochMillis = java.time.LocalTime.ofNanoOfDay( + TimeUnit.MILLISECONDS.toNanos(rows(0).getAs[Int](0))) + .atDate(java.time.LocalDate.ofEpochDay(0)) + .atZone(java.time.ZoneId.systemDefault()) + .toInstant() + .toEpochMilli() + cal.setTime(new Date(epochMillis)) assert(cal.get(Calendar.HOUR_OF_DAY) === 12) assert(cal.get(Calendar.MINUTE) === 34) assert(cal.get(Calendar.SECOND) === 56) @@ -625,9 +632,26 @@ class JDBCSuite extends QueryTest assert(cal.get(Calendar.HOUR) === 11) assert(cal.get(Calendar.MINUTE) === 22) assert(cal.get(Calendar.SECOND) === 33) + assert(cal.get(Calendar.MILLISECOND) === 543) assert(rows(0).getAs[java.sql.Timestamp](2).getNanos === 543543000) } + test("SPARK-33888: test TIME types") { + 
val rows = spark.read.jdbc( + urlWithUserAndPass, "TEST.TIMETYPES", new Properties()).collect() + val cachedRows = spark.read.jdbc(urlWithUserAndPass, "TEST.TIMETYPES", new Properties()) + .cache().collect() + val expectedTimeRaw = java.sql.Time.valueOf("12:34:56") + val expectedTimeMillis = Math.toIntExact( + java.util.concurrent.TimeUnit.NANOSECONDS.toMillis( + expectedTimeRaw.toLocalTime().toNanoOfDay() + ) + ) + assert(rows(0).getAs[Int](0) === expectedTimeMillis) + assert(rows(1).getAs[Int](0) === expectedTimeMillis) + assert(cachedRows(0).getAs[Int](0) === expectedTimeMillis) + } + test("test DATE types") { val rows = spark.read.jdbc( urlWithUserAndPass, "TEST.TIMETYPES", new Properties()).collect() @@ -770,9 +794,14 @@ class JDBCSuite extends QueryTest } test("Dialect unregister") { - JdbcDialects.registerDialect(testH2Dialect) - JdbcDialects.unregisterDialect(testH2Dialect) - assert(JdbcDialects.get(urlWithUserAndPass) == NoopDialect) + JdbcDialects.unregisterDialect(H2Dialect) + try { + JdbcDialects.registerDialect(testH2Dialect) + JdbcDialects.unregisterDialect(testH2Dialect) + assert(JdbcDialects.get(urlWithUserAndPass) == NoopDialect) + } finally { + JdbcDialects.registerDialect(H2Dialect) + } } test("Aggregated dialects") { @@ -1413,7 +1442,7 @@ class JDBCSuite extends QueryTest } test("SPARK-24327 verify and normalize a partition column based on a JDBC resolved schema") { - def testJdbcParitionColumn(partColName: String, expectedColumnName: String): Unit = { + def testJdbcPartitionColumn(partColName: String, expectedColumnName: String): Unit = { val df = spark.read.format("jdbc") .option("url", urlWithUserAndPass) .option("dbtable", "TEST.PARTITION") @@ -1434,16 +1463,16 @@ class JDBCSuite extends QueryTest } } - testJdbcParitionColumn("THEID", "THEID") - testJdbcParitionColumn("\"THEID\"", "THEID") + testJdbcPartitionColumn("THEID", "THEID") + testJdbcPartitionColumn("\"THEID\"", "THEID") withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { - testJdbcParitionColumn("ThEiD", "THEID") + testJdbcPartitionColumn("ThEiD", "THEID") } - testJdbcParitionColumn("THE ID", "THE ID") + testJdbcPartitionColumn("THE ID", "THE ID") def testIncorrectJdbcPartitionColumn(partColName: String): Unit = { val errMsg = intercept[AnalysisException] { - testJdbcParitionColumn(partColName, "THEID") + testJdbcPartitionColumn(partColName, "THEID") }.getMessage assert(errMsg.contains(s"User-defined partition column $partColName not found " + "in the JDBC relation:")) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala index 3bcacd03b4a0d..e8157e552d754 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala @@ -111,7 +111,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession { test("read/write with partition info") { withTable("h2.test.abc") { - sql("CREATE TABLE h2.test.abc USING _ AS SELECT * FROM h2.test.people") + sql("CREATE TABLE h2.test.abc AS SELECT * FROM h2.test.people") val df1 = Seq(("evan", 3), ("cathy", 4), ("alex", 5)).toDF("NAME", "ID") val e = intercept[IllegalArgumentException] { df1.write @@ -148,11 +148,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession { Seq(Row("test", "people"), Row("test", "empty_table"))) } - // TODO (SPARK-32603): Operation not allowed: CREATE TABLE ... STORED AS ... 
does not support - // multi-part identifiers test("SQL API: create table as select") { withTable("h2.test.abc") { - sql("CREATE TABLE h2.test.abc USING _ AS SELECT * FROM h2.test.people") + sql("CREATE TABLE h2.test.abc AS SELECT * FROM h2.test.people") checkAnswer(sql("SELECT name, id FROM h2.test.abc"), Seq(Row("fred", 1), Row("mary", 2))) } } @@ -164,15 +162,14 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession { } } - // TODO (SPARK-32603): ParseException: mismatched input 'AS' expecting {'(', 'USING'} test("SQL API: replace table as select") { withTable("h2.test.abc") { intercept[CannotReplaceMissingTableException] { - sql("REPLACE TABLE h2.test.abc USING _ AS SELECT 1 as col") + sql("REPLACE TABLE h2.test.abc AS SELECT 1 as col") } - sql("CREATE OR REPLACE TABLE h2.test.abc USING _ AS SELECT 1 as col") + sql("CREATE OR REPLACE TABLE h2.test.abc AS SELECT 1 as col") checkAnswer(sql("SELECT col FROM h2.test.abc"), Row(1)) - sql("REPLACE TABLE h2.test.abc USING _ AS SELECT * FROM h2.test.people") + sql("REPLACE TABLE h2.test.abc AS SELECT * FROM h2.test.people") checkAnswer(sql("SELECT name, id FROM h2.test.abc"), Seq(Row("fred", 1), Row("mary", 2))) } } @@ -189,11 +186,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession { } } - // TODO (SPARK-32603): Operation not allowed: CREATE TABLE ... STORED AS ... does not support - // multi-part identifiers test("SQL API: insert and overwrite") { withTable("h2.test.abc") { - sql("CREATE TABLE h2.test.abc USING _ AS SELECT * FROM h2.test.people") + sql("CREATE TABLE h2.test.abc AS SELECT * FROM h2.test.people") sql("INSERT INTO h2.test.abc SELECT 'lucy', 3") checkAnswer( @@ -205,11 +200,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession { } } - // TODO (SPARK-32603): Operation not allowed: CREATE TABLE ... STORED AS ... does not support - // multi-part identifiers test("DataFrameWriterV2: insert and overwrite") { withTable("h2.test.abc") { - sql("CREATE TABLE h2.test.abc USING _ AS SELECT * FROM h2.test.people") + sql("CREATE TABLE h2.test.abc AS SELECT * FROM h2.test.people") // `DataFrameWriterV2` is by-name. 
sql("SELECT 3 AS ID, 'lucy' AS NAME").writeTo("h2.test.abc").append() diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala index 3f621e04338a3..1a28523cc939f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala @@ -194,24 +194,29 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter { } test("Truncate") { - JdbcDialects.registerDialect(testH2Dialect) - val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2) - val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema2) - val df3 = spark.createDataFrame(sparkContext.parallelize(arr2x3), schema3) - - df.write.jdbc(url1, "TEST.TRUNCATETEST", properties) - df2.write.mode(SaveMode.Overwrite).option("truncate", true) - .jdbc(url1, "TEST.TRUNCATETEST", properties) - assert(1 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).count()) - assert(2 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).collect()(0).length) + JdbcDialects.unregisterDialect(H2Dialect) + try { + JdbcDialects.registerDialect(testH2Dialect) + val df = spark.createDataFrame(sparkContext.parallelize(arr2x2), schema2) + val df2 = spark.createDataFrame(sparkContext.parallelize(arr1x2), schema2) + val df3 = spark.createDataFrame(sparkContext.parallelize(arr2x3), schema3) - val m = intercept[AnalysisException] { - df3.write.mode(SaveMode.Overwrite).option("truncate", true) + df.write.jdbc(url1, "TEST.TRUNCATETEST", properties) + df2.write.mode(SaveMode.Overwrite).option("truncate", true) .jdbc(url1, "TEST.TRUNCATETEST", properties) - }.getMessage - assert(m.contains("Column \"seq\" not found")) - assert(0 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).count()) - JdbcDialects.unregisterDialect(testH2Dialect) + assert(1 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).count()) + assert(2 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).collect()(0).length) + + val m = intercept[AnalysisException] { + df3.write.mode(SaveMode.Overwrite).option("truncate", true) + .jdbc(url1, "TEST.TRUNCATETEST", properties) + }.getMessage + assert(m.contains("Column \"seq\" not found")) + assert(0 === spark.read.jdbc(url1, "TEST.TRUNCATETEST", properties).count()) + } finally { + JdbcDialects.unregisterDialect(testH2Dialect) + JdbcDialects.registerDialect(H2Dialect) + } } test("createTableOptions") { @@ -385,14 +390,13 @@ class JDBCWriteSuite extends SharedSparkSession with BeforeAndAfter { .foldLeft(new StructType())((schema, colType) => schema.add(colType._1, colType._2)) val createTableColTypes = colTypes.map { case (col, dataType) => s"$col $dataType" }.mkString(", ") - val df = spark.createDataFrame(sparkContext.parallelize(Seq(Row.empty)), schema) val expectedSchemaStr = colTypes.map { case (col, dataType) => s""""$col" $dataType """ }.mkString(", ") assert(JdbcUtils.schemaString( - df.schema, - df.sqlContext.conf.caseSensitiveAnalysis, + schema, + spark.sqlContext.conf.caseSensitiveAnalysis, url1, Option(createTableColTypes)) == expectedSchemaStr) } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala index f8276b143c1e6..9dcc0cfda93f1 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala @@ -27,8 +27,8 @@ import org.apache.spark.sql.catalyst.catalog.BucketSpec import org.apache.spark.sql.catalyst.expressions import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning -import org.apache.spark.sql.execution.{DataSourceScanExec, FileSourceScanExec, SortExec, SparkPlan} -import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec +import org.apache.spark.sql.execution.{FileSourceScanExec, SortExec, SparkPlan} +import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, AdaptiveSparkPlanHelper, DisableAdaptiveExecution} import org.apache.spark.sql.execution.datasources.BucketingUtils import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec import org.apache.spark.sql.execution.joins.SortMergeJoinExec @@ -39,7 +39,8 @@ import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} import org.apache.spark.util.Utils import org.apache.spark.util.collection.BitSet -class BucketedReadWithoutHiveSupportSuite extends BucketedReadSuite with SharedSparkSession { +class BucketedReadWithoutHiveSupportSuite + extends BucketedReadSuite with SharedSparkSession { protected override def beforeAll(): Unit = { super.beforeAll() assert(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory") @@ -47,7 +48,7 @@ class BucketedReadWithoutHiveSupportSuite extends BucketedReadSuite with SharedS } -abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { +abstract class BucketedReadSuite extends QueryTest with SQLTestUtils with AdaptiveSparkPlanHelper { import testImplicits._ protected override def beforeAll(): Unit = { @@ -81,27 +82,29 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { .bucketBy(8, "j", "k") .saveAsTable("bucketed_table") - val bucketValue = Random.nextInt(maxI) - val table = spark.table("bucketed_table").filter($"i" === bucketValue) - val query = table.queryExecution - val output = query.analyzed.output - val rdd = query.toRdd - - assert(rdd.partitions.length == 8) - - val attrs = table.select("j", "k").queryExecution.analyzed.output - val checkBucketId = rdd.mapPartitionsWithIndex((index, rows) => { - val getBucketId = UnsafeProjection.create( - HashPartitioning(attrs, 8).partitionIdExpression :: Nil, - output) - rows.map(row => getBucketId(row).getInt(0) -> index) - }) - checkBucketId.collect().foreach(r => assert(r._1 == r._2)) + withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "false") { + val bucketValue = Random.nextInt(maxI) + val table = spark.table("bucketed_table").filter($"i" === bucketValue) + val query = table.queryExecution + val output = query.analyzed.output + val rdd = query.toRdd + + assert(rdd.partitions.length == 8) + + val attrs = table.select("j", "k").queryExecution.analyzed.output + val checkBucketId = rdd.mapPartitionsWithIndex((index, rows) => { + val getBucketId = UnsafeProjection.create( + HashPartitioning(attrs, 8).partitionIdExpression :: Nil, + output) + rows.map(row => getBucketId(row).getInt(0) -> index) + }) + checkBucketId.collect().foreach(r => assert(r._1 == r._2)) + } } } private def getFileScan(plan: SparkPlan): FileSourceScanExec = { - val fileScan = plan.collect { case f: FileSourceScanExec => f } + val fileScan = collect(plan) { case f: FileSourceScanExec => f } assert(fileScan.nonEmpty, plan) fileScan.head } @@ -111,7 +114,7 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { // 2) Verify the final 
result is the same as the expected one private def checkPrunedAnswers( bucketSpec: BucketSpec, - bucketValues: Seq[Integer], + bucketValues: Seq[Any], filterCondition: Column, originalDataFrame: DataFrame): Unit = { // This test verifies parts of the plan. Disable whole stage codegen. @@ -188,7 +191,7 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { // Case 4: InSet val inSetExpr = expressions.InSet($"j".expr, - Set(bucketValue, bucketValue + 1, bucketValue + 2, bucketValue + 3).map(lit(_).expr)) + Set(bucketValue, bucketValue + 1, bucketValue + 2, bucketValue + 3)) checkPrunedAnswers( bucketSpec, bucketValues = Seq(bucketValue, bucketValue + 1, bucketValue + 2, bucketValue + 3), @@ -243,6 +246,25 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { } } + test("bucket pruning support IsNaN") { + withTable("bucketed_table") { + val numBuckets = NumBucketsForPruningNullDf + val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil) + val naNDF = nullDF.selectExpr("i", "cast(if(isnull(j), 'NaN', j) as double) as j", "k") + // json does not support predicate push-down, and thus json is used here + naNDF.write + .format("json") + .bucketBy(numBuckets, "j") + .saveAsTable("bucketed_table") + + checkPrunedAnswers( + bucketSpec, + bucketValues = Double.NaN :: Nil, + filterCondition = $"j".isNaN, + naNDF) + } + } + test("read partitioning bucketed tables having composite filters") { withTable("bucketed_table") { val numBuckets = NumBucketsForPruningDF @@ -617,13 +639,14 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { withTable("bucketed_table") { df1.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("bucketed_table") val tbl = spark.table("bucketed_table") - val agged = tbl.groupBy("i", "j").agg(max("k")) + val aggregated = tbl.groupBy("i", "j").agg(max("k")) checkAnswer( - agged.sort("i", "j"), + aggregated.sort("i", "j"), df1.groupBy("i", "j").agg(max("k")).sort("i", "j")) - assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty) + assert( + aggregated.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty) } } @@ -657,13 +680,14 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { withTable("bucketed_table") { df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table") val tbl = spark.table("bucketed_table") - val agged = tbl.groupBy("i", "j").agg(max("k")) + val aggregated = tbl.groupBy("i", "j").agg(max("k")) checkAnswer( - agged.sort("i", "j"), + aggregated.sort("i", "j"), df1.groupBy("i", "j").agg(max("k")).sort("i", "j")) - assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty) + assert( + aggregated.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchangeExec]).isEmpty) } } @@ -784,9 +808,9 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { Utils.deleteRecursively(tableDir) df1.write.parquet(tableDir.getAbsolutePath) - val agged = spark.table("bucketed_table").groupBy("i").count() + val aggregated = spark.table("bucketed_table").groupBy("i").count() val error = intercept[Exception] { - agged.count() + aggregated.count() } assert(error.getCause().toString contains "Invalid bucket file") @@ -906,7 +930,9 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { } test("bucket coalescing eliminates shuffle") { - withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "true") { + withSQLConf( + SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key 
-> "true", + SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") { // The side with bucketedTableTestSpec1 will be coalesced to have 4 output partitions. // Currently, sort will be introduced for the side that is coalesced. val testSpec1 = BucketedTableTestSpec( @@ -973,7 +999,8 @@ abstract class BucketedReadSuite extends QueryTest with SQLTestUtils { } } - test("bucket coalescing is applied when join expressions match with partitioning expressions") { + test("bucket coalescing is applied when join expressions match with partitioning expressions", + DisableAdaptiveExecution("Expected shuffle num mismatched")) { withTable("t1", "t2") { df1.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("t1") df2.write.format("parquet").bucketBy(4, "i", "j").saveAsTable("t2") diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala index a410f32d4af7e..0a5feda1bd533 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/BucketedWriteSuite.scala @@ -88,7 +88,7 @@ abstract class BucketedWriteSuite extends QueryTest with SQLTestUtils { val e = intercept[AnalysisException] { df.write.sortBy("j").saveAsTable("tt") } - assert(e.getMessage == "sortBy must be used together with bucketBy;") + assert(e.getMessage == "sortBy must be used together with bucketBy") } test("sorting by non-orderable column") { @@ -102,7 +102,7 @@ abstract class BucketedWriteSuite extends QueryTest with SQLTestUtils { val e = intercept[AnalysisException] { df.write.bucketBy(2, "i").parquet("/tmp/path") } - assert(e.getMessage == "'save' does not support bucketBy right now;") + assert(e.getMessage == "'save' does not support bucketBy right now") } test("write bucketed and sorted data using save()") { @@ -111,7 +111,7 @@ abstract class BucketedWriteSuite extends QueryTest with SQLTestUtils { val e = intercept[AnalysisException] { df.write.bucketBy(2, "i").sortBy("i").parquet("/tmp/path") } - assert(e.getMessage == "'save' does not support bucketBy and sortBy right now;") + assert(e.getMessage == "'save' does not support bucketBy and sortBy right now") } test("write bucketed data using insertInto()") { @@ -120,7 +120,7 @@ abstract class BucketedWriteSuite extends QueryTest with SQLTestUtils { val e = intercept[AnalysisException] { df.write.bucketBy(2, "i").insertInto("tt") } - assert(e.getMessage == "'insertInto' does not support bucketBy right now;") + assert(e.getMessage == "'insertInto' does not support bucketBy right now") } test("write bucketed and sorted data using insertInto()") { @@ -129,7 +129,7 @@ abstract class BucketedWriteSuite extends QueryTest with SQLTestUtils { val e = intercept[AnalysisException] { df.write.bucketBy(2, "i").sortBy("i").insertInto("tt") } - assert(e.getMessage == "'insertInto' does not support bucketBy and sortBy right now;") + assert(e.getMessage == "'insertInto' does not support bucketBy and sortBy right now") } private lazy val df = { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala index 983209051c8ae..9a7c7e0edc409 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala @@ -22,7 +22,7 @@ import java.io.File import 
org.apache.spark.SparkException import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier -import org.apache.spark.sql.catalyst.catalog.BucketSpec +import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogTableType} import org.apache.spark.sql.catalyst.parser.ParseException import org.apache.spark.sql.internal.SQLConf.BUCKETING_MAX_BUCKETS import org.apache.spark.sql.test.SharedSparkSession @@ -166,24 +166,21 @@ class CreateTableAsSelectSuite extends DataSourceTest with SharedSparkSession { ) }.getMessage assert(error.contains("Operation not allowed") && - error.contains("CREATE TEMPORARY TABLE ... USING ... AS query")) + error.contains("CREATE TEMPORARY TABLE")) } } - test("disallows CREATE EXTERNAL TABLE ... USING ... AS query") { + test("SPARK-33651: allow CREATE EXTERNAL TABLE ... USING ... if location is specified") { withTable("t") { - val error = intercept[ParseException] { - sql( - s""" - |CREATE EXTERNAL TABLE t USING PARQUET - |OPTIONS (PATH '${path.toURI}') - |AS SELECT 1 AS a, 2 AS b - """.stripMargin - ) - }.getMessage - - assert(error.contains("Operation not allowed") && - error.contains("CREATE EXTERNAL TABLE ...")) + sql( + s""" + |CREATE EXTERNAL TABLE t USING PARQUET + |OPTIONS (PATH '${path.toURI}') + |AS SELECT 1 AS a, 2 AS b + """.stripMargin) + val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t")) + assert(table.tableType == CatalogTableType.EXTERNAL) + assert(table.location.toString == path.toURI.toString.stripSuffix("/")) } } @@ -237,7 +234,7 @@ class CreateTableAsSelectSuite extends DataSourceTest with SharedSparkSession { } } - test("create table using as select - with overriden max number of buckets") { + test("create table using as select - with overridden max number of buckets") { def createTableSql(numBuckets: Int): String = s""" |CREATE TABLE t USING PARQUET diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala index a6c50904d395b..81ce979ef0b62 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/DataSourceAnalysisSuite.scala @@ -23,12 +23,13 @@ import org.apache.spark.SparkFunSuite import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.expressions.{Alias, AnsiCast, Attribute, Cast, Expression, Literal} +import org.apache.spark.sql.catalyst.plans.SQLHelper import org.apache.spark.sql.execution.datasources.DataSourceAnalysis import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf.StoreAssignmentPolicy import org.apache.spark.sql.types.{DataType, IntegerType, StructType} -class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { +class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll with SQLHelper { private var targetAttributes: Seq[Attribute] = _ private var targetPartitionSchema: StructType = _ @@ -51,19 +52,26 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { } Seq(true, false).foreach { caseSensitive => - val conf = new SQLConf().copy(SQLConf.CASE_SENSITIVE -> caseSensitive) + def testRule(testName: String, caseSensitive: Boolean)(func: => Unit): Unit = { + test(s"$testName (caseSensitive: $caseSensitive)") { + withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) 
{ + func + } + } + } + def cast(e: Expression, dt: DataType): Expression = { - conf.storeAssignmentPolicy match { + SQLConf.get.storeAssignmentPolicy match { case StoreAssignmentPolicy.ANSI | StoreAssignmentPolicy.STRICT => - AnsiCast(e, dt, Option(conf.sessionLocalTimeZone)) + AnsiCast(e, dt, Option(SQLConf.get.sessionLocalTimeZone)) case _ => - Cast(e, dt, Option(conf.sessionLocalTimeZone)) + Cast(e, dt, Option(SQLConf.get.sessionLocalTimeZone)) } } - val rule = DataSourceAnalysis(conf) - test( - s"convertStaticPartitions only handle INSERT having at least static partitions " + - s"(caseSensitive: $caseSensitive)") { + val rule = DataSourceAnalysis + testRule( + "convertStaticPartitions only handle INSERT having at least static partitions", + caseSensitive) { intercept[AssertionError] { rule.convertStaticPartitions( sourceAttributes = Seq('e.int, 'f.int), @@ -73,7 +81,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { } } - test(s"Missing columns (caseSensitive: $caseSensitive)") { + testRule("Missing columns", caseSensitive) { // Missing columns. intercept[AnalysisException] { rule.convertStaticPartitions( @@ -84,7 +92,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { } } - test(s"Missing partitioning columns (caseSensitive: $caseSensitive)") { + testRule("Missing partitioning columns", caseSensitive) { // Missing partitioning columns. intercept[AnalysisException] { rule.convertStaticPartitions( @@ -113,7 +121,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { } } - test(s"Wrong partitioning columns (caseSensitive: $caseSensitive)") { + testRule("Wrong partitioning columns", caseSensitive) { // Wrong partitioning columns. intercept[AnalysisException] { rule.convertStaticPartitions( @@ -144,9 +152,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { } } - test( - s"Static partitions need to appear before dynamic partitions" + - s" (caseSensitive: $caseSensitive)") { + testRule("Static partitions need to appear before dynamic partitions", caseSensitive) { // Static partitions need to appear before dynamic partitions. intercept[AnalysisException] { rule.convertStaticPartitions( @@ -157,7 +163,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { } } - test(s"All static partitions (caseSensitive: $caseSensitive)") { + testRule("All static partitions", caseSensitive) { if (!caseSensitive) { val nonPartitionedAttributes = Seq('e.int, 'f.int) val expected = nonPartitionedAttributes ++ @@ -195,7 +201,7 @@ class DataSourceAnalysisSuite extends SparkFunSuite with BeforeAndAfterAll { } } - test(s"Static partition and dynamic partition (caseSensitive: $caseSensitive)") { + testRule("Static partition and dynamic partition", caseSensitive) { val nonPartitionedAttributes = Seq('e.int, 'f.int) val dynamicPartitionAttributes = Seq('g.int) val expected = diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanSuite.scala new file mode 100644 index 0000000000000..179cdeb976391 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanSuite.scala @@ -0,0 +1,261 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.sources + +import org.apache.spark.sql.QueryTest +import org.apache.spark.sql.catalyst.expressions.AttributeReference +import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning +import org.apache.spark.sql.execution.FileSourceScanExec +import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite} +import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION +import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils} + +class DisableUnnecessaryBucketedScanWithoutHiveSupportSuite + extends DisableUnnecessaryBucketedScanSuite + with SharedSparkSession + with DisableAdaptiveExecutionSuite { + + protected override def beforeAll(): Unit = { + super.beforeAll() + assert(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory") + } +} + +class DisableUnnecessaryBucketedScanWithoutHiveSupportSuiteAE + extends DisableUnnecessaryBucketedScanSuite + with SharedSparkSession + with EnableAdaptiveExecutionSuite { + + protected override def beforeAll(): Unit = { + super.beforeAll() + assert(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory") + } +} + +abstract class DisableUnnecessaryBucketedScanSuite + extends QueryTest + with SQLTestUtils + with AdaptiveSparkPlanHelper { + + import testImplicits._ + + private lazy val df1 = + (0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1") + private lazy val df2 = + (0 until 50).map(i => (i % 7, i % 11, i.toString)).toDF("i", "j", "k").as("df2") + + private def checkDisableBucketedScan( + query: String, + expectedNumScanWithAutoScanEnabled: Int, + expectedNumScanWithAutoScanDisabled: Int): Unit = { + + def checkNumBucketedScan(query: String, expectedNumBucketedScan: Int): Unit = { + val plan = sql(query).queryExecution.executedPlan + val bucketedScan = collect(plan) { case s: FileSourceScanExec if s.bucketedScan => s } + assert(bucketedScan.length == expectedNumBucketedScan) + } + + withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "true") { + checkNumBucketedScan(query, expectedNumScanWithAutoScanEnabled) + val result = sql(query).collect() + + withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "false") { + checkNumBucketedScan(query, expectedNumScanWithAutoScanDisabled) + checkAnswer(sql(query), result) + } + } + } + + test("SPARK-32859: disable unnecessary bucketed table scan - basic test") { + withTable("t1", "t2", "t3") { + df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1") + df2.write.format("parquet").bucketBy(8, "i").saveAsTable("t2") + df2.write.format("parquet").bucketBy(4, "i").saveAsTable("t3") + + Seq( + // Read bucketed table + ("SELECT * FROM t1", 0, 1), + ("SELECT i FROM t1", 0, 1), + ("SELECT j FROM t1", 0, 0), + // Filter on bucketed column + 
("SELECT * FROM t1 WHERE i = 1", 1, 1), + // Filter on non-bucketed column + ("SELECT * FROM t1 WHERE j = 1", 0, 1), + // Join with same buckets + ("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 0, 2), + ("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 2, 2), + ("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 2, 2), + // Join with different buckets + ("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i", 0, 2), + ("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i", 1, 2), + ("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i", 1, 2), + // Join on non-bucketed column + ("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.j", 0, 2), + ("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.j", 1, 2), + ("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.j", 1, 2), + ("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.j = t2.j", 0, 2), + ("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t2 ON t1.j = t2.j", 0, 2), + ("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.j = t2.j", 0, 2), + // Aggregate on bucketed column + ("SELECT SUM(i) FROM t1 GROUP BY i", 1, 1), + // Aggregate on non-bucketed column + ("SELECT SUM(i) FROM t1 GROUP BY j", 0, 1), + ("SELECT j, SUM(i), COUNT(j) FROM t1 GROUP BY j", 0, 1) + ).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) => + checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) + } + } + } + + test("SPARK-32859: disable unnecessary bucketed table scan - multiple joins test") { + withTable("t1", "t2", "t3") { + df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1") + df2.write.format("parquet").bucketBy(8, "i").saveAsTable("t2") + df2.write.format("parquet").bucketBy(4, "i").saveAsTable("t3") + + Seq( + // Multiple joins on bucketed columns + (""" + SELECT /*+ broadcast(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3 + ON t1.i = t2.i AND t2.i = t3.i + """.stripMargin, 0, 3), + (""" + SELECT /*+ broadcast(t1) merge(t3)*/ * FROM t1 JOIN t2 JOIN t3 + ON t1.i = t2.i AND t2.i = t3.i + """.stripMargin, 2, 3), + (""" + SELECT /*+ merge(t1) broadcast(t3)*/ * FROM t1 JOIN t2 JOIN t3 + ON t1.i = t2.i AND t2.i = t3.i + """.stripMargin, 2, 3), + (""" + SELECT /*+ merge(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3 + ON t1.i = t2.i AND t2.i = t3.i + """.stripMargin, 2, 3), + // Multiple joins on non-bucketed columns + (""" + SELECT /*+ broadcast(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3 + ON t1.i = t2.j AND t2.j = t3.i + """.stripMargin, 0, 3), + (""" + SELECT /*+ merge(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3 + ON t1.i = t2.j AND t2.j = t3.i + """.stripMargin, 1, 3), + (""" + SELECT /*+ merge(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3 + ON t1.j = t2.j AND t2.j = t3.j + """.stripMargin, 0, 3) + ).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) => + checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) + } + } + } + + test("SPARK-32859: disable unnecessary bucketed table scan - multiple bucketed columns test") { + withTable("t1", "t2", "t3") { + df1.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("t1") + df2.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("t2") + df2.write.format("parquet").bucketBy(4, "i", "j").saveAsTable("t3") + + Seq( + // Filter on bucketed columns + ("SELECT * FROM t1 WHERE i = 1", 0, 1), + ("SELECT * FROM t1 WHERE i = 1 AND j = 1", 0, 1), + // Join on bucketed columns + (""" + SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i AND t1.j = 
t2.j + """.stripMargin, 0, 2), + (""" + SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i AND t1.j = t2.j + """.stripMargin, 2, 2), + (""" + SELECT /*+ merge(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i AND t1.j = t3.j + """.stripMargin, 1, 2), + ("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 0, 2), + // Aggregate on bucketed columns + ("SELECT i, j, COUNT(*) FROM t1 GROUP BY i, j", 1, 1), + ("SELECT i, COUNT(i) FROM t1 GROUP BY i", 0, 0), + ("SELECT i, COUNT(j) FROM t1 GROUP BY i", 0, 1) + ).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) => + checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) + } + } + } + + test("SPARK-32859: disable unnecessary bucketed table scan - other operators test") { + withTable("t1", "t2", "t3") { + df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1") + df2.write.format("parquet").bucketBy(8, "i").saveAsTable("t2") + df1.write.format("parquet").saveAsTable("t3") + + Seq( + // Operator with interesting partition not in sub-plan + (""" + SELECT t1.i FROM t1 + UNION ALL + (SELECT t2.i FROM t2 GROUP BY t2.i) + """.stripMargin, 1, 2), + // Non-allowed operator in sub-plan + (""" + SELECT COUNT(*) + FROM (SELECT t1.i FROM t1 UNION ALL SELECT t2.i FROM t2) + GROUP BY i + """.stripMargin, 2, 2), + // Multiple [[Exchange]] in sub-plan + (""" + SELECT j, SUM(i), COUNT(*) FROM t1 GROUP BY j + DISTRIBUTE BY j + """.stripMargin, 0, 1), + (""" + SELECT j, COUNT(*) + FROM (SELECT i, j FROM t1 DISTRIBUTE BY i, j) + GROUP BY j + """.stripMargin, 0, 1), + // No bucketed table scan in plan + (""" + SELECT j, COUNT(*) + FROM (SELECT t1.j FROM t1 JOIN t3 ON t1.j = t3.j) + GROUP BY j + """.stripMargin, 0, 0) + ).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) => + checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) + } + } + } + + test("SPARK-33075: not disable bucketed table scan for cached query") { + withTable("t1") { + withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "true") { + df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1") + spark.catalog.cacheTable("t1") + assertCached(spark.table("t1")) + + // Verify cached bucketed table scan not disabled + val partitioning = stripAQEPlan(spark.table("t1").queryExecution.executedPlan) + .outputPartitioning + assert(partitioning match { + case HashPartitioning(Seq(column: AttributeReference), 8) if column.name == "i" => true + case _ => false + }) + val aggregateQueryPlan = sql("SELECT SUM(i) FROM t1 GROUP BY i").queryExecution.executedPlan + assert(find(aggregateQueryPlan)(_.isInstanceOf[ShuffleExchangeExec]).isEmpty) + } + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala index abd33ab8a8f22..bfd04ffaaf754 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/InsertSuite.scala @@ -756,6 +756,47 @@ class InsertSuite extends DataSourceTest with SharedSparkSession { } } + test("SPARK-33354: Throw exceptions on inserting invalid cast with ANSI casting policy") { + withSQLConf( + SQLConf.STORE_ASSIGNMENT_POLICY.key -> SQLConf.StoreAssignmentPolicy.ANSI.toString) { + withTable("t") { + sql("CREATE TABLE t(i int, t timestamp) USING parquet") + val msg = intercept[AnalysisException] { + sql("INSERT INTO t VALUES (TIMESTAMP('2010-09-02 14:10:10'), 1)") + }.getMessage 
+ assert(msg.contains("Cannot safely cast 'i': timestamp to int")) + assert(msg.contains("Cannot safely cast 't': int to timestamp")) + } + + withTable("t") { + sql("CREATE TABLE t(i int, d date) USING parquet") + val msg = intercept[AnalysisException] { + sql("INSERT INTO t VALUES (date('2010-09-02'), 1)") + }.getMessage + assert(msg.contains("Cannot safely cast 'i': date to int")) + assert(msg.contains("Cannot safely cast 'd': int to date")) + } + + withTable("t") { + sql("CREATE TABLE t(b boolean, t timestamp) USING parquet") + val msg = intercept[AnalysisException] { + sql("INSERT INTO t VALUES (TIMESTAMP('2010-09-02 14:10:10'), true)") + }.getMessage + assert(msg.contains("Cannot safely cast 'b': timestamp to boolean")) + assert(msg.contains("Cannot safely cast 't': boolean to timestamp")) + } + + withTable("t") { + sql("CREATE TABLE t(b boolean, d date) USING parquet") + val msg = intercept[AnalysisException] { + sql("INSERT INTO t VALUES (date('2010-09-02'), true)") + }.getMessage + assert(msg.contains("Cannot safely cast 'b': date to boolean")) + assert(msg.contains("Cannot safely cast 'd': boolean to date")) + } + } + } + test("SPARK-30844: static partition should also follow StoreAssignmentPolicy") { SQLConf.StoreAssignmentPolicy.values.foreach { policy => withSQLConf( @@ -804,7 +845,7 @@ class InsertSuite extends DataSourceTest with SharedSparkSession { .add("s", StringType, false) val newTable = CatalogTable( identifier = TableIdentifier("test_table", None), - tableType = CatalogTableType.EXTERNAL, + tableType = CatalogTableType.MANAGED, storage = CatalogStorageFormat( locationUri = None, inputFormat = None, @@ -826,21 +867,29 @@ class InsertSuite extends DataSourceTest with SharedSparkSession { } test("Stop task set if FileAlreadyExistsException was thrown") { - withSQLConf("fs.file.impl" -> classOf[FileExistingTestFileSystem].getName, - "fs.file.impl.disable.cache" -> "true") { - withTable("t") { - sql( - """ - |CREATE TABLE t(i INT, part1 INT) USING PARQUET - |PARTITIONED BY (part1) + Seq(true, false).foreach { fastFail => + withSQLConf("fs.file.impl" -> classOf[FileExistingTestFileSystem].getName, + "fs.file.impl.disable.cache" -> "true", + SQLConf.FASTFAIL_ON_FILEFORMAT_OUTPUT.key -> fastFail.toString) { + withTable("t") { + sql( + """ + |CREATE TABLE t(i INT, part1 INT) USING PARQUET + |PARTITIONED BY (part1) """.stripMargin) - val df = Seq((1, 1)).toDF("i", "part1") - val err = intercept[SparkException] { - df.write.mode("overwrite").format("parquet").insertInto("t") + val df = Seq((1, 1)).toDF("i", "part1") + val err = intercept[SparkException] { + df.write.mode("overwrite").format("parquet").insertInto("t") + } + + if (fastFail) { + assert(err.getCause.getMessage.contains("can not write to output file: " + + "org.apache.hadoop.fs.FileAlreadyExistsException")) + } else { + assert(err.getCause.getMessage.contains("Task failed while writing rows")) + } } - assert(err.getCause.getMessage.contains("can not write to output file: " + - "org.apache.hadoop.fs.FileAlreadyExistsException")) } } } @@ -866,6 +915,45 @@ class InsertSuite extends DataSourceTest with SharedSparkSession { }.getMessage assert(message.contains("LOCAL is supported only with file: scheme")) } + + test("SPARK-32508 " + + "Disallow empty part col values in partition spec before static partition writing") { + withTable("insertTable") { + sql( + """ + |CREATE TABLE insertTable(i int, part1 string, part2 string) USING PARQUET + |PARTITIONED BY (part1, part2) + """.stripMargin) + val msg = "Partition spec is 
invalid" + assert(intercept[AnalysisException] { + sql("INSERT INTO TABLE insertTable PARTITION(part1=1, part2='') SELECT 1") + }.getMessage.contains(msg)) + assert(intercept[AnalysisException] { + sql("INSERT INTO TABLE insertTable PARTITION(part1='', part2) SELECT 1 ,'' AS part2") + }.getMessage.contains(msg)) + + sql("INSERT INTO TABLE insertTable PARTITION(part1='1', part2='2') SELECT 1") + sql("INSERT INTO TABLE insertTable PARTITION(part1='1', part2) SELECT 1 ,'2' AS part2") + sql("INSERT INTO TABLE insertTable PARTITION(part1='1', part2) SELECT 1 ,'' AS part2") + } + } + + test("SPARK-33294: Add query resolved check before analyze InsertIntoDir") { + withTempPath { path => + val msg = intercept[AnalysisException] { + sql( + s""" + |INSERT OVERWRITE DIRECTORY '${path.getAbsolutePath}' USING PARQUET + |SELECT * FROM ( + | SELECT c3 FROM ( + | SELECT c1, c2 from values(1,2) t(c1, c2) + | ) + |) + """.stripMargin) + }.getMessage + assert(msg.contains("cannot resolve '`c3`' given input columns")) + } + } } class FileExistingTestFileSystem extends RawLocalFileSystem { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala index 6df1c5db14c26..b9266429f81a5 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/PartitionedWriteSuite.scala @@ -20,7 +20,8 @@ package org.apache.spark.sql.sources import java.io.File import java.sql.Timestamp -import org.apache.hadoop.mapreduce.TaskAttemptContext +import org.apache.hadoop.fs.Path +import org.apache.hadoop.mapreduce.{JobContext, TaskAttemptContext} import org.apache.spark.TestUtils import org.apache.spark.internal.Logging @@ -161,7 +162,51 @@ class PartitionedWriteSuite extends QueryTest with SharedSparkSession { withTempPath { f => val e = intercept[AnalysisException]( Seq((3, 2)).toDF("a", "b").write.partitionBy("b", "b").csv(f.getAbsolutePath)) - assert(e.getMessage.contains("Found duplicate column(s) b, b: `b`;")) + assert(e.getMessage.contains("Found duplicate column(s) b, b: `b`")) } } + + test("SPARK-27194 SPARK-29302: Fix commit collision in dynamic partition overwrite mode") { + withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key -> + SQLConf.PartitionOverwriteMode.DYNAMIC.toString, + SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key -> + classOf[PartitionFileExistCommitProtocol].getName) { + withTempDir { d => + withTable("t") { + sql( + s""" + | create table t(c1 int, p1 int) using parquet partitioned by (p1) + | location '${d.getAbsolutePath}' + """.stripMargin) + + val df = Seq((1, 2)).toDF("c1", "p1") + df.write + .partitionBy("p1") + .mode("overwrite") + .saveAsTable("t") + checkAnswer(sql("select * from t"), df) + } + } + } + } +} + +/** + * A file commit protocol with pre-created partition file. 
when try to overwrite partition dir + * in dynamic partition mode, FileAlreadyExist exception would raise without SPARK-27194 + */ +private class PartitionFileExistCommitProtocol( + jobId: String, + path: String, + dynamicPartitionOverwrite: Boolean) + extends SQLHadoopMapReduceCommitProtocol(jobId, path, dynamicPartitionOverwrite) { + override def setupJob(jobContext: JobContext): Unit = { + super.setupJob(jobContext) + val stagingDir = new File(new Path(path).toUri.getPath, s".spark-staging-$jobId") + stagingDir.mkdirs() + val stagingPartDir = new File(stagingDir, "p1=2") + stagingPartDir.mkdirs() + val conflictTaskFile = new File(stagingPartDir, s"part-00000-$jobId.c000.snappy.parquet") + conflictTaskFile.createNewFile() + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/PathOptionSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/PathOptionSuite.scala index 9b26a5659df49..48d717daf00d4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/PathOptionSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/PathOptionSuite.scala @@ -19,8 +19,6 @@ package org.apache.spark.sql.sources import java.net.URI -import org.apache.hadoop.fs.Path - import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession, SQLContext} import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.CatalogUtils diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala index 9a95bf770772e..0da6b487e31ee 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/TableScanSuite.scala @@ -22,6 +22,7 @@ import java.sql.{Date, Timestamp} import org.apache.spark.rdd.RDD import org.apache.spark.sql._ +import org.apache.spark.sql.catalyst.util.CharVarcharUtils import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ @@ -127,7 +128,7 @@ class TableScanSuite extends DataSourceTest with SharedSparkSession { Date.valueOf("1970-01-01"), new Timestamp(20000 + i), s"varchar_$i", - s"char_$i", + s"char_$i".padTo(18, ' '), Seq(i, i + 1), Seq(Map(s"str_$i" -> Row(i.toLong))), Map(i -> i.toString), @@ -206,10 +207,6 @@ class TableScanSuite extends DataSourceTest with SharedSparkSession { (2 to 10).map(i => Row(i, i - 1)).toSeq) test("Schema and all fields") { - def hiveMetadata(dt: String): Metadata = { - new MetadataBuilder().putString(HIVE_TYPE_STRING, dt).build() - } - val expectedSchema = StructType( StructField("string$%Field", StringType, true) :: StructField("binaryField", BinaryType, true) :: @@ -224,8 +221,8 @@ class TableScanSuite extends DataSourceTest with SharedSparkSession { StructField("decimalField2", DecimalType(9, 2), true) :: StructField("dateField", DateType, true) :: StructField("timestampField", TimestampType, true) :: - StructField("varcharField", StringType, true, hiveMetadata("varchar(12)")) :: - StructField("charField", StringType, true, hiveMetadata("char(18)")) :: + StructField("varcharField", VarcharType(12), true) :: + StructField("charField", CharType(18), true) :: StructField("arrayFieldSimple", ArrayType(IntegerType), true) :: StructField("arrayFieldComplex", ArrayType( @@ -248,7 +245,8 @@ class TableScanSuite extends DataSourceTest with SharedSparkSession { Nil ) - assert(expectedSchema == spark.table("tableWithSchema").schema) + 
assert(CharVarcharUtils.replaceCharVarcharWithStringInSchema(expectedSchema) == + spark.table("tableWithSchema").schema) withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") { checkAnswer( @@ -361,7 +359,7 @@ class TableScanSuite extends DataSourceTest with SharedSparkSession { val schemaNotMatch = intercept[Exception] { sql( s""" - |CREATE $tableType relationProvierWithSchema (i int) + |CREATE $tableType relationProviderWithSchema (i int) |USING org.apache.spark.sql.sources.SimpleScanSource |OPTIONS ( | From '1', @@ -375,7 +373,7 @@ class TableScanSuite extends DataSourceTest with SharedSparkSession { val schemaNeeded = intercept[Exception] { sql( s""" - |CREATE $tableType schemaRelationProvierWithoutSchema + |CREATE $tableType schemaRelationProviderWithoutSchema |USING org.apache.spark.sql.sources.AllDataTypesScanSource |OPTIONS ( | From '1', @@ -389,7 +387,7 @@ class TableScanSuite extends DataSourceTest with SharedSparkSession { test("read the data source tables that do not extend SchemaRelationProvider") { Seq("TEMPORARY VIEW", "TABLE").foreach { tableType => - val tableName = "relationProvierWithSchema" + val tableName = "relationProviderWithSchema" withTable (tableName) { sql( s""" diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala index cf9664a9764be..6b9fa9c968fb4 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala @@ -19,6 +19,8 @@ package org.apache.spark.sql.streaming import java.io.File import java.net.URI +import java.time.{LocalDateTime, ZoneOffset} +import java.time.format.DateTimeFormatter import java.util.concurrent.atomic.AtomicLong import scala.collection.mutable @@ -40,7 +42,6 @@ import org.apache.spark.sql.execution.streaming.sources.MemorySink import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.streaming.util.StreamManualClock import org.apache.spark.sql.test.SharedSparkSession -import org.apache.spark.sql.types._ import org.apache.spark.sql.types.{StructType, _} import org.apache.spark.util.Utils @@ -412,7 +413,7 @@ class FileStreamSourceSuite extends FileStreamSourceTest { createFileStreamSourceAndGetSchema( format = Some("json"), path = Some(src.getCanonicalPath), schema = None) } - assert("Unable to infer schema for JSON. It must be specified manually.;" === e.getMessage) + assert("Unable to infer schema for JSON. It must be specified manually." 
=== e.getMessage) } } } @@ -1375,6 +1376,70 @@ class FileStreamSourceSuite extends FileStreamSourceTest { } } + test("restore from file stream source log") { + def createEntries(batchId: Long, count: Int): Array[FileEntry] = { + (1 to count).map { idx => + FileEntry(s"path_${batchId}_$idx", 10000 * batchId + count, batchId) + }.toArray + } + + withSQLConf(SQLConf.FILE_SOURCE_LOG_COMPACT_INTERVAL.key -> "5") { + def verifyBatchAvailabilityInCache( + fileEntryCache: java.util.LinkedHashMap[Long, Array[FileEntry]], + expectNotAvailable: Seq[Int], + expectAvailable: Seq[Int]): Unit = { + expectNotAvailable.foreach { batchId => + assert(!fileEntryCache.containsKey(batchId.toLong)) + } + expectAvailable.foreach { batchId => + assert(fileEntryCache.containsKey(batchId.toLong)) + } + } + withTempDir { chk => + val _fileEntryCache = PrivateMethod[java.util.LinkedHashMap[Long, Array[FileEntry]]]( + Symbol("fileEntryCache")) + + val metadata = new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, + chk.getCanonicalPath) + val fileEntryCache = metadata invokePrivate _fileEntryCache() + + (0 to 4).foreach { batchId => + metadata.add(batchId, createEntries(batchId, 100)) + } + val allFiles = metadata.allFiles() + + // batch 4 is a compact batch which logs would be cached in fileEntryCache + verifyBatchAvailabilityInCache(fileEntryCache, Seq(0, 1, 2, 3), Seq(4)) + + val metadata2 = new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, + chk.getCanonicalPath) + val fileEntryCache2 = metadata2 invokePrivate _fileEntryCache() + + // allFiles() doesn't restore the logs for the latest compact batch into file entry cache + assert(metadata2.allFiles() === allFiles) + verifyBatchAvailabilityInCache(fileEntryCache2, Seq(0, 1, 2, 3, 4), Seq.empty) + + // restore() will restore the logs for the latest compact batch into file entry cache + assert(metadata2.restore() === allFiles) + verifyBatchAvailabilityInCache(fileEntryCache2, Seq(0, 1, 2, 3), Seq(4)) + + (5 to 5 + FileStreamSourceLog.PREV_NUM_BATCHES_TO_READ_IN_RESTORE).foreach { batchId => + metadata2.add(batchId, createEntries(batchId, 100)) + } + + val metadata3 = new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, + chk.getCanonicalPath) + val fileEntryCache3 = metadata3 invokePrivate _fileEntryCache() + + // restore() will not restore the logs for the latest compact batch into file entry cache + // if the latest batch is too far from latest compact batch, because it's unlikely Spark + // will request the batch for the start point. 
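// Rough sketch, not part of the patch, of the compaction cadence relied on above
// (mirroring the (batchId + 1) % compactInterval convention): with
// spark.sql.streaming.fileSource.log.compactInterval = 5, batches 4, 9, 14, ... are the
// compact batches, which is why only batch 4's entries are expected in the file entry cache.
def isCompactionBatch(batchId: Long, compactInterval: Int): Boolean =
  (batchId + 1) % compactInterval == 0
assert((0L to 9L).filter(isCompactionBatch(_, 5)) == Seq(4L, 9L))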
+ assert(metadata3.restore() === metadata2.allFiles()) + verifyBatchAvailabilityInCache(fileEntryCache3, Seq(0, 1, 2, 3, 4), Seq.empty) + } + } + } + test("get arbitrary batch from FileStreamSource") { withTempDirs { case (src, tmp) => withSQLConf( @@ -1881,9 +1946,9 @@ class FileStreamSourceSuite extends FileStreamSourceTest { test("SourceFileArchiver - fail when base archive path matches source pattern") { val fakeFileSystem = new FakeFileSystem("fake") - def assertThrowIllegalArgumentException(sourcePatttern: Path, baseArchivePath: Path): Unit = { + def assertThrowIllegalArgumentException(sourcePattern: Path, baseArchivePath: Path): Unit = { intercept[IllegalArgumentException] { - new SourceFileArchiver(fakeFileSystem, sourcePatttern, fakeFileSystem, baseArchivePath) + new SourceFileArchiver(fakeFileSystem, sourcePattern, fakeFileSystem, baseArchivePath) } } @@ -2054,6 +2119,47 @@ class FileStreamSourceSuite extends FileStreamSourceTest { } } + test("SPARK-31962: file stream source shouldn't allow modifiedBefore/modifiedAfter") { + def formatTime(time: LocalDateTime): String = { + time.format(DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss")) + } + + def assertOptionIsNotSupported(options: Map[String, String], path: String): Unit = { + val schema = StructType(Seq(StructField("a", StringType))) + var dsReader = spark.readStream + .format("csv") + .option("timeZone", "UTC") + .schema(schema) + + options.foreach { case (k, v) => dsReader = dsReader.option(k, v) } + + val df = dsReader.load(path) + + testStream(df)( + ExpectFailure[IllegalArgumentException]( + t => assert(t.getMessage.contains("is not allowed in file stream source")), + isFatalError = false) + ) + } + + withTempDir { dir => + // "modifiedBefore" + val futureTime = LocalDateTime.now(ZoneOffset.UTC).plusYears(1) + val formattedFutureTime = formatTime(futureTime) + assertOptionIsNotSupported(Map("modifiedBefore" -> formattedFutureTime), dir.getCanonicalPath) + + // "modifiedAfter" + val prevTime = LocalDateTime.now(ZoneOffset.UTC).minusYears(1) + val formattedPrevTime = formatTime(prevTime) + assertOptionIsNotSupported(Map("modifiedAfter" -> formattedPrevTime), dir.getCanonicalPath) + + // both + assertOptionIsNotSupported( + Map("modifiedBefore" -> formattedFutureTime, "modifiedAfter" -> formattedPrevTime), + dir.getCanonicalPath) + } + } + private def createFile(content: String, src: File, tmp: File): File = { val tempFile = Utils.tempFileWith(new File(tmp, "text")) val finalFile = new File(src, tempFile.getName) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FlatMapGroupsWithStateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FlatMapGroupsWithStateSuite.scala index e2887e78b0508..788be539fe073 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/FlatMapGroupsWithStateSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/FlatMapGroupsWithStateSuite.scala @@ -21,12 +21,11 @@ import java.io.File import java.sql.Date import org.apache.commons.io.FileUtils -import org.scalatest.BeforeAndAfterAll import org.scalatest.exceptions.TestFailedException import org.apache.spark.SparkException import org.apache.spark.api.java.function.FlatMapGroupsWithStateFunction -import org.apache.spark.sql.Encoder +import org.apache.spark.sql.{DataFrame, Encoder} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow} import 
org.apache.spark.sql.catalyst.plans.logical.FlatMapGroupsWithState @@ -34,7 +33,7 @@ import org.apache.spark.sql.catalyst.plans.physical.UnknownPartitioning import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._ import org.apache.spark.sql.execution.RDDScanExec import org.apache.spark.sql.execution.streaming._ -import org.apache.spark.sql.execution.streaming.state.{FlatMapGroupsWithStateExecHelper, MemoryStateStore, StateStore, StateStoreId, StateStoreMetrics, UnsafeRowPair} +import org.apache.spark.sql.execution.streaming.state.{FlatMapGroupsWithStateExecHelper, MemoryStateStore, StateStore} import org.apache.spark.sql.functions.timestamp_seconds import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.streaming.util.StreamManualClock @@ -1020,6 +1019,56 @@ class FlatMapGroupsWithStateSuite extends StateStoreMetricsTest { spark.createDataset(Seq(("a", 2), ("b", 1))).toDF) } + testWithAllStateVersions("SPARK-29438: ensure UNION doesn't lead (flat)MapGroupsWithState" + + " to use shifted partition IDs") { + val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => { + val count = state.getOption.map(_.count).getOrElse(0L) + values.size + state.update(RunningCount(count)) + (key, count.toString) + } + + def constructUnionDf(desiredPartitionsForInput1: Int) + : (MemoryStream[String], MemoryStream[String], DataFrame) = { + val input1 = MemoryStream[String](desiredPartitionsForInput1) + val input2 = MemoryStream[String] + val df1 = input1.toDF() + .select($"value", $"value") + val df2 = input2.toDS() + .groupByKey(x => x) + .mapGroupsWithState(stateFunc) // Types = State: MyState, Out: (Str, Str) + .toDF() + + // Unioned DF would have columns as (String, String) + (input1, input2, df1.union(df2)) + } + + withTempDir { checkpointDir => + val (input1, input2, unionDf) = constructUnionDf(2) + testStream(unionDf, Update)( + StartStream(checkpointLocation = checkpointDir.getAbsolutePath), + MultiAddData(input1, "input1-a")(input2, "input2-a"), + CheckNewAnswer(("input1-a", "input1-a"), ("input2-a", "1")), + StopStream + ) + + // We're restoring the query with different number of partitions in left side of UNION, + // which may lead right side of union to have mismatched partition IDs (e.g. if it relies on + // TaskContext.partitionId()). This test will verify (flat)MapGroupsWithState doesn't have + // such issue. 
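// Illustrative arithmetic, not part of the patch: UNION concatenates the partitions of its
// legs, so a task for partition i of the right leg runs with a global partition ID of
// (left-leg partition count) + i. State keyed off the raw task partition ID would therefore
// move whenever the left leg's partition count changes across restarts.
def rightLegTaskPartitionId(leftLegPartitions: Int, rightLegIndex: Int): Int =
  leftLegPartitions + rightLegIndex
// First run with 2 left partitions vs. a restart with 3: the same right-leg partition shifts.
assert(rightLegTaskPartitionId(2, 0) == 2 && rightLegTaskPartitionId(3, 0) == 3)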
+ + val (newInput1, newInput2, newUnionDf) = constructUnionDf(3) + + newInput1.addData("input1-a") + newInput2.addData("input2-a") + + testStream(newUnionDf, Update)( + StartStream(checkpointLocation = checkpointDir.getAbsolutePath), + MultiAddData(newInput1, "input1-a")(newInput2, "input2-a", "input2-b"), + CheckNewAnswer(("input1-a", "input1-a"), ("input2-a", "2"), ("input2-b", "1")) + ) + } + } + testQuietly("StateStore.abort on task failure handling") { val stateFunc = (key: String, values: Iterator[String], state: GroupState[RunningCount]) => { if (FlatMapGroupsWithStateSuite.failInTask) throw new Exception("expected failure") @@ -1274,7 +1323,9 @@ class FlatMapGroupsWithStateSuite extends StateStoreMetricsTest { def testWithAllStateVersions(name: String)(func: => Unit): Unit = { for (version <- FlatMapGroupsWithStateExecHelper.supportedVersions) { test(s"$name - state format version $version") { - withSQLConf(SQLConf.FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION.key -> version.toString) { + withSQLConf( + SQLConf.FLATMAPGROUPSWITHSTATE_STATE_FORMAT_VERSION.key -> version.toString, + SQLConf.STATEFUL_OPERATOR_CHECK_CORRECTNESS_ENABLED.key -> "false") { func } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala index 9f3ff1a6708e4..440fe997ae133 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala @@ -36,7 +36,6 @@ import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart} import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.plans.logical.Range import org.apache.spark.sql.catalyst.streaming.{InternalOutputModes, StreamingRelationV2} -import org.apache.spark.sql.catalyst.util.DateTimeConstants.MICROS_PER_MILLIS import org.apache.spark.sql.catalyst.util.DateTimeUtils import org.apache.spark.sql.execution.{LocalLimitExec, SimpleMode, SparkPlan} import org.apache.spark.sql.execution.command.ExplainCommand @@ -47,7 +46,7 @@ import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.sources.StreamSourceProvider import org.apache.spark.sql.streaming.util.{BlockOnStopSourceProvider, StreamManualClock} -import org.apache.spark.sql.types.{IntegerType, StructField, StructType} +import org.apache.spark.sql.types.{IntegerType, LongType, StructField, StructType} import org.apache.spark.util.Utils class StreamSuite extends StreamTest { @@ -1065,13 +1064,13 @@ class StreamSuite extends StreamTest { } test("SPARK-30657: streaming limit should not apply on limits on state subplans") { - val streanData = MemoryStream[Int] - val streamingDF = streanData.toDF().toDF("value") + val streamData = MemoryStream[Int] + val streamingDF = streamData.toDF().toDF("value") val staticDF = spark.createDataset(Seq(1)).toDF("value").orderBy("value") testStream(streamingDF.join(staticDF.limit(1), "value"))( - AddData(streanData, 1, 2, 3), + AddData(streamData, 1, 2, 3), CheckAnswer(Row(1)), - AddData(streanData, 1, 3, 5), + AddData(streamData, 1, 3, 5), CheckAnswer(Row(1), Row(1))) } @@ -1131,11 +1130,11 @@ class StreamSuite extends StreamTest { verifyLocalLimit(inputDF.dropDuplicates().repartition(1).limit(1), expectStreamingLimit = false) // Should be LocalLimitExec in the first place, not from optimization of StreamingLocalLimitExec - val staticDF = spark.range(1).toDF("value").limit(1) + val staticDF = 
spark.range(2).toDF("value").limit(1) verifyLocalLimit(inputDF.toDF("value").join(staticDF, "value"), expectStreamingLimit = false) verifyLocalLimit( - inputDF.groupBy().count().limit(1), + inputDF.groupBy("value").count().limit(1), expectStreamingLimit = false, outputMode = OutputMode.Complete()) } @@ -1268,7 +1267,7 @@ class StreamSuite extends StreamTest { } abstract class FakeSource extends StreamSourceProvider { - private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil) + private val fakeSchema = StructType(StructField("a", LongType) :: Nil) override def sourceSchema( spark: SQLContext, @@ -1290,7 +1289,7 @@ class FakeDefaultSource extends FakeSource { new Source { private var offset = -1L - override def schema: StructType = StructType(StructField("a", IntegerType) :: Nil) + override def schema: StructType = StructType(StructField("a", LongType) :: Nil) override def getOffset: Option[Offset] = { if (offset >= 10) { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala index 7a2e29f1258ae..624b630401f47 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamTest.scala @@ -19,7 +19,6 @@ package org.apache.spark.sql.streaming import scala.collection.mutable import scala.collection.mutable.ArrayBuffer -import scala.language.experimental.macros import scala.reflect.ClassTag import scala.util.Random import scala.util.control.NonFatal diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala index cb69460ca1580..491b0d8b2c26c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala @@ -20,14 +20,14 @@ package org.apache.spark.sql.streaming import java.io.File import java.util.{Locale, TimeZone} -import scala.collection.mutable +import scala.annotation.tailrec import org.apache.commons.io.FileUtils import org.scalatest.Assertions import org.apache.spark.{SparkEnv, SparkException} import org.apache.spark.rdd.BlockRDD -import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, SparkSession} +import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, Row, SparkSession} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.plans.logical.Aggregate import org.apache.spark.sql.catalyst.util.DateTimeConstants._ @@ -35,7 +35,7 @@ import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode} import org.apache.spark.sql.execution.exchange.Exchange import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.sources.MemorySink -import org.apache.spark.sql.execution.streaming.state.StreamingAggregationStateManager +import org.apache.spark.sql.execution.streaming.state.{StateSchemaNotCompatible, StateStore, StreamingAggregationStateManager} import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.streaming.OutputMode._ @@ -337,6 +337,49 @@ class StreamingAggregationSuite extends StateStoreMetricsTest with Assertions { ) } + testWithAllStateVersions("SPARK-29438: ensure UNION doesn't lead streaming aggregation to use" + + " shifted partition IDs") { + def 
constructUnionDf(desiredPartitionsForInput1: Int) + : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { + val input1 = MemoryStream[Int](desiredPartitionsForInput1) + val input2 = MemoryStream[Int] + val df1 = input1.toDF() + .select($"value", $"value" + 1) + val df2 = input2.toDF() + .groupBy($"value") + .agg(count("*")) + + // Unioned DF would have columns as (Int, Int) + (input1, input2, df1.union(df2)) + } + + withTempDir { checkpointDir => + val (input1, input2, unionDf) = constructUnionDf(2) + testStream(unionDf, Update)( + StartStream(checkpointLocation = checkpointDir.getAbsolutePath), + MultiAddData(input1, 11, 12)(input2, 21, 22), + CheckNewAnswer(Row(11, 12), Row(12, 13), Row(21, 1), Row(22, 1)), + StopStream + ) + + // We're restoring the query with different number of partitions in left side of UNION, + // which may lead right side of union to have mismatched partition IDs (e.g. if it relies on + // TaskContext.partitionId()). This test will verify streaming aggregation doesn't have + // such issue. + + val (newInput1, newInput2, newUnionDf) = constructUnionDf(3) + + newInput1.addData(11, 12) + newInput2.addData(21, 22) + + testStream(newUnionDf, Update)( + StartStream(checkpointLocation = checkpointDir.getAbsolutePath), + MultiAddData(newInput1, 13, 14)(newInput2, 22, 23), + CheckNewAnswer(Row(13, 14), Row(14, 15), Row(22, 2), Row(23, 1)) + ) + } + } + testQuietlyWithAllStateVersions("midbatch failure") { val inputData = MemoryStream[Int] FailureSingleton.firstTime = true @@ -712,6 +755,89 @@ class StreamingAggregationSuite extends StateStoreMetricsTest with Assertions { ) } + testQuietlyWithAllStateVersions("changing schema of state when restarting query", + (SQLConf.STATE_STORE_FORMAT_VALIDATION_ENABLED.key, "false")) { + withTempDir { tempDir => + val (inputData, aggregated) = prepareTestForChangingSchemaOfState(tempDir) + + // if we don't have verification phase on state schema, modified query would throw NPE with + // stack trace which end users would not easily understand + + testStream(aggregated, Update())( + StartStream(checkpointLocation = tempDir.getAbsolutePath), + AddData(inputData, 21), + ExpectFailure[SparkException] { e => + val stateSchemaExc = findStateSchemaNotCompatible(e) + assert(stateSchemaExc.isDefined) + val msg = stateSchemaExc.get.getMessage + assert(msg.contains("Provided schema doesn't match to the schema for existing state")) + // other verifications are presented in StateStoreSuite + } + ) + } + } + + testQuietlyWithAllStateVersions("changing schema of state when restarting query -" + + " schema check off", + (SQLConf.STATE_SCHEMA_CHECK_ENABLED.key, "false"), + (SQLConf.STATE_STORE_FORMAT_VALIDATION_ENABLED.key, "false")) { + withTempDir { tempDir => + val (inputData, aggregated) = prepareTestForChangingSchemaOfState(tempDir) + + testStream(aggregated, Update())( + StartStream(checkpointLocation = tempDir.getAbsolutePath), + AddData(inputData, 21), + ExpectFailure[SparkException] { e => + val stateSchemaExc = findStateSchemaNotCompatible(e) + // it would bring other error in runtime, but it shouldn't check schema in any way + assert(stateSchemaExc.isEmpty) + } + ) + } + } + + private def prepareTestForChangingSchemaOfState( + tempDir: File): (MemoryStream[Int], DataFrame) = { + val inputData = MemoryStream[Int] + val aggregated = inputData.toDF() + .selectExpr("value % 10 AS id", "value") + .groupBy($"id") + .agg( + sum("value").as("sum_value"), + avg("value").as("avg_value"), + max("value").as("max_value")) + + testStream(aggregated, 
Update())( + StartStream(checkpointLocation = tempDir.getAbsolutePath), + AddData(inputData, 1, 11), + CheckLastBatch((1L, 12L, 6.0, 11)), + StopStream + ) + + StateStore.unloadAll() + + val inputData2 = MemoryStream[Int] + val aggregated2 = inputData2.toDF() + .selectExpr("value % 10 AS id", "value") + .groupBy($"id") + .agg( + sum("value").as("sum_value"), + avg("value").as("avg_value"), + collect_list("value").as("values")) + + inputData2.addData(1, 11) + + (inputData2, aggregated2) + } + + @tailrec + private def findStateSchemaNotCompatible(exc: Throwable): Option[StateSchemaNotCompatible] = { + exc match { + case e1: StateSchemaNotCompatible => Some(e1) + case e1 if e1.getCause != null => findStateSchemaNotCompatible(e1.getCause) + case _ => None + } + } /** Add blocks of data to the `BlockRDDBackedSource`. */ case class AddBlockData(source: BlockRDDBackedSource, data: Seq[Int]*) extends AddData { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingDeduplicationSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingDeduplicationSuite.scala index 1f346aac8d2c2..ac9cd1a12d06f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingDeduplicationSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingDeduplicationSuite.scala @@ -17,12 +17,9 @@ package org.apache.spark.sql.streaming -import org.scalatest.BeforeAndAfterAll - -import org.apache.spark.sql.catalyst.plans.physical.{ClusteredDistribution, HashPartitioning, SinglePartition} +import org.apache.spark.sql.DataFrame import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._ -import org.apache.spark.sql.execution.streaming.{MemoryStream, StreamingDeduplicateExec} -import org.apache.spark.sql.execution.streaming.state.StateStore +import org.apache.spark.sql.execution.streaming.MemoryStream import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf @@ -294,4 +291,45 @@ class StreamingDeduplicationSuite extends StateStoreMetricsTest { testWithFlag(true) testWithFlag(false) } + + test("SPARK-29438: ensure UNION doesn't lead streaming deduplication to use" + + " shifted partition IDs") { + def constructUnionDf(desiredPartitionsForInput1: Int) + : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { + val input1 = MemoryStream[Int](desiredPartitionsForInput1) + val input2 = MemoryStream[Int] + val df1 = input1.toDF().select($"value") + val df2 = input2.toDF().dropDuplicates("value") + + // Unioned DF would have columns as (Int) + (input1, input2, df1.union(df2)) + } + + withTempDir { checkpointDir => + val (input1, input2, unionDf) = constructUnionDf(2) + testStream(unionDf, Append)( + StartStream(checkpointLocation = checkpointDir.getAbsolutePath), + MultiAddData(input1, 11, 12)(input2, 21, 22), + CheckNewAnswer(11, 12, 21, 22), + StopStream + ) + + // We're restoring the query with different number of partitions in left side of UNION, + // which may lead right side of union to have mismatched partition IDs (e.g. if it relies on + // TaskContext.partitionId()). This test will verify streaming deduplication doesn't have + // such issue. 
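// Minimal streaming deduplication sketch, not the test's exact query (rate source and
// column names assumed): with an event-time watermark, dropDuplicates only needs to keep
// keys newer than the watermark in the state store.
val events = spark.readStream.format("rate").load()  // columns: timestamp, value
val deduped = events
  .withWatermark("timestamp", "10 seconds")
  .dropDuplicates("value", "timestamp")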
+ + val (newInput1, newInput2, newUnionDf) = constructUnionDf(3) + + newInput1.addData(11, 12) + newInput2.addData(21, 22) + + testStream(newUnionDf, Append)( + StartStream(checkpointLocation = checkpointDir.getAbsolutePath), + MultiAddData(newInput1, 13, 14)(newInput2, 22, 23), + CheckNewAnswer(13, 14, 23) + ) + } + } + } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingJoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingJoinSuite.scala index b182727408bbf..40131e822c5ce 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingJoinSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingJoinSuite.scala @@ -26,23 +26,17 @@ import scala.util.Random import org.apache.commons.io.FileUtils import org.scalatest.BeforeAndAfter -import org.apache.spark.SparkContext import org.apache.spark.scheduler.ExecutorCacheTaskLocation -import org.apache.spark.sql.{AnalysisException, DataFrame, Row, SparkSession} -import org.apache.spark.sql.catalyst.analysis.StreamingJoinHelper -import org.apache.spark.sql.catalyst.expressions.{AttributeReference, AttributeSet, Literal} -import org.apache.spark.sql.catalyst.plans.logical.{EventTimeWatermark, Filter} -import org.apache.spark.sql.catalyst.trees.TreeNode -import org.apache.spark.sql.execution.{FileSourceScanExec, LogicalRDD} -import org.apache.spark.sql.execution.datasources.LogicalRelation +import org.apache.spark.sql.{DataFrame, Row, SparkSession} import org.apache.spark.sql.execution.streaming.{MemoryStream, StatefulOperatorStateInfo, StreamingSymmetricHashJoinExec, StreamingSymmetricHashJoinHelper} import org.apache.spark.sql.execution.streaming.state.{StateStore, StateStoreProviderId} import org.apache.spark.sql.functions._ -import org.apache.spark.sql.types._ import org.apache.spark.util.Utils +abstract class StreamingJoinSuite + extends StreamTest with StateStoreMetricsTest with BeforeAndAfter { -class StreamingInnerJoinSuite extends StreamTest with StateStoreMetricsTest with BeforeAndAfter { + import testImplicits._ before { SparkSession.setActiveSession(spark) // set this before force initializing 'joinExec' @@ -53,6 +47,162 @@ class StreamingInnerJoinSuite extends StreamTest with StateStoreMetricsTest with StateStore.stop() } + protected def setupStream(prefix: String, multiplier: Int): (MemoryStream[Int], DataFrame) = { + val input = MemoryStream[Int] + val df = input.toDF + .select( + 'value as "key", + timestamp_seconds($"value") as s"${prefix}Time", + ('value * multiplier) as s"${prefix}Value") + .withWatermark(s"${prefix}Time", "10 seconds") + + (input, df) + } + + protected def setupWindowedJoin(joinType: String) + : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { + + val (input1, df1) = setupStream("left", 2) + val (input2, df2) = setupStream("right", 3) + val windowed1 = df1.select('key, window('leftTime, "10 second"), 'leftValue) + val windowed2 = df2.select('key, window('rightTime, "10 second"), 'rightValue) + val joined = windowed1.join(windowed2, Seq("key", "window"), joinType) + val select = if (joinType == "left_semi") { + joined.select('key, $"window.end".cast("long"), 'leftValue) + } else { + joined.select('key, $"window.end".cast("long"), 'leftValue, 'rightValue) + } + + (input1, input2, select) + } + + protected def setupWindowedJoinWithLeftCondition(joinType: String) + : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { + + val (leftInput, df1) = setupStream("left", 2) + val (rightInput, df2) = setupStream("right", 3) + 
// Use different schemas to ensure the null row is being generated from the correct side. + val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) + val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) + + val joined = left.join( + right, + left("key") === right("key") + && left("window") === right("window") + && 'leftValue > 4, + joinType) + + val select = if (joinType == "left_semi") { + joined.select(left("key"), left("window.end").cast("long"), 'leftValue) + } else if (joinType == "left_outer") { + joined.select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue) + } else if (joinType == "right_outer") { + joined.select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue) + } else { + joined.select(left("key"), left("window.end").cast("long"), 'leftValue, + right("key"), right("window.end").cast("long"), 'rightValue) + } + + (leftInput, rightInput, select) + } + + protected def setupWindowedJoinWithRightCondition(joinType: String) + : (MemoryStream[Int], MemoryStream[Int], DataFrame) = { + + val (leftInput, df1) = setupStream("left", 2) + val (rightInput, df2) = setupStream("right", 3) + // Use different schemas to ensure the null row is being generated from the correct side. + val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) + val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) + + val joined = left.join( + right, + left("key") === right("key") + && left("window") === right("window") + && 'rightValue.cast("int") > 7, + joinType) + + val select = if (joinType == "left_semi") { + joined.select(left("key"), left("window.end").cast("long"), 'leftValue) + } else if (joinType == "left_outer") { + joined.select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue) + } else if (joinType == "right_outer") { + joined.select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue) + } else { + joined.select(left("key"), left("window.end").cast("long"), 'leftValue, + right("key"), right("window.end").cast("long"), 'rightValue) + } + + (leftInput, rightInput, select) + } + + protected def setupJoinWithRangeCondition(joinType: String) + : (MemoryStream[(Int, Int)], MemoryStream[(Int, Int)], DataFrame) = { + + val leftInput = MemoryStream[(Int, Int)] + val rightInput = MemoryStream[(Int, Int)] + + val df1 = leftInput.toDF.toDF("leftKey", "time") + .select('leftKey, timestamp_seconds($"time") as "leftTime", ('leftKey * 2) as "leftValue") + .withWatermark("leftTime", "10 seconds") + + val df2 = rightInput.toDF.toDF("rightKey", "time") + .select('rightKey, timestamp_seconds($"time") as "rightTime", + ('rightKey * 3) as "rightValue") + .withWatermark("rightTime", "10 seconds") + + val joined = + df1.join( + df2, + expr("leftKey = rightKey AND " + + "leftTime BETWEEN rightTime - interval 5 seconds AND rightTime + interval 5 seconds"), + joinType) + + val select = if (joinType == "left_semi") { + joined.select('leftKey, 'leftTime.cast("int")) + } else { + joined.select('leftKey, 'rightKey, 'leftTime.cast("int"), 'rightTime.cast("int")) + } + + (leftInput, rightInput, select) + } + + protected def setupSelfJoin(joinType: String) + : (MemoryStream[(Int, Long)], DataFrame) = { + + val inputStream = MemoryStream[(Int, Long)] + + val df = inputStream.toDS() + .select(col("_1").as("value"), timestamp_seconds($"_2").as("timestamp")) + + val leftStream = df.select(col("value").as("leftId"), col("timestamp").as("leftTime")) + + 
val rightStream = df + // Introduce misses for ease of debugging + .where(col("value") % 2 === 0) + .select(col("value").as("rightId"), col("timestamp").as("rightTime")) + + val joined = leftStream + .withWatermark("leftTime", "5 seconds") + .join( + rightStream.withWatermark("rightTime", "5 seconds"), + expr("leftId = rightId AND rightTime >= leftTime AND " + + "rightTime <= leftTime + interval 5 seconds"), + joinType) + + val select = if (joinType == "left_semi") { + joined.select(col("leftId"), col("leftTime").cast("int")) + } else { + joined.select(col("leftId"), col("leftTime").cast("int"), + col("rightId"), col("rightTime").cast("int")) + } + + (inputStream, select) + } +} + +class StreamingInnerJoinSuite extends StreamingJoinSuite { + import testImplicits._ test("stream stream inner join on non-time column") { val input1 = MemoryStream[Int] @@ -373,7 +523,7 @@ class StreamingInnerJoinSuite extends StreamTest with StateStoreMetricsTest with }.toMap partitionAndStoreNameToLocation.foreach { case ((partIndex, storeName), hostName) => val providerId = StateStoreProviderId(stateInfo, partIndex, storeName) - coordinatorRef.reportActiveInstance(providerId, hostName, s"exec-$hostName") + coordinatorRef.reportActiveInstance(providerId, hostName, s"exec-$hostName", Seq.empty) require( coordinatorRef.getLocation(providerId) === Some(ExecutorCacheTaskLocation(hostName, s"exec-$hostName").toString)) @@ -486,58 +636,13 @@ class StreamingInnerJoinSuite extends StreamTest with StateStoreMetricsTest with } -class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with BeforeAndAfter { +class StreamingOuterJoinSuite extends StreamingJoinSuite { import testImplicits._ import org.apache.spark.sql.functions._ - before { - SparkSession.setActiveSession(spark) // set this before force initializing 'joinExec' - spark.streams.stateStoreCoordinator // initialize the lazy coordinator - } - - after { - StateStore.stop() - } - - private def setupStream(prefix: String, multiplier: Int): (MemoryStream[Int], DataFrame) = { - val input = MemoryStream[Int] - val df = input.toDF - .select( - 'value as "key", - timestamp_seconds($"value") as s"${prefix}Time", - ('value * multiplier) as s"${prefix}Value") - .withWatermark(s"${prefix}Time", "10 seconds") - - return (input, df) - } - - private def setupWindowedJoin(joinType: String): - (MemoryStream[Int], MemoryStream[Int], DataFrame) = { - val (input1, df1) = setupStream("left", 2) - val (input2, df2) = setupStream("right", 3) - val windowed1 = df1.select('key, window('leftTime, "10 second"), 'leftValue) - val windowed2 = df2.select('key, window('rightTime, "10 second"), 'rightValue) - val joined = windowed1.join(windowed2, Seq("key", "window"), joinType) - .select('key, $"window.end".cast("long"), 'leftValue, 'rightValue) - - (input1, input2, joined) - } - test("left outer early state exclusion on left") { - val (leftInput, df1) = setupStream("left", 2) - val (rightInput, df2) = setupStream("right", 3) - // Use different schemas to ensure the null row is being generated from the correct side. 
- val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) - val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) - - val joined = left.join( - right, - left("key") === right("key") - && left("window") === right("window") - && 'leftValue > 4, - "left_outer") - .select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue) + val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("left_outer") testStream(joined)( MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), @@ -554,19 +659,7 @@ class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with } test("left outer early state exclusion on right") { - val (leftInput, df1) = setupStream("left", 2) - val (rightInput, df2) = setupStream("right", 3) - // Use different schemas to ensure the null row is being generated from the correct side. - val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) - val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) - - val joined = left.join( - right, - left("key") === right("key") - && left("window") === right("window") - && 'rightValue.cast("int") > 7, - "left_outer") - .select(left("key"), left("window.end").cast("long"), 'leftValue, 'rightValue) + val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("left_outer") testStream(joined)( MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), @@ -583,19 +676,7 @@ class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with } test("right outer early state exclusion on left") { - val (leftInput, df1) = setupStream("left", 2) - val (rightInput, df2) = setupStream("right", 3) - // Use different schemas to ensure the null row is being generated from the correct side. - val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) - val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) - - val joined = left.join( - right, - left("key") === right("key") - && left("window") === right("window") - && 'leftValue > 4, - "right_outer") - .select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue) + val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("right_outer") testStream(joined)( MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), @@ -612,19 +693,7 @@ class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with } test("right outer early state exclusion on right") { - val (leftInput, df1) = setupStream("left", 2) - val (rightInput, df2) = setupStream("right", 3) - // Use different schemas to ensure the null row is being generated from the correct side. 
- val left = df1.select('key, window('leftTime, "10 second"), 'leftValue) - val right = df2.select('key, window('rightTime, "10 second"), 'rightValue.cast("string")) - - val joined = left.join( - right, - left("key") === right("key") - && left("window") === right("window") - && 'rightValue.cast("int") > 7, - "right_outer") - .select(right("key"), right("window.end").cast("long"), 'leftValue, 'rightValue) + val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("right_outer") testStream(joined)( MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), @@ -681,27 +750,8 @@ class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with ("right_outer", Row(null, 2, null, 5)) ).foreach { case (joinType: String, outerResult) => test(s"${joinType.replaceAllLiterally("_", " ")} with watermark range condition") { - import org.apache.spark.sql.functions._ - - val leftInput = MemoryStream[(Int, Int)] - val rightInput = MemoryStream[(Int, Int)] - - val df1 = leftInput.toDF.toDF("leftKey", "time") - .select('leftKey, timestamp_seconds($"time") as "leftTime", ('leftKey * 2) as "leftValue") - .withWatermark("leftTime", "10 seconds") - - val df2 = rightInput.toDF.toDF("rightKey", "time") - .select('rightKey, timestamp_seconds($"time") as "rightTime", - ('rightKey * 3) as "rightValue") - .withWatermark("rightTime", "10 seconds") - - val joined = - df1.join( - df2, - expr("leftKey = rightKey AND " + - "leftTime BETWEEN rightTime - interval 5 seconds AND rightTime + interval 5 seconds"), - joinType) - .select('leftKey, 'rightKey, 'leftTime.cast("int"), 'rightTime.cast("int")) + val (leftInput, rightInput, joined) = setupJoinWithRangeCondition(joinType) + testStream(joined)( AddData(leftInput, (1, 5), (3, 5)), CheckAnswer(), @@ -780,27 +830,7 @@ class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with } test("SPARK-26187 self left outer join should not return outer nulls for already matched rows") { - val inputStream = MemoryStream[(Int, Long)] - - val df = inputStream.toDS() - .select(col("_1").as("value"), timestamp_seconds($"_2").as("timestamp")) - - val leftStream = df.select(col("value").as("leftId"), col("timestamp").as("leftTime")) - - val rightStream = df - // Introduce misses for ease of debugging - .where(col("value") % 2 === 0) - .select(col("value").as("rightId"), col("timestamp").as("rightTime")) - - val query = leftStream - .withWatermark("leftTime", "5 seconds") - .join( - rightStream.withWatermark("rightTime", "5 seconds"), - expr("leftId = rightId AND rightTime >= leftTime AND " + - "rightTime <= leftTime + interval 5 seconds"), - joinType = "leftOuter") - .select(col("leftId"), col("leftTime").cast("int"), - col("rightId"), col("rightTime").cast("int")) + val (inputStream, query) = setupSelfJoin("left_outer") testStream(query)( AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), @@ -938,7 +968,7 @@ class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with throw writer.exception.get } assert(e.getMessage.toLowerCase(Locale.ROOT) - .contains("the query is using stream-stream outer join with state format version 1")) + .contains("the query is using stream-stream leftouter join with state format version 1")) } test("SPARK-29438: ensure UNION doesn't lead stream-stream join to use shifted partition IDs") { @@ -1041,3 +1071,408 @@ class StreamingOuterJoinSuite extends StreamTest with StateStoreMetricsTest with ) } } + +class StreamingFullOuterJoinSuite extends StreamingJoinSuite { + + test("windowed 
full outer join") { + val (leftInput, rightInput, joined) = setupWindowedJoin("full_outer") + + testStream(joined)( + MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7), + CheckNewAnswer(Row(3, 10, 6, 9), Row(4, 10, 8, 12), Row(5, 10, 10, 15)), + // states + // left: 1, 2, 3, 4 ,5 + // right: 3, 4, 5, 6, 7 + assertNumStateRows(total = 10, updated = 10), + MultiAddData(leftInput, 21)(rightInput, 22), + // Watermark = 11, should remove rows having window=[0,10]. + CheckNewAnswer(Row(1, 10, 2, null), Row(2, 10, 4, null), Row(6, 10, null, 18), + Row(7, 10, null, 21)), + // states + // left: 21 + // right: 22 + // + // states evicted + // left: 1, 2, 3, 4 ,5 (below watermark) + // right: 3, 4, 5, 6, 7 (below watermark) + assertNumStateRows(total = 2, updated = 2), + AddData(leftInput, 22), + CheckNewAnswer(Row(22, 30, 44, 66)), + // states + // left: 21, 22 + // right: 22 + assertNumStateRows(total = 3, updated = 1), + StopStream, + StartStream(), + + AddData(leftInput, 1), + // Row not add as 1 < state key watermark = 12. + CheckNewAnswer(), + // states + // left: 21, 22 + // right: 22 + assertNumStateRows(total = 3, updated = 0, droppedByWatermark = 1), + AddData(rightInput, 5), + // Row not add as 5 < state key watermark = 12. + CheckNewAnswer(), + // states + // left: 21, 22 + // right: 22 + assertNumStateRows(total = 3, updated = 0, droppedByWatermark = 1) + ) + } + + test("full outer early state exclusion on left") { + val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("full_outer") + + testStream(joined)( + MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), + // The left rows with leftValue <= 4 should generate their outer join rows now and + // not get added to the state. + CheckNewAnswer(Row(1, 10, 2, null, null, null), Row(2, 10, 4, null, null, null), + Row(3, 10, 6, 3, 10, "9")), + // states + // left: 3 + // right: 3, 4, 5 + assertNumStateRows(total = 4, updated = 4), + // Generate outer join result for all non-matched rows when the watermark advances. + MultiAddData(leftInput, 20)(rightInput, 21), + CheckNewAnswer(Row(null, null, null, 4, 10, "12"), Row(null, null, null, 5, 10, "15")), + // states + // left: 20 + // right: 21 + // + // states evicted + // left: 3 (below watermark) + // right: 3, 4, 5 (below watermark) + assertNumStateRows(total = 2, updated = 2), + AddData(rightInput, 20), + CheckNewAnswer(Row(20, 30, 40, 20, 30, "60")), + // states + // left: 20 + // right: 21, 20 + assertNumStateRows(total = 3, updated = 1) + ) + } + + test("full outer early state exclusion on right") { + val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("full_outer") + + testStream(joined)( + MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), + // The right rows with rightValue <= 7 should generate their outer join rows now, + // and never be added to the state. + // The right row with rightValue = 9 > 7, hence joined and added to state. + CheckNewAnswer(Row(null, null, null, 1, 10, "3"), Row(null, null, null, 2, 10, "6"), + Row(3, 10, 6, 3, 10, "9")), + // states + // left: 3, 4, 5 + // right: 3 + assertNumStateRows(total = 4, updated = 4), + // Generate outer join result for all non-matched rows when the watermark advances. 
+ MultiAddData(leftInput, 20)(rightInput, 21), + CheckNewAnswer(Row(4, 10, 8, null, null, null), Row(5, 10, 10, null, null, null)), + // states + // left: 20 + // right: 21 + // + // states evicted + // left: 3, 4, 5 (below watermark) + // right: 3 (below watermark) + assertNumStateRows(total = 2, updated = 2), + AddData(rightInput, 20), + CheckNewAnswer(Row(20, 30, 40, 20, 30, "60")), + // states + // left: 20 + // right: 21, 20 + assertNumStateRows(total = 3, updated = 1) + ) + } + + test("full outer join with watermark range condition") { + val (leftInput, rightInput, joined) = setupJoinWithRangeCondition("full_outer") + + testStream(joined)( + AddData(leftInput, (1, 5), (3, 5)), + CheckNewAnswer(), + // states + // left: (1, 5), (3, 5) + // right: nothing + assertNumStateRows(total = 2, updated = 2), + AddData(rightInput, (1, 10), (2, 5)), + // Match left row in the state. + CheckNewAnswer(Row(1, 1, 5, 10)), + // states + // left: (1, 5), (3, 5) + // right: (1, 10), (2, 5) + assertNumStateRows(total = 4, updated = 2), + AddData(rightInput, (1, 9)), + // Match left row in the state. + CheckNewAnswer(Row(1, 1, 5, 9)), + // states + // left: (1, 5), (3, 5) + // right: (1, 10), (2, 5), (1, 9) + assertNumStateRows(total = 5, updated = 1), + // Increase event time watermark to 20s by adding data with time = 30s on both inputs. + AddData(leftInput, (1, 7), (1, 30)), + CheckNewAnswer(Row(1, 1, 7, 9), Row(1, 1, 7, 10)), + // states + // left: (1, 5), (3, 5), (1, 7), (1, 30) + // right: (1, 10), (2, 5), (1, 9) + assertNumStateRows(total = 7, updated = 2), + // Watermark = 30 - 10 = 20, no matched row. + // Generate outer join result for all non-matched rows when the watermark advances. + AddData(rightInput, (0, 30)), + CheckNewAnswer(Row(3, null, 5, null), Row(null, 2, null, 5)), + // states + // left: (1, 30) + // right: (0, 30) + // + // states evicted + // left: (1, 5), (3, 5), (1, 5) (below watermark = 20) + // right: (1, 10), (2, 5), (1, 9) (below watermark = 20) + assertNumStateRows(total = 2, updated = 1) + ) + } + + test("self full outer join") { + val (inputStream, query) = setupSelfJoin("full_outer") + + testStream(query)( + AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), + CheckNewAnswer(Row(2, 2L, 2, 2L), Row(4, 4L, 4, 4L)), + // batch 1 - global watermark = 0 + // states + // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L) + // right: (2, 2L), (4, 4L) + assertNumStateRows(total = 7, updated = 7), + AddData(inputStream, (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L)), + CheckNewAnswer(Row(6, 6L, 6, 6L), Row(8, 8L, 8, 8L), Row(10, 10L, 10, 10L)), + // batch 2 - global watermark = 5 + // states + // left: (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L), (6, 6L), (7, 7L), (8, 8L), + // (9, 9L), (10, 10L) + // right: (6, 6L), (8, 8L), (10, 10L) + // + // states evicted + // left: nothing (it waits for 5 seconds more than watermark due to join condition) + // right: (2, 2L), (4, 4L) + assertNumStateRows(total = 13, updated = 8), + AddData(inputStream, (11, 11L), (12, 12L), (13, 13L), (14, 14L), (15, 15L)), + CheckNewAnswer(Row(12, 12L, 12, 12L), Row(14, 14L, 14, 14L), Row(1, 1L, null, null), + Row(3, 3L, null, null)), + // batch 3 - global watermark = 9 + // states + // left: (4, 4L), (5, 5L), (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L), (11, 11L), + // (12, 12L), (13, 13L), (14, 14L), (15, 15L) + // right: (10, 10L), (12, 12L), (14, 14L) + // + // states evicted + // left: (1, 1L), (2, 2L), (3, 3L) + // right: (6, 6L), (8, 8L) + assertNumStateRows(total = 15, updated = 
7) + ) + } +} + +class StreamingLeftSemiJoinSuite extends StreamingJoinSuite { + + import testImplicits._ + + test("windowed left semi join") { + val (leftInput, rightInput, joined) = setupWindowedJoin("left_semi") + + testStream(joined)( + MultiAddData(leftInput, 1, 2, 3, 4, 5)(rightInput, 3, 4, 5, 6, 7), + CheckNewAnswer(Row(3, 10, 6), Row(4, 10, 8), Row(5, 10, 10)), + // states + // left: 1, 2, 3, 4 ,5 + // right: 3, 4, 5, 6, 7 + assertNumStateRows(total = 10, updated = 10), + MultiAddData(leftInput, 21)(rightInput, 22), + // Watermark = 11, should remove rows having window=[0,10]. + CheckNewAnswer(), + // states + // left: 21 + // right: 22 + // + // states evicted + // left: 1, 2, 3, 4 ,5 (below watermark) + // right: 3, 4, 5, 6, 7 (below watermark) + assertNumStateRows(total = 2, updated = 2), + AddData(leftInput, 22), + CheckNewAnswer(Row(22, 30, 44)), + // Unlike inner/outer joins, given left input row matches with right input row, + // we don't buffer the matched left input row to the state store. + // + // states + // left: 21 + // right: 22 + assertNumStateRows(total = 2, updated = 0), + StopStream, + StartStream(), + + AddData(leftInput, 1), + // Row not add as 1 < state key watermark = 12. + CheckNewAnswer(), + // states + // left: 21 + // right: 22 + assertNumStateRows(total = 2, updated = 0, droppedByWatermark = 1), + AddData(rightInput, 5), + // Row not add as 5 < state key watermark = 12. + CheckNewAnswer(), + // states + // left: 21 + // right: 22 + assertNumStateRows(total = 2, updated = 0, droppedByWatermark = 1) + ) + } + + test("left semi early state exclusion on left") { + val (leftInput, rightInput, joined) = setupWindowedJoinWithLeftCondition("left_semi") + + testStream(joined)( + MultiAddData(leftInput, 1, 2, 3)(rightInput, 3, 4, 5), + // The left rows with leftValue <= 4 should not generate their semi join rows and + // not get added to the state. + CheckNewAnswer(Row(3, 10, 6)), + // states + // left: 3 + // right: 3, 4, 5 + assertNumStateRows(total = 4, updated = 4), + // We shouldn't get more semi join rows when the watermark advances. + MultiAddData(leftInput, 20)(rightInput, 21), + CheckNewAnswer(), + // states + // left: 20 + // right: 21 + // + // states evicted + // left: 3 (below watermark) + // right: 3, 4, 5 (below watermark) + assertNumStateRows(total = 2, updated = 2), + AddData(rightInput, 20), + CheckNewAnswer((20, 30, 40)), + // states + // left: 20 + // right: 21, 20 + assertNumStateRows(total = 3, updated = 1) + ) + } + + test("left semi early state exclusion on right") { + val (leftInput, rightInput, joined) = setupWindowedJoinWithRightCondition("left_semi") + + testStream(joined)( + MultiAddData(leftInput, 3, 4, 5)(rightInput, 1, 2, 3), + // The right rows with rightValue <= 7 should never be added to the state. + // The right row with rightValue = 9 > 7, hence joined and added to state. + CheckNewAnswer(Row(3, 10, 6)), + // states + // left: 3, 4, 5 + // right: 3 + assertNumStateRows(total = 4, updated = 4), + // We shouldn't get more semi join rows when the watermark advances. 
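For readers skimming this hunk, the new StreamingLeftSemiJoinSuite exercises stream-stream left semi joins; the setupWindowedJoin*/setupJoinWithRangeCondition helpers it calls are defined earlier in StreamingJoinSuite and do not appear here. The following is a minimal, self-contained sketch of the kind of query under test, using rate sources and illustrative column names and intervals rather than the suite's exact helpers:

```scala
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.expr

object LeftSemiStreamJoinSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("left-semi-sketch").getOrCreate()
    import spark.implicits._

    // Two rate sources standing in for the suite's MemoryStream inputs.
    val left = spark.readStream.format("rate").load()
      .select(($"value" % 100).as("leftKey"), $"timestamp".as("leftTime"))
      .withWatermark("leftTime", "10 seconds")
    val right = spark.readStream.format("rate").load()
      .select(($"value" % 100).as("rightKey"), $"timestamp".as("rightTime"))
      .withWatermark("rightTime", "10 seconds")

    // A left semi join emits each matching left row once and never re-emits it,
    // which is why matched left rows need not stay buffered in the join state.
    val joined = left.join(
      right,
      expr("leftKey = rightKey AND leftTime BETWEEN rightTime - INTERVAL 5 SECONDS AND rightTime"),
      "left_semi")

    joined.writeStream
      .format("console")
      .option("checkpointLocation", "/tmp/left-semi-sketch-ckpt")
      .start()
      .awaitTermination()
  }
}
```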
+ MultiAddData(leftInput, 20)(rightInput, 21), + CheckNewAnswer(), + // states + // left: 20 + // right: 21 + // + // states evicted + // left: 3, 4, 5 (below watermark) + // right: 3 (below watermark) + assertNumStateRows(total = 2, updated = 2), + AddData(rightInput, 20), + CheckNewAnswer((20, 30, 40)), + // states + // left: 20 + // right: 21, 20 + assertNumStateRows(total = 3, updated = 1) + ) + } + + test("left semi join with watermark range condition") { + val (leftInput, rightInput, joined) = setupJoinWithRangeCondition("left_semi") + + testStream(joined)( + AddData(leftInput, (1, 5), (3, 5)), + CheckNewAnswer(), + // states + // left: (1, 5), (3, 5) + // right: nothing + assertNumStateRows(total = 2, updated = 2), + AddData(rightInput, (1, 10), (2, 5)), + // Match left row in the state. + CheckNewAnswer((1, 5)), + // states + // left: (1, 5), (3, 5) + // right: (1, 10), (2, 5) + assertNumStateRows(total = 4, updated = 2), + AddData(rightInput, (1, 9)), + // No match as left row is already matched. + CheckNewAnswer(), + // states + // left: (1, 5), (3, 5) + // right: (1, 10), (2, 5), (1, 9) + assertNumStateRows(total = 5, updated = 1), + // Increase event time watermark to 20s by adding data with time = 30s on both inputs. + AddData(leftInput, (1, 7), (1, 30)), + CheckNewAnswer((1, 7)), + // states + // left: (1, 5), (3, 5), (1, 30) + // right: (1, 10), (2, 5), (1, 9) + assertNumStateRows(total = 6, updated = 1), + // Watermark = 30 - 10 = 20, no matched row. + AddData(rightInput, (0, 30)), + CheckNewAnswer(), + // states + // left: (1, 30) + // right: (0, 30) + // + // states evicted + // left: (1, 5), (3, 5) (below watermark = 20) + // right: (1, 10), (2, 5), (1, 9) (below watermark = 20) + assertNumStateRows(total = 2, updated = 1) + ) + } + + test("self left semi join") { + val (inputStream, query) = setupSelfJoin("left_semi") + + testStream(query)( + AddData(inputStream, (1, 1L), (2, 2L), (3, 3L), (4, 4L), (5, 5L)), + CheckNewAnswer((2, 2), (4, 4)), + // batch 1 - global watermark = 0 + // states + // left: (2, 2L), (4, 4L) + // (left rows with value % 2 != 0 is filtered per [[PushPredicateThroughJoin]]) + // right: (2, 2L), (4, 4L) + // (right rows with value % 2 != 0 is filtered per [[PushPredicateThroughJoin]]) + assertNumStateRows(total = 4, updated = 4), + AddData(inputStream, (6, 6L), (7, 7L), (8, 8L), (9, 9L), (10, 10L)), + CheckNewAnswer((6, 6), (8, 8), (10, 10)), + // batch 2 - global watermark = 5 + // states + // left: (2, 2L), (4, 4L), (6, 6L), (8, 8L), (10, 10L) + // right: (6, 6L), (8, 8L), (10, 10L) + // + // states evicted + // left: nothing (it waits for 5 seconds more than watermark due to join condition) + // right: (2, 2L), (4, 4L) + assertNumStateRows(total = 8, updated = 6), + AddData(inputStream, (11, 11L), (12, 12L), (13, 13L), (14, 14L), (15, 15L)), + CheckNewAnswer((12, 12), (14, 14)), + // batch 3 - global watermark = 9 + // states + // left: (4, 4L), (6, 6L), (8, 8L), (10, 10L), (12, 12L), (14, 14L) + // right: (10, 10L), (12, 12L), (14, 14L) + // + // states evicted + // left: (2, 2L) + // right: (6, 6L), (8, 8L) + assertNumStateRows(total = 9, updated = 4) + ) + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryStatusAndProgressSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryStatusAndProgressSuite.scala index ec61102804ea3..c0aefb8120808 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryStatusAndProgressSuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryStatusAndProgressSuite.scala @@ -75,6 +75,7 @@ class StreamingQueryStatusAndProgressSuite extends StreamTest with Eventually { | "description" : "source", | "startOffset" : 123, | "endOffset" : 456, + | "latestOffset" : 789, | "numInputRows" : 678, | "inputRowsPerSecond" : 10.0 | } ], @@ -121,6 +122,7 @@ class StreamingQueryStatusAndProgressSuite extends StreamTest with Eventually { | "description" : "source", | "startOffset" : 123, | "endOffset" : 456, + | "latestOffset" : 789, | "numInputRows" : 678 | } ], | "sink" : { @@ -333,6 +335,7 @@ object StreamingQueryStatusAndProgressSuite { description = "source", startOffset = "123", endOffset = "456", + latestOffset = "789", numInputRows = 678, inputRowsPerSecond = 10.0, processedRowsPerSecond = Double.PositiveInfinity // should not be present in the json @@ -361,6 +364,7 @@ object StreamingQueryStatusAndProgressSuite { description = "source", startOffset = "123", endOffset = "456", + latestOffset = "789", numInputRows = 678, inputRowsPerSecond = Double.NaN, // should not be present in the json processedRowsPerSecond = Double.NegativeInfinity // should not be present in the json diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala index 1f408d55fd811..9c2403dffbb1a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala @@ -705,7 +705,7 @@ class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging wi val q2 = startQuery(input(1).toDS.map { i => // Emulate that `StreamingQuery` get captured with normal usage unintentionally. // It should not fail the query. - q1 + val q = q1 i }, "stream_serializable_test_2") val q3 = startQuery(input(2).toDS.map { i => diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingStateStoreFormatCompatibilitySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingStateStoreFormatCompatibilitySuite.scala index 33f6b02acb6dd..1032d6c5b6ff2 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingStateStoreFormatCompatibilitySuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingStateStoreFormatCompatibilitySuite.scala @@ -19,12 +19,15 @@ package org.apache.spark.sql.streaming import java.io.File +import scala.annotation.tailrec + import org.apache.commons.io.FileUtils import org.apache.spark.SparkException import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.streaming.InternalOutputModes.Complete import org.apache.spark.sql.execution.streaming.MemoryStream +import org.apache.spark.sql.execution.streaming.state.{InvalidUnsafeRowException, StateSchemaNotCompatible} import org.apache.spark.sql.functions._ import org.apache.spark.util.Utils @@ -239,11 +242,19 @@ class StreamingStateStoreFormatCompatibilitySuite extends StreamTest { CheckAnswer(Row(0, 20, Seq(0, 2, 4, 6, 8)), Row(1, 25, Seq(1, 3, 5, 7, 9))) */ AddData(inputData, 10 to 19: _*), - ExpectFailure[SparkException](e => { - // Check the exception message to make sure the state store format changing. 
- assert(e.getCause.getCause.getMessage.contains( - "The streaming query failed by state format invalidation.")) - }) + ExpectFailure[SparkException] { e => + assert(findStateSchemaException(e)) + } ) } + + @tailrec + private def findStateSchemaException(exc: Throwable): Boolean = { + exc match { + case _: StateSchemaNotCompatible => true + case _: InvalidUnsafeRowException => true + case e1 if e1.getCause != null => findStateSchemaException(e1.getCause) + case _ => false + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/ContinuousSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/ContinuousSuite.scala index 0d17f2e0bc7fb..02f91399fce1c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/ContinuousSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/ContinuousSuite.scala @@ -22,7 +22,6 @@ import java.sql.Timestamp import org.apache.spark.{SparkContext, SparkException} import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskStart} import org.apache.spark.sql._ -import org.apache.spark.sql.execution.datasources.v2.ContinuousScanExec import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.continuous._ import org.apache.spark.sql.execution.streaming.sources.ContinuousMemoryStream diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/sources/StreamingDataSourceV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/sources/StreamingDataSourceV2Suite.scala index 05cf324f8d490..ae0dba746d8a8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/sources/StreamingDataSourceV2Suite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/sources/StreamingDataSourceV2Suite.scala @@ -25,6 +25,7 @@ import scala.collection.JavaConverters._ import org.apache.spark.sql.{DataFrame, SQLContext} import org.apache.spark.sql.connector.catalog.{SessionConfigSupport, SupportsRead, SupportsWrite, Table, TableCapability, TableProvider} import org.apache.spark.sql.connector.catalog.TableCapability._ +import org.apache.spark.sql.connector.expressions.Transform import org.apache.spark.sql.connector.read.{InputPartition, PartitionReaderFactory, Scan, ScanBuilder} import org.apache.spark.sql.connector.read.streaming.{ContinuousPartitionReaderFactory, ContinuousStream, MicroBatchStream, Offset, PartitionOffset} import org.apache.spark.sql.connector.write.{LogicalWriteInfo, PhysicalWriteInfo, WriteBuilder, WriterCommitMessage} @@ -195,6 +196,30 @@ class FakeNoWrite extends DataSourceRegister with SimpleTableProvider { } } +class FakeWriteSupportingExternalMetadata + extends DataSourceRegister + with TableProvider { + override def shortName(): String = "fake-write-supporting-external-metadata" + + override def supportsExternalMetadata(): Boolean = true + + override def inferSchema(options: CaseInsensitiveStringMap): StructType = { + throw new IllegalArgumentException( + "Data stream writer should not require inferring table schema when the data source supports" + + " external metadata.") + } + + override def getTable( + tableSchema: StructType, + partitioning: Array[Transform], + properties: util.Map[String, String]): Table = { + new Table with FakeStreamingWriteTable { + override def name(): String = "fake" + override def schema(): StructType = tableSchema + } + } +} + case class FakeWriteV1FallbackException() extends Exception class FakeSink extends Sink { @@ -265,7 +290,7 @@ class
StreamingDataSourceV2Suite extends StreamTest { Trigger.Continuous(1000)) private def testPositiveCase(readFormat: String, writeFormat: String, trigger: Trigger): Unit = { - testPositiveCaseWithQuery(readFormat, writeFormat, trigger)(() => _) + testPositiveCaseWithQuery(readFormat, writeFormat, trigger)(_ => ()) } private def testPositiveCaseWithQuery( @@ -314,6 +339,17 @@ class StreamingDataSourceV2Suite extends StreamTest { } } + test("SPARK-33369: Skip schema inference in DataStreamWriter.start() if table provider " + + "supports external metadata") { + testPositiveCaseWithQuery( + "fake-read-microbatch-continuous", "fake-write-supporting-external-metadata", + Trigger.Once()) { v2Query => + val sink = v2Query.asInstanceOf[StreamingQueryWrapper].streamingQuery.sink + assert(sink.isInstanceOf[Table]) + assert(sink.asInstanceOf[Table].schema() == StructType(Nil)) + } + } + test("disabled v2 write") { // Ensure the V2 path works normally and generates a V2 sink.. testPositiveCaseWithQuery( diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala index 64b0cb296635a..bdc714d49fcc9 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamReaderWriterSuite.scala @@ -43,11 +43,13 @@ object LastOptions { var mockStreamSourceProvider = mock(classOf[StreamSourceProvider]) var mockStreamSinkProvider = mock(classOf[StreamSinkProvider]) var parameters: Map[String, String] = null + var sinkParameters: Map[String, String] = null var schema: Option[StructType] = null var partitionColumns: Seq[String] = Nil def clear(): Unit = { parameters = null + sinkParameters = null schema = null partitionColumns = null reset(mockStreamSourceProvider) @@ -87,8 +89,6 @@ class DefaultSource extends StreamSourceProvider with StreamSinkProvider { override def getOffset: Option[Offset] = Some(new LongOffset(0)) override def getBatch(start: Option[Offset], end: Offset): DataFrame = { - import spark.implicits._ - spark.internalCreateDataFrame(spark.sparkContext.emptyRDD, schema, isStreaming = true) } @@ -101,7 +101,7 @@ class DefaultSource extends StreamSourceProvider with StreamSinkProvider { parameters: Map[String, String], partitionColumns: Seq[String], outputMode: OutputMode): Sink = { - LastOptions.parameters = parameters + LastOptions.sinkParameters = parameters LastOptions.partitionColumns = partitionColumns LastOptions.mockStreamSinkProvider.createSink(spark, parameters, partitionColumns, outputMode) (_: Long, _: DataFrame) => {} @@ -170,20 +170,19 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter { LastOptions.clear() - val query = df.writeStream + df.writeStream .format("org.apache.spark.sql.streaming.test") .option("opt1", "5") .options(Map("opt2" -> "4")) .options(map) .option("checkpointLocation", newMetadataDir) .start() + .stop() - assert(LastOptions.parameters("opt1") == "5") - assert(LastOptions.parameters("opt2") == "4") - assert(LastOptions.parameters("opt3") == "3") - assert(LastOptions.parameters.contains("checkpointLocation")) - - query.stop() + assert(LastOptions.sinkParameters("opt1") == "5") + assert(LastOptions.sinkParameters("opt2") == "4") + assert(LastOptions.sinkParameters("opt3") == "3") + assert(LastOptions.sinkParameters.contains("checkpointLocation")) } test("SPARK-32832: later option should 
override earlier options for load()") { @@ -204,7 +203,7 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter { .load() assert(LastOptions.parameters.isEmpty) - val query = ds.writeStream + ds.writeStream .format("org.apache.spark.sql.streaming.test") .option("checkpointLocation", newMetadataDir) .option("paTh", "1") @@ -213,8 +212,8 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter { .option("patH", "4") .option("path", "5") .start() - assert(LastOptions.parameters("path") == "5") - query.stop() + .stop() + assert(LastOptions.sinkParameters("path") == "5") } test("partitioning") { @@ -787,15 +786,30 @@ class DataStreamReaderWriterSuite extends StreamTest with BeforeAndAfter { withTempDir { checkpointPath => withSQLConf(SQLConf.LEGACY_PATH_OPTION_BEHAVIOR.key -> "true", SQLConf.CHECKPOINT_LOCATION.key -> checkpointPath.getAbsolutePath) { - val query = df.writeStream + df.writeStream .format("org.apache.spark.sql.streaming.test") .option("path", "tmp4") .start("tmp5") + .stop() // The legacy behavior overwrites the path option. - assert(LastOptions.parameters("path") == "tmp5") - query.stop() + assert(LastOptions.sinkParameters("path") == "tmp5") } } } } + + test("SPARK-32853: consecutive load/start calls should be allowed") { + val dfr = spark.readStream.format(classOf[DefaultSource].getName) + var df = dfr.load("1") + df = dfr.load("2") + withTempDir { checkpointPath => + val dfw = df.writeStream + .option("checkpointLocation", checkpointPath.getCanonicalPath) + .format(classOf[DefaultSource].getName) + var query = dfw.start("1") + query.stop() + query = dfw.start("2") + query.stop() + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamTableAPISuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamTableAPISuite.scala new file mode 100644 index 0000000000000..4c5c5e63cecb6 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/test/DataStreamTableAPISuite.scala @@ -0,0 +1,463 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.streaming.test + +import java.io.File +import java.util + +import scala.collection.JavaConverters._ + +import org.scalatest.BeforeAndAfter + +import org.apache.spark.sql.{AnalysisException, Row} +import org.apache.spark.sql.catalyst.TableIdentifier +import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException +import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType} +import org.apache.spark.sql.catalyst.streaming.StreamingRelationV2 +import org.apache.spark.sql.connector.{FakeV2Provider, InMemoryTableCatalog, InMemoryTableSessionCatalog} +import org.apache.spark.sql.connector.catalog.{Identifier, SupportsRead, Table, TableCapability, V2TableWithV1Fallback} +import org.apache.spark.sql.connector.expressions.Transform +import org.apache.spark.sql.connector.read.ScanBuilder +import org.apache.spark.sql.execution.streaming.{MemoryStream, MemoryStreamScanBuilder} +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.streaming.StreamTest +import org.apache.spark.sql.streaming.sources.FakeScanBuilder +import org.apache.spark.sql.types.StructType +import org.apache.spark.sql.util.CaseInsensitiveStringMap +import org.apache.spark.util.Utils + +class DataStreamTableAPISuite extends StreamTest with BeforeAndAfter { + import testImplicits._ + import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._ + + before { + spark.conf.set("spark.sql.catalog.testcat", classOf[InMemoryTableCatalog].getName) + spark.conf.set("spark.sql.catalog.teststream", classOf[InMemoryStreamTableCatalog].getName) + } + + after { + spark.sessionState.catalogManager.reset() + spark.sessionState.conf.clear() + sqlContext.streams.active.foreach(_.stop()) + } + + test("read: table API with file source") { + Seq("parquet", "").foreach { source => + withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> source) { + withTempDir { tempDir => + val tblName = "my_table" + val dir = tempDir.getAbsolutePath + withTable(tblName) { + spark.range(3).write.format("parquet").option("path", dir).saveAsTable(tblName) + + testStream(spark.readStream.table(tblName))( + ProcessAllAvailable(), + CheckAnswer(Row(0), Row(1), Row(2)) + ) + } + } + } + } + } + + test("read: read non-exist table") { + intercept[AnalysisException] { + spark.readStream.table("non_exist_table") + }.message.contains("Table not found") + } + + test("read: stream table API with temp view") { + val tblName = "my_table" + val stream = MemoryStream[Int] + withTable(tblName) { + stream.toDF().createOrReplaceTempView(tblName) + + testStream(spark.readStream.table(tblName)) ( + AddData(stream, 1, 2, 3), + CheckLastBatch(1, 2, 3), + AddData(stream, 4, 5), + CheckLastBatch(4, 5) + ) + } + } + + test("read: stream table API with non-streaming temp view") { + val tblName = "my_table" + withTable(tblName) { + spark.range(3).createOrReplaceTempView(tblName) + intercept[AnalysisException] { + spark.readStream.table(tblName) + }.message.contains("is not a temp view of streaming logical plan") + } + } + + test("read: read table without streaming capability support") { + val tableIdentifier = "testcat.table_name" + + spark.sql(s"CREATE TABLE $tableIdentifier (id bigint, data string) USING foo") + + intercept[AnalysisException] { + spark.readStream.table(tableIdentifier) + }.message.contains("does not support either micro-batch or continuous scan") + } + + test("read: read table with custom catalog") { + val tblName = "teststream.table_name" + withTable(tblName) { + 
spark.sql(s"CREATE TABLE $tblName (data int) USING foo") + val stream = MemoryStream[Int] + val testCatalog = spark.sessionState.catalogManager.catalog("teststream").asTableCatalog + val table = testCatalog.loadTable(Identifier.of(Array(), "table_name")) + table.asInstanceOf[InMemoryStreamTable].setStream(stream) + + testStream(spark.readStream.table(tblName)) ( + AddData(stream, 1, 2, 3), + CheckLastBatch(1, 2, 3), + AddData(stream, 4, 5), + CheckLastBatch(4, 5) + ) + } + } + + test("read: read table with custom catalog & namespace") { + spark.sql("CREATE NAMESPACE teststream.ns") + + val tblName = "teststream.ns.table_name" + withTable(tblName) { + spark.sql(s"CREATE TABLE $tblName (data int) USING foo") + val stream = MemoryStream[Int] + val testCatalog = spark.sessionState.catalogManager.catalog("teststream").asTableCatalog + val table = testCatalog.loadTable(Identifier.of(Array("ns"), "table_name")) + table.asInstanceOf[InMemoryStreamTable].setStream(stream) + + testStream(spark.readStream.table(tblName)) ( + AddData(stream, 1, 2, 3), + CheckLastBatch(1, 2, 3), + AddData(stream, 4, 5), + CheckLastBatch(4, 5) + ) + } + } + + test("read: fallback to V1 relation") { + val tblName = DataStreamTableAPISuite.V1FallbackTestTableName + spark.conf.set(SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION.key, + classOf[InMemoryStreamTableCatalog].getName) + val v2Source = classOf[FakeV2Provider].getName + withTempDir { tempDir => + withTable(tblName) { + spark.sql(s"CREATE TABLE $tblName (data int) USING $v2Source") + + // Check the StreamingRelationV2 has been replaced by StreamingRelation + val plan = spark.readStream.option("path", tempDir.getCanonicalPath).table(tblName) + .queryExecution.analyzed.collectFirst { + case d: StreamingRelationV2 => d + } + assert(plan.isEmpty) + } + } + } + + test("write: write to table with custom catalog & no namespace") { + val tableIdentifier = "testcat.table_name" + + withTable(tableIdentifier) { + spark.sql(s"CREATE TABLE $tableIdentifier (id bigint, data string) USING foo") + checkAnswer(spark.table(tableIdentifier), Seq.empty) + + runTestWithStreamAppend(tableIdentifier) + } + } + + test("write: write to table with custom catalog & namespace") { + spark.sql("CREATE NAMESPACE testcat.ns") + val tableIdentifier = "testcat.ns.table_name" + + withTable(tableIdentifier) { + spark.sql(s"CREATE TABLE $tableIdentifier (id bigint, data string) USING foo") + checkAnswer(spark.table(tableIdentifier), Seq.empty) + + runTestWithStreamAppend(tableIdentifier) + } + } + + test("write: write to table with default session catalog") { + val v2Source = classOf[FakeV2Provider].getName + spark.conf.set(SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION.key, + classOf[InMemoryTableSessionCatalog].getName) + + spark.sql("CREATE NAMESPACE ns") + + val tableIdentifier = "ns.table_name" + withTable(tableIdentifier) { + spark.sql(s"CREATE TABLE $tableIdentifier (id bigint, data string) USING $v2Source") + checkAnswer(spark.table(tableIdentifier), Seq.empty) + + runTestWithStreamAppend(tableIdentifier) + } + } + + test("write: write to non-exist table with custom catalog") { + val tableIdentifier = "testcat.nonexistenttable" + + withTable(tableIdentifier) { + runTestWithStreamAppend(tableIdentifier) + } + } + + test("write: write to temporary view isn't allowed yet") { + val tableIdentifier = "testcat.table_name" + val tempViewIdentifier = "temp_view" + + spark.sql(s"CREATE TABLE $tableIdentifier (id bigint, data string) USING foo") + checkAnswer(spark.table(tableIdentifier), Seq.empty) + + 
spark.table(tableIdentifier).createOrReplaceTempView(tempViewIdentifier) + + withTempDir { checkpointDir => + val exc = intercept[AnalysisException] { + runStreamQueryAppendMode(tempViewIdentifier, checkpointDir, Seq.empty, Seq.empty) + } + assert(exc.getMessage.contains("doesn't support streaming write")) + } + } + + test("write: write to view shouldn't be allowed") { + val tableIdentifier = "testcat.table_name" + val viewIdentifier = "table_view" + + spark.sql(s"CREATE TABLE $tableIdentifier (id bigint, data string) USING foo") + checkAnswer(spark.table(tableIdentifier), Seq.empty) + + spark.sql(s"CREATE VIEW $viewIdentifier AS SELECT id, data FROM $tableIdentifier") + + withTempDir { checkpointDir => + val exc = intercept[AnalysisException] { + runStreamQueryAppendMode(viewIdentifier, checkpointDir, Seq.empty, Seq.empty) + } + assert(exc.getMessage.contains(s"Streaming into views $viewIdentifier is not supported")) + } + } + + test("write: write to an external table") { + withTempDir { dir => + val tableName = "stream_test" + withTable(tableName) { + checkForStreamTable(Some(dir), tableName) + } + } + } + + test("write: write to a managed table") { + val tableName = "stream_test" + withTable(tableName) { + checkForStreamTable(None, tableName) + } + } + + test("write: write to an external table with existing path") { + withTempDir { dir => + val tableName = "stream_test" + withTable(tableName) { + // The file written by batch will not be seen after the table was written by a streaming + // query. This is because we load files from the metadata log instead of listing them + // using HDFS API. + Seq(4, 5, 6).toDF("value").write.format("parquet") + .option("path", dir.getCanonicalPath).saveAsTable(tableName) + + checkForStreamTable(Some(dir), tableName) + } + } + } + + test("write: write to a managed table with existing path") { + val tableName = "stream_test" + withTable(tableName) { + // The file written by batch will not be seen after the table was written by a streaming + // query. This is because we load files from the metadata log instead of listing them + // using HDFS API. + Seq(4, 5, 6).toDF("value").write.format("parquet").saveAsTable(tableName) + + checkForStreamTable(None, tableName) + } + } + + test("write: write to an external path and create table") { + withTempDir { dir => + val tableName = "stream_test" + withTable(tableName) { + // The file written by batch will not be seen after the table was written by a streaming + // query. This is because we load files from the metadata log instead of listing them + // using HDFS API. 
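Outside the test harness, the DataStreamWriter.toTable path that these write tests drive looks roughly like the sketch below, assuming a Spark version that ships toTable; the table name and checkpoint path are illustrative:

```scala
import org.apache.spark.sql.SparkSession

object ToTableSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("toTable-sketch").getOrCreate()

    // Any streaming source works; rate gives (timestamp, value) rows.
    val stream = spark.readStream.format("rate").load()

    // Appends each micro-batch to the catalog table, creating it if it does not exist.
    val query = stream.writeStream
      .option("checkpointLocation", "/tmp/toTable-sketch-ckpt")
      .toTable("stream_sink")

    // Batch reads of the table resolve files through the streaming metadata log,
    // which is why files written by earlier batch jobs are not visible afterwards,
    // as the test comments above note.
    // spark.table("stream_sink").show()

    query.awaitTermination()
  }
}
```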
+ Seq(4, 5, 6).toDF("value").write + .mode("append").format("parquet").save(dir.getCanonicalPath) + + checkForStreamTable(Some(dir), tableName) + } + } + } + + test("write: write to table with different format shouldn't be allowed") { + val tableName = "stream_test" + + spark.sql(s"CREATE TABLE $tableName (id bigint, data string) USING json") + checkAnswer(spark.table(tableName), Seq.empty) + + withTempDir { checkpointDir => + val exc = intercept[AnalysisException] { + runStreamQueryAppendMode(tableName, checkpointDir, Seq.empty, Seq.empty) + } + assert(exc.getMessage.contains("The input source(parquet) is different from the table " + + s"$tableName's data source provider(json)")) + } + } + + private def checkForStreamTable(dir: Option[File], tableName: String): Unit = { + val memory = MemoryStream[Int] + val dsw = memory.toDS().writeStream.format("parquet") + dir.foreach { output => + dsw.option("path", output.getCanonicalPath) + } + val sq = dsw + .option("checkpointLocation", Utils.createTempDir().getCanonicalPath) + .toTable(tableName) + memory.addData(1, 2, 3) + sq.processAllAvailable() + + checkDataset( + spark.table(tableName).as[Int], + 1, 2, 3) + val catalogTable = spark.sessionState.catalog.getTableMetadata(TableIdentifier(tableName)) + val path = if (dir.nonEmpty) { + dir.get + } else { + new File(catalogTable.location) + } + checkDataset( + spark.read.format("parquet").load(path.getCanonicalPath).as[Int], + 1, 2, 3) + } + + private def runTestWithStreamAppend(tableIdentifier: String) = { + withTempDir { checkpointDir => + val input1 = Seq((1L, "a"), (2L, "b"), (3L, "c")) + verifyStreamAppend(tableIdentifier, checkpointDir, Seq.empty, input1, input1) + + val input2 = Seq((4L, "d"), (5L, "e"), (6L, "f")) + verifyStreamAppend(tableIdentifier, checkpointDir, Seq(input1), input2, input1 ++ input2) + } + } + + private def runStreamQueryAppendMode( + tableIdentifier: String, + checkpointDir: File, + prevInputs: Seq[Seq[(Long, String)]], + newInputs: Seq[(Long, String)]): Unit = { + val inputData = MemoryStream[(Long, String)] + val inputDF = inputData.toDF().toDF("id", "data") + + prevInputs.foreach { inputsPerBatch => + inputData.addData(inputsPerBatch: _*) + } + + val query = inputDF + .writeStream + .option("checkpointLocation", checkpointDir.getAbsolutePath) + .toTable(tableIdentifier) + + inputData.addData(newInputs: _*) + + query.processAllAvailable() + query.stop() + } + + private def verifyStreamAppend( + tableIdentifier: String, + checkpointDir: File, + prevInputs: Seq[Seq[(Long, String)]], + newInputs: Seq[(Long, String)], + expectedOutputs: Seq[(Long, String)]): Unit = { + runStreamQueryAppendMode(tableIdentifier, checkpointDir, prevInputs, newInputs) + checkAnswer( + spark.table(tableIdentifier), + expectedOutputs.map { case (id, data) => Row(id, data) } + ) + } +} + +object DataStreamTableAPISuite { + val V1FallbackTestTableName = "fallbackV1Test" +} + +class InMemoryStreamTable(override val name: String) extends Table with SupportsRead { + var stream: MemoryStream[Int] = _ + + def setStream(inputData: MemoryStream[Int]): Unit = stream = inputData + + override def schema(): StructType = stream.fullSchema() + + override def capabilities(): util.Set[TableCapability] = { + Set(TableCapability.MICRO_BATCH_READ, TableCapability.CONTINUOUS_READ).asJava + } + + override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = { + new MemoryStreamScanBuilder(stream) + } +} + +class NonStreamV2Table(override val name: String) + extends Table with SupportsRead with 
V2TableWithV1Fallback { + override def schema(): StructType = StructType(Nil) + override def capabilities(): util.Set[TableCapability] = Set(TableCapability.BATCH_READ).asJava + override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = new FakeScanBuilder + + override def v1Table: CatalogTable = { + CatalogTable( + identifier = + TableIdentifier(DataStreamTableAPISuite.V1FallbackTestTableName, Some("default")), + tableType = CatalogTableType.MANAGED, + storage = CatalogStorageFormat.empty, + owner = null, + schema = schema(), + provider = Some("parquet")) + } +} + + +class InMemoryStreamTableCatalog extends InMemoryTableCatalog { + import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._ + + override def createTable( + ident: Identifier, + schema: StructType, + partitions: Array[Transform], + properties: util.Map[String, String]): Table = { + if (tables.containsKey(ident)) { + throw new TableAlreadyExistsException(ident) + } + + val table = if (ident.name() == DataStreamTableAPISuite.V1FallbackTestTableName) { + new NonStreamV2Table(s"$name.${ident.quoted}") + } else { + new InMemoryStreamTable(s"$name.${ident.quoted}") + } + tables.put(ident, table) + namespaces.putIfAbsent(ident.namespace.toList, Map()) + table + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryHistorySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryHistorySuite.scala new file mode 100644 index 0000000000000..160535ea4d048 --- /dev/null +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryHistorySuite.scala @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.streaming.ui + +import java.util.Locale +import javax.servlet.http.HttpServletRequest + +import org.mockito.Mockito.{mock, when} +import org.scalatest.BeforeAndAfter + +import org.apache.spark.deploy.history.{Utils => HsUtils} +import org.apache.spark.sql.execution.ui.StreamingQueryStatusStore +import org.apache.spark.sql.test.SharedSparkSession + +class StreamingQueryHistorySuite extends SharedSparkSession with BeforeAndAfter { + + test("support streaming query events") { + val logDir = Thread.currentThread().getContextClassLoader.getResource("spark-events").toString + HsUtils.withFsHistoryProvider(logDir) { provider => + val appUi = provider.getAppUI("local-1596020211915", None).getOrElse { + assert(false, "Failed to load event log of local-1596020211915.") + null + } + assert(appUi.ui.appName == "StructuredKafkaWordCount") + assert(appUi.ui.store.store.count(classOf[StreamingQueryData]) == 1) + assert(appUi.ui.store.store.count(classOf[StreamingQueryProgressWrapper]) == 8) + + val store = new StreamingQueryStatusStore(appUi.ui.store.store) + val tab = new StreamingQueryTab(store, appUi.ui) + val request = mock(classOf[HttpServletRequest]) + var html = new StreamingQueryPage(tab).render(request) + .toString().toLowerCase(Locale.ROOT) + // 81.39: Avg Input /sec + assert(html.contains("81.39")) + // 157.05: Avg Process /sec + assert(html.contains("157.05")) + + val id = "8d268dc2-bc9c-4be8-97a9-b135d2943028" + val runId = "e225d92f-2545-48f8-87a2-9c0309580f8a" + when(request.getParameter("id")).thenReturn(runId) + html = new StreamingQueryStatisticsPage(tab).render(request) + .toString().toLowerCase(Locale.ROOT) + assert(html.contains("8 completed batches")) + assert(html.contains(id)) + assert(html.contains(runId)) + } + } +} diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryPageSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryPageSuite.scala index 640c21c52a146..246fa1f7c9184 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryPageSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryPageSuite.scala @@ -20,12 +20,16 @@ package org.apache.spark.sql.streaming.ui import java.util.{Locale, UUID} import javax.servlet.http.HttpServletRequest +import scala.xml.Node + import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS} import org.scalatest.BeforeAndAfter -import scala.xml.Node +import org.apache.spark.SparkConf +import org.apache.spark.sql.execution.ui.StreamingQueryStatusStore import org.apache.spark.sql.streaming.StreamingQueryProgress import org.apache.spark.sql.test.SharedSparkSession +import org.apache.spark.ui.SparkUI class StreamingQueryPageSuite extends SharedSparkSession with BeforeAndAfter { @@ -33,26 +37,26 @@ class StreamingQueryPageSuite extends SharedSparkSession with BeforeAndAfter { val id = UUID.randomUUID() val request = mock(classOf[HttpServletRequest]) val tab = mock(classOf[StreamingQueryTab], RETURNS_SMART_NULLS) - val statusListener = mock(classOf[StreamingQueryStatusListener], RETURNS_SMART_NULLS) + val store = mock(classOf[StreamingQueryStatusStore], RETURNS_SMART_NULLS) when(tab.appName).thenReturn("testing") when(tab.headerTabs).thenReturn(Seq.empty) - when(tab.statusListener).thenReturn(statusListener) + when(tab.store).thenReturn(store) val streamQuery = createStreamQueryUIData(id) - when(statusListener.allQueryStatus).thenReturn(Seq(streamQuery)) + 
when(store.allQueryUIData).thenReturn(Seq(streamQuery)) var html = renderStreamingQueryPage(request, tab) .toString().toLowerCase(Locale.ROOT) assert(html.contains("active streaming queries (1)")) - when(streamQuery.isActive).thenReturn(false) - when(streamQuery.exception).thenReturn(None) + when(streamQuery.summary.isActive).thenReturn(false) + when(streamQuery.summary.exception).thenReturn(None) html = renderStreamingQueryPage(request, tab) .toString().toLowerCase(Locale.ROOT) assert(html.contains("completed streaming queries (1)")) assert(html.contains("finished")) - when(streamQuery.isActive).thenReturn(false) - when(streamQuery.exception).thenReturn(Option("exception in query")) + when(streamQuery.summary.isActive).thenReturn(false) + when(streamQuery.summary.exception).thenReturn(Option("exception in query")) html = renderStreamingQueryPage(request, tab) .toString().toLowerCase(Locale.ROOT) assert(html.contains("completed streaming queries (1)")) @@ -64,14 +68,20 @@ class StreamingQueryPageSuite extends SharedSparkSession with BeforeAndAfter { val id = UUID.randomUUID() val request = mock(classOf[HttpServletRequest]) val tab = mock(classOf[StreamingQueryTab], RETURNS_SMART_NULLS) - val statusListener = mock(classOf[StreamingQueryStatusListener], RETURNS_SMART_NULLS) + val store = mock(classOf[StreamingQueryStatusStore], RETURNS_SMART_NULLS) when(request.getParameter("id")).thenReturn(id.toString) when(tab.appName).thenReturn("testing") when(tab.headerTabs).thenReturn(Seq.empty) - when(tab.statusListener).thenReturn(statusListener) + when(tab.store).thenReturn(store) + val ui = mock(classOf[SparkUI]) + when(request.getParameter("id")).thenReturn(id.toString) + when(tab.appName).thenReturn("testing") + when(tab.headerTabs).thenReturn(Seq.empty) + when(ui.conf).thenReturn(new SparkConf()) + when(tab.parent).thenReturn(ui) val streamQuery = createStreamQueryUIData(id) - when(statusListener.allQueryStatus).thenReturn(Seq(streamQuery)) + when(store.allQueryUIData).thenReturn(Seq(streamQuery)) val html = renderStreamingQueryStatisticsPage(request, tab) .toString().toLowerCase(Locale.ROOT) @@ -89,15 +99,18 @@ class StreamingQueryPageSuite extends SharedSparkSession with BeforeAndAfter { when(progress.batchId).thenReturn(2) when(progress.prettyJson).thenReturn("""{"a":1}""") + val summary = mock(classOf[StreamingQueryData], RETURNS_SMART_NULLS) + when(summary.isActive).thenReturn(true) + when(summary.name).thenReturn("query") + when(summary.id).thenReturn(id) + when(summary.runId).thenReturn(id) + when(summary.startTimestamp).thenReturn(1L) + when(summary.exception).thenReturn(None) + val streamQuery = mock(classOf[StreamingQueryUIData], RETURNS_SMART_NULLS) - when(streamQuery.isActive).thenReturn(true) - when(streamQuery.name).thenReturn("query") - when(streamQuery.id).thenReturn(id) - when(streamQuery.runId).thenReturn(id) - when(streamQuery.startTimestamp).thenReturn(1L) + when(streamQuery.summary).thenReturn(summary) when(streamQuery.lastProgress).thenReturn(progress) when(streamQuery.recentProgress).thenReturn(Array(progress)) - when(streamQuery.exception).thenReturn(None) streamQuery } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListenerSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListenerSuite.scala index 6aa440e5609c5..91c55d5598a6b 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListenerSuite.scala +++ 
b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/StreamingQueryStatusListenerSuite.scala @@ -17,19 +17,28 @@ package org.apache.spark.sql.streaming.ui -import java.util.UUID +import java.text.SimpleDateFormat +import java.util.{Date, UUID} import org.mockito.Mockito.{mock, when, RETURNS_SMART_NULLS} +import org.scalatest.time.SpanSugar._ +import org.apache.spark.sql.catalyst.util.DateTimeUtils.getTimeZone +import org.apache.spark.sql.execution.ui.StreamingQueryStatusStore +import org.apache.spark.sql.internal.StaticSQLConf import org.apache.spark.sql.streaming.{StreamingQueryListener, StreamingQueryProgress, StreamTest} import org.apache.spark.sql.streaming +import org.apache.spark.status.ElementTrackingStore +import org.apache.spark.util.kvstore.InMemoryStore class StreamingQueryStatusListenerSuite extends StreamTest { test("onQueryStarted, onQueryProgress, onQueryTerminated") { - val listener = new StreamingQueryStatusListener(spark.sparkContext.conf) + val kvStore = new ElementTrackingStore(new InMemoryStore(), sparkConf) + val listener = new StreamingQueryStatusListener(spark.sparkContext.conf, kvStore) + val queryStore = new StreamingQueryStatusStore(kvStore) - // hanlde query started event + // handle query started event val id = UUID.randomUUID() val runId = UUID.randomUUID() val startEvent = new StreamingQueryListener.QueryStartedEvent( @@ -37,8 +46,9 @@ class StreamingQueryStatusListenerSuite extends StreamTest { listener.onQueryStarted(startEvent) // result checking - assert(listener.activeQueryStatus.size() == 1) - assert(listener.activeQueryStatus.get(runId).name == "test") + assert(queryStore.allQueryUIData.count(_.summary.isActive) == 1) + assert(queryStore.allQueryUIData.filter(_.summary.isActive).exists(uiData => + uiData.summary.runId == runId && uiData.summary.name.equals("test"))) // handle query progress event val progress = mock(classOf[StreamingQueryProgress], RETURNS_SMART_NULLS) @@ -53,28 +63,32 @@ class StreamingQueryStatusListenerSuite extends StreamTest { listener.onQueryProgress(processEvent) // result checking - val activeQuery = listener.activeQueryStatus.get(runId) - assert(activeQuery.isActive) - assert(activeQuery.recentProgress.length == 1) - assert(activeQuery.lastProgress.id == id) - assert(activeQuery.lastProgress.runId == runId) - assert(activeQuery.lastProgress.timestamp == "2001-10-01T01:00:00.100Z") - assert(activeQuery.lastProgress.inputRowsPerSecond == 10.0) - assert(activeQuery.lastProgress.processedRowsPerSecond == 12.0) - assert(activeQuery.lastProgress.batchId == 2) - assert(activeQuery.lastProgress.prettyJson == """{"a":1}""") + val activeQuery = + queryStore.allQueryUIData.filter(_.summary.isActive).find(_.summary.runId == runId) + assert(activeQuery.isDefined) + assert(activeQuery.get.summary.isActive) + assert(activeQuery.get.recentProgress.length == 1) + assert(activeQuery.get.lastProgress.id == id) + assert(activeQuery.get.lastProgress.runId == runId) + assert(activeQuery.get.lastProgress.timestamp == "2001-10-01T01:00:00.100Z") + assert(activeQuery.get.lastProgress.inputRowsPerSecond == 10.0) + assert(activeQuery.get.lastProgress.processedRowsPerSecond == 12.0) + assert(activeQuery.get.lastProgress.batchId == 2) + assert(activeQuery.get.lastProgress.prettyJson == """{"a":1}""") // handle terminate event val terminateEvent = new StreamingQueryListener.QueryTerminatedEvent(id, runId, None) listener.onQueryTerminated(terminateEvent) - assert(!listener.inactiveQueryStatus.head.isActive) - 
assert(listener.inactiveQueryStatus.head.runId == runId) - assert(listener.inactiveQueryStatus.head.id == id) + assert(!queryStore.allQueryUIData.filterNot(_.summary.isActive).head.summary.isActive) + assert(queryStore.allQueryUIData.filterNot(_.summary.isActive).head.summary.runId == runId) + assert(queryStore.allQueryUIData.filterNot(_.summary.isActive).head.summary.id == id) } test("same query start multiple times") { - val listener = new StreamingQueryStatusListener(spark.sparkContext.conf) + val kvStore = new ElementTrackingStore(new InMemoryStore(), sparkConf) + val listener = new StreamingQueryStatusListener(spark.sparkContext.conf, kvStore) + val queryStore = new StreamingQueryStatusStore(kvStore) // handle first time start val id = UUID.randomUUID() @@ -94,11 +108,106 @@ class StreamingQueryStatusListenerSuite extends StreamTest { listener.onQueryStarted(startEvent1) // result checking - assert(listener.activeQueryStatus.size() == 1) - assert(listener.inactiveQueryStatus.length == 1) - assert(listener.activeQueryStatus.containsKey(runId1)) - assert(listener.activeQueryStatus.get(runId1).id == id) - assert(listener.inactiveQueryStatus.head.runId == runId0) - assert(listener.inactiveQueryStatus.head.id == id) + assert(queryStore.allQueryUIData.count(_.summary.isActive) == 1) + assert(queryStore.allQueryUIData.filterNot(_.summary.isActive).length == 1) + assert(queryStore.allQueryUIData.filter(_.summary.isActive).exists(_.summary.runId == runId1)) + assert(queryStore.allQueryUIData.filter(_.summary.isActive).exists(uiData => + uiData.summary.runId == runId1 && uiData.summary.id == id)) + assert(queryStore.allQueryUIData.filterNot(_.summary.isActive).head.summary.runId == runId0) + assert(queryStore.allQueryUIData.filterNot(_.summary.isActive).head.summary.id == id) + } + + test("test small retained queries") { + val kvStore = new ElementTrackingStore(new InMemoryStore(), sparkConf) + val conf = spark.sparkContext.conf + conf.set(StaticSQLConf.STREAMING_UI_RETAINED_QUERIES.key, "2") + val listener = new StreamingQueryStatusListener(conf, kvStore) + val queryStore = new StreamingQueryStatusStore(kvStore) + + def addNewQuery(): (UUID, UUID) = { + val format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") // ISO8601 + format.setTimeZone(getTimeZone("UTC")) + val id = UUID.randomUUID() + val runId = UUID.randomUUID() + val startEvent = new StreamingQueryListener.QueryStartedEvent( + id, runId, "test1", format.format(new Date(System.currentTimeMillis()))) + listener.onQueryStarted(startEvent) + (id, runId) + } + + def checkInactiveQueryStatus(numInactives: Int, targetInactives: Seq[UUID]): Unit = { + eventually(timeout(10.seconds)) { + val inactiveQueries = queryStore.allQueryUIData.filter(!_.summary.isActive) + assert(inactiveQueries.size == numInactives) + assert(inactiveQueries.map(_.summary.id).toSet == targetInactives.toSet) + } + } + + val (id1, runId1) = addNewQuery() + val (id2, runId2) = addNewQuery() + val (id3, runId3) = addNewQuery() + assert(queryStore.allQueryUIData.count(!_.summary.isActive) == 0) + + val terminateEvent1 = new StreamingQueryListener.QueryTerminatedEvent(id1, runId1, None) + listener.onQueryTerminated(terminateEvent1) + checkInactiveQueryStatus(1, Seq(id1)) + val terminateEvent2 = new StreamingQueryListener.QueryTerminatedEvent(id2, runId2, None) + listener.onQueryTerminated(terminateEvent2) + checkInactiveQueryStatus(2, Seq(id1, id2)) + val terminateEvent3 = new StreamingQueryListener.QueryTerminatedEvent(id3, runId3, None) + 
listener.onQueryTerminated(terminateEvent3) + checkInactiveQueryStatus(2, Seq(id2, id3)) + } + + test("test small retained progress") { + val kvStore = new ElementTrackingStore(new InMemoryStore(), sparkConf) + val conf = spark.sparkContext.conf + conf.set(StaticSQLConf.STREAMING_UI_RETAINED_PROGRESS_UPDATES.key, "5") + val listener = new StreamingQueryStatusListener(conf, kvStore) + val queryStore = new StreamingQueryStatusStore(kvStore) + + val id = UUID.randomUUID() + val runId = UUID.randomUUID() + val startEvent = new StreamingQueryListener.QueryStartedEvent( + id, runId, "test", "2016-12-05T20:54:20.827Z") + listener.onQueryStarted(startEvent) + + var batchId: Int = 0 + + def addQueryProgress(): Unit = { + val progress = mockProgressData(id, runId) + val processEvent = new streaming.StreamingQueryListener.QueryProgressEvent(progress) + listener.onQueryProgress(processEvent) + } + + def mockProgressData(id: UUID, runId: UUID): StreamingQueryProgress = { + val format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") // ISO8601 + format.setTimeZone(getTimeZone("UTC")) + + val progress = mock(classOf[StreamingQueryProgress], RETURNS_SMART_NULLS) + when(progress.id).thenReturn(id) + when(progress.runId).thenReturn(runId) + when(progress.timestamp).thenReturn(format.format(new Date(System.currentTimeMillis()))) + when(progress.inputRowsPerSecond).thenReturn(10.0) + when(progress.processedRowsPerSecond).thenReturn(12.0) + when(progress.batchId).thenReturn(batchId) + when(progress.prettyJson).thenReturn("""{"a":1}""") + + batchId += 1 + progress + } + + def checkQueryProcessData(targetNum: Int): Unit = { + eventually(timeout(10.seconds)) { + assert(queryStore.getQueryProgressData(runId).size == targetNum) + } + } + + Array.tabulate(4) { _ => addQueryProgress() } + checkQueryProcessData(4) + addQueryProgress() + checkQueryProcessData(5) + addQueryProgress() + checkQueryProcessData(5) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/UISeleniumSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/UISeleniumSuite.scala index 82aa1453f9ba2..db3d6529c9906 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/UISeleniumSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/ui/UISeleniumSuite.scala @@ -31,7 +31,10 @@ import org.apache.spark.internal.config.UI.{UI_ENABLED, UI_PORT} import org.apache.spark.sql.LocalSparkSession.withSparkSession import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.util.quietly -import org.apache.spark.sql.streaming.StreamingQueryException +import org.apache.spark.sql.functions.{window => windowFn, _} +import org.apache.spark.sql.internal.SQLConf.SHUFFLE_PARTITIONS +import org.apache.spark.sql.internal.StaticSQLConf.ENABLED_STREAMING_UI_CUSTOM_METRIC_LIST +import org.apache.spark.sql.streaming.{StreamingQueryException, Trigger} import org.apache.spark.ui.SparkUICssErrorHandler class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers with BeforeAndAfterAll { @@ -51,8 +54,10 @@ class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers with B val conf = new SparkConf() .setMaster(master) .setAppName("ui-test") + .set(SHUFFLE_PARTITIONS, 5) .set(UI_ENABLED, true) .set(UI_PORT, 0) + .set(ENABLED_STREAMING_UI_CUSTOM_METRIC_LIST, Seq("stateOnCurrentVersionSizeBytes")) additionalConfs.foreach { case (k, v) => conf.set(k, v) } val spark = SparkSession.builder().master(master).config(conf).getOrCreate() 
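The UI suite enables one extra state-store metric on the statistics page via ENABLED_STREAMING_UI_CUSTOM_METRIC_LIST. In an application the same static conf can be supplied while building the session; this is a sketch that assumes the constant's raw key is spark.sql.streaming.ui.enabledCustomMetricList (check StaticSQLConf before relying on it):

```scala
import org.apache.spark.sql.SparkSession

object StreamingUiCustomMetricSketch {
  def main(args: Array[String]): Unit = {
    // Static conf, so it must be set before the session (and its shared state) is created.
    val spark = SparkSession.builder()
      .master("local[2]")
      .appName("streaming-ui-sketch")
      .config("spark.ui.enabled", "true")
      // Assumed raw key for StaticSQLConf.ENABLED_STREAMING_UI_CUSTOM_METRIC_LIST.
      .config("spark.sql.streaming.ui.enabledCustomMetricList", "stateOnCurrentVersionSizeBytes")
      .getOrCreate()

    // A stateful query so the statistics page has state-store metrics to chart.
    spark.readStream.format("rate").load()
      .groupBy("value").count()
      .writeStream.outputMode("complete").format("noop").start()
      .awaitTermination()
  }
}
```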
assert(spark.sparkContext.ui.isDefined) @@ -75,10 +80,17 @@ class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers with B val h3Text = findAll(cssSelector("h3")).map(_.text).toSeq h3Text should not contain ("Streaming Query") + val input1 = spark.readStream.format("rate").load() + val input2 = spark.readStream.format("rate").load() + val input3 = spark.readStream.format("rate").load() val activeQuery = - spark.readStream.format("rate").load().writeStream.format("noop").start() + input1.selectExpr("timestamp", "mod(value, 100) as mod", "value") + .withWatermark("timestamp", "0 second") + .groupBy(windowFn($"timestamp", "10 seconds", "2 seconds"), $"mod") + .agg(avg("value").as("avg_value")) + .writeStream.format("noop").trigger(Trigger.ProcessingTime("5 seconds")).start() val completedQuery = - spark.readStream.format("rate").load().writeStream.format("noop").start() + input2.join(input3, "value").writeStream.format("noop").start() completedQuery.stop() val failedQuery = spark.readStream.format("rate").load().select("value").as[Long] .map(_ / 0).writeStream.format("noop").start() @@ -129,6 +141,20 @@ class UISeleniumSuite extends SparkFunSuite with WebBrowser with Matchers with B findAll(cssSelector("""#stat-table th""")).map(_.text).toSeq should be { List("", "Timelines", "Histograms") } + summaryText should contain ("Input Rate (?)") + summaryText should contain ("Process Rate (?)") + summaryText should contain ("Input Rows (?)") + summaryText should contain ("Batch Duration (?)") + summaryText should contain ("Operation Duration (?)") + summaryText should contain ("Global Watermark Gap (?)") + summaryText should contain ("Aggregated Number Of Total State Rows (?)") + summaryText should contain ("Aggregated Number Of Updated State Rows (?)") + summaryText should contain ("Aggregated State Memory Used In Bytes (?)") + summaryText should contain ("Aggregated Number Of Rows Dropped By Watermark (?)") + summaryText should contain ("Aggregated Custom Metric stateOnCurrentVersionSizeBytes" + + " (?)") + summaryText should not contain ("Aggregated Custom Metric loadedMapCacheHitCount (?)") + summaryText should not contain ("Aggregated Custom Metric loadedMapCacheMissCount (?)") } } finally { spark.streams.active.foreach(_.stop()) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala index c4ca85d6237b2..4e61dba4955af 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala @@ -40,7 +40,7 @@ import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.plans.logical.{AppendData, LogicalPlan, OverwriteByExpression} import org.apache.spark.sql.execution.QueryExecution -import org.apache.spark.sql.execution.datasources.DataSourceUtils +import org.apache.spark.sql.execution.datasources.{DataSourceUtils, HadoopFsRelation, LogicalRelation} import org.apache.spark.sql.execution.datasources.noop.NoopDataSource import org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase import org.apache.spark.sql.internal.SQLConf @@ -1190,4 +1190,33 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSparkSession with verifyLoadFails(df.write.option("path", path).format("parquet").save(path)) verifyLoadFails(df.write.option("path", 
path).format("parquet").save("")) } + + test("SPARK-32853: consecutive load/save calls should be allowed") { + val dfr = spark.read.format(classOf[FakeSourceOne].getName) + dfr.load("1") + dfr.load("2") + val dfw = spark.range(10).write.format(classOf[DefaultSource].getName) + dfw.save("1") + dfw.save("2") + } + + test("SPARK-32844: DataFrameReader.table take the specified options for V1 relation") { + withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") { + withTable("t") { + sql("CREATE TABLE t(i int, d double) USING parquet OPTIONS ('p1'='v1', 'p2'='v2')") + + val msg = intercept[AnalysisException] { + spark.read.option("P1", "v3").table("t").count() + }.getMessage + assert(msg.contains("duplicated key")) + + val df = spark.read.option("P2", "v2").option("p3", "v3").table("t") + val options = df.queryExecution.analyzed.collectFirst { + case r: LogicalRelation => r.relation.asInstanceOf[HadoopFsRelation].options + }.get + assert(options("p2") == "v2") + assert(options("p3") == "v3") + } + } + } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/GenericFunSpecSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/GenericFunSpecSuite.scala index 1b6724054a3ad..d15e5c42732d1 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/GenericFunSpecSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/GenericFunSpecSuite.scala @@ -19,8 +19,6 @@ package org.apache.spark.sql.test import org.scalatest.funspec.AnyFunSpec -import org.apache.spark.sql.Dataset - /** * The purpose of this suite is to make sure that generic FunSpec-based scala * tests work with a shared spark session diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala index c51faaf10f5dd..a1fd4a0215b1f 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestData.scala @@ -169,10 +169,10 @@ private[sql] trait SQLTestData { self => rdd } - protected lazy val calenderIntervalData: RDD[IntervalData] = { + protected lazy val calendarIntervalData: RDD[IntervalData] = { val rdd = spark.sparkContext.parallelize( IntervalData(new CalendarInterval(1, 1, 1)) :: Nil) - rdd.toDF().createOrReplaceTempView("calenderIntervalData") + rdd.toDF().createOrReplaceTempView("calendarIntervalData") rdd } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala index ee29b4b8fb32b..ed2e309fa075a 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala @@ -25,13 +25,14 @@ import org.scalatest.concurrent.Eventually import org.apache.spark.{DebugFilesystem, SparkConf} import org.apache.spark.internal.config.UNSAFE_EXCEPTION_ON_MEMORY_LEAK import org.apache.spark.sql.{SparkSession, SQLContext} +import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf} trait SharedSparkSession extends SQLTestUtils with SharedSparkSessionBase { /** - * Suites extending [[SharedSparkSession]] are sharing resources (eg. SparkSession) in their + * Suites extending [[SharedSparkSession]] are sharing resources (e.g. SparkSession) in their * tests. 
That trait initializes the spark session in its [[beforeAll()]] implementation before * the automatic thread snapshot is performed, so the audit code could fail to report threads * leaked by that shared session. @@ -67,6 +68,7 @@ trait SharedSparkSessionBase .set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName) .set(UNSAFE_EXCEPTION_ON_MEMORY_LEAK, true) .set(SQLConf.CODEGEN_FALLBACK.key, "false") + .set(SQLConf.CODEGEN_FACTORY_MODE.key, CodegenObjectFactoryMode.CODEGEN_ONLY.toString) // Disable ConvertToLocalRelation for better test coverage. Test cases built on // LocalRelation will exercise the optimization rules better by disabling it as // this rule may potentially block testing of other optimization rules such as diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala index 17603deacdcdd..380723029b8a8 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/TestSQLContext.scala @@ -25,12 +25,12 @@ import org.apache.spark.sql.internal.{SessionState, SessionStateBuilder, SQLConf * A special `SparkSession` prepared for testing. */ private[spark] class TestSparkSession(sc: SparkContext) extends SparkSession(sc) { self => - def this(sparkConf: SparkConf) { + def this(sparkConf: SparkConf) = { this(new SparkContext("local[2]", "test-sql-context", sparkConf.set("spark.sql.testkey", "true"))) } - def this() { + def this() = { this(new SparkConf) } @@ -39,7 +39,7 @@ private[spark] class TestSparkSession(sc: SparkContext) extends SparkSession(sc) @transient override lazy val sessionState: SessionState = { - new TestSQLSessionStateBuilder(this, None).build() + new TestSQLSessionStateBuilder(this, None, Map.empty).build() } // Needed for Java tests @@ -66,8 +66,9 @@ private[sql] object TestSQLContext { private[sql] class TestSQLSessionStateBuilder( session: SparkSession, - state: Option[SessionState]) - extends SessionStateBuilder(session, state) with WithTestConf { + state: Option[SessionState], + options: Map[String, String]) + extends SessionStateBuilder(session, state, options) with WithTestConf { override def overrideConfs: Map[String, String] = TestSQLContext.overrideConfs - override def newBuilder: NewBuilder = new TestSQLSessionStateBuilder(_, _) + override def newBuilder: NewBuilder = new TestSQLSessionStateBuilder(_, _, Map.empty) } diff --git a/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala index 0c0e3ac90510e..1510e8957f9ae 100644 --- a/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/status/api/v1/sql/SqlResourceWithActualMetricsSuite.scala @@ -26,7 +26,9 @@ import org.json4s.jackson.JsonMethods import org.apache.spark.SparkConf import org.apache.spark.deploy.history.HistoryServerSuite.getContentAndCode import org.apache.spark.sql.DataFrame +import org.apache.spark.sql.catalyst.plans.SQLHelper import org.apache.spark.sql.execution.metric.SQLMetricsTestUtils +import org.apache.spark.sql.internal.SQLConf.ADAPTIVE_EXECUTION_ENABLED import org.apache.spark.sql.test.SharedSparkSession case class Person(id: Int, name: String, age: Int) @@ -35,7 +37,8 @@ case class Salary(personId: Int, salary: Double) /** * Sql Resource Public API Unit Tests 
running query and extracting the metrics. */ -class SqlResourceWithActualMetricsSuite extends SharedSparkSession with SQLMetricsTestUtils { +class SqlResourceWithActualMetricsSuite + extends SharedSparkSession with SQLMetricsTestUtils with SQLHelper { import testImplicits._ @@ -52,8 +55,10 @@ class SqlResourceWithActualMetricsSuite extends SharedSparkSession with SQLMetri test("Check Sql Rest Api Endpoints") { // Materalize result DataFrame - val count = getDF().count() - assert(count == 2, s"Expected Query Count is 2 but received: $count") + withSQLConf(ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + val count = getDF().count() + assert(count == 2, s"Expected Query Count is 2 but received: $count") + } // Spark apps launched by local-mode seems not having `attemptId` as default // so UT is just added for existing endpoints. diff --git a/sql/core/v1.2/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java b/sql/core/v1.2/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java deleted file mode 100644 index 6601bcb9018f4..0000000000000 --- a/sql/core/v1.2/src/main/java/org/apache/spark/sql/execution/datasources/orc/OrcColumnVector.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.execution.datasources.orc; - -import java.math.BigDecimal; - -import org.apache.orc.storage.ql.exec.vector.*; - -import org.apache.spark.sql.catalyst.util.DateTimeUtils; -import org.apache.spark.sql.catalyst.util.RebaseDateTime; -import org.apache.spark.sql.types.DataType; -import org.apache.spark.sql.types.DateType; -import org.apache.spark.sql.types.Decimal; -import org.apache.spark.sql.types.TimestampType; -import org.apache.spark.sql.vectorized.ColumnarArray; -import org.apache.spark.sql.vectorized.ColumnarMap; -import org.apache.spark.unsafe.types.UTF8String; - -/** - * A column vector class wrapping Hive's ColumnVector. Because Spark ColumnarBatch only accepts - * Spark's vectorized.ColumnVector, this column vector is used to adapt Hive ColumnVector with - * Spark ColumnarVector. 
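The scaladoc above ends on the key point: `ColumnarBatch` only accepts Spark's own `vectorized.ColumnVector`, so the deleted class is an adapter around the Hive/ORC vector rather than a new storage format. As a rough, invented illustration of that adapter shape (not part of this patch), a Spark `ColumnVector` backed by a plain `Int` array might look like this:

```scala
import org.apache.spark.sql.types.{Decimal, IntegerType}
import org.apache.spark.sql.vectorized.{ColumnVector, ColumnarArray, ColumnarMap}
import org.apache.spark.unsafe.types.UTF8String

// Adapts an Int array (plus a null mask) to the read-only ColumnVector API.
class IntArrayColumnVector(values: Array[Int], nulls: Array[Boolean])
  extends ColumnVector(IntegerType) {

  override def close(): Unit = {}
  override def hasNull(): Boolean = nulls.exists(identity)
  override def numNulls(): Int = nulls.count(identity)
  override def isNullAt(rowId: Int): Boolean = nulls(rowId)

  override def getInt(rowId: Int): Int = values(rowId)
  override def getLong(rowId: Int): Long = values(rowId).toLong
  override def getShort(rowId: Int): Short = values(rowId).toShort
  override def getByte(rowId: Int): Byte = values(rowId).toByte
  override def getBoolean(rowId: Int): Boolean = values(rowId) != 0
  override def getFloat(rowId: Int): Float = values(rowId).toFloat
  override def getDouble(rowId: Int): Double = values(rowId).toDouble

  // Not meaningful for an Int-backed vector.
  override def getDecimal(rowId: Int, precision: Int, scale: Int): Decimal =
    throw new UnsupportedOperationException()
  override def getUTF8String(rowId: Int): UTF8String = throw new UnsupportedOperationException()
  override def getBinary(rowId: Int): Array[Byte] = throw new UnsupportedOperationException()
  override def getArray(rowId: Int): ColumnarArray = throw new UnsupportedOperationException()
  override def getMap(ordinal: Int): ColumnarMap = throw new UnsupportedOperationException()
  override def getChild(ordinal: Int): ColumnVector = throw new UnsupportedOperationException()
}
```

The deleted OrcColumnVector follows the same pattern, only dispatching to whichever wrapped Hive vector subtype (long, double, bytes, decimal, timestamp) actually holds the data.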
- */ -public class OrcColumnVector extends org.apache.spark.sql.vectorized.ColumnVector { - private ColumnVector baseData; - private LongColumnVector longData; - private DoubleColumnVector doubleData; - private BytesColumnVector bytesData; - private DecimalColumnVector decimalData; - private TimestampColumnVector timestampData; - private final boolean isTimestamp; - private final boolean isDate; - - private int batchSize; - - OrcColumnVector(DataType type, ColumnVector vector) { - super(type); - - if (type instanceof TimestampType) { - isTimestamp = true; - } else { - isTimestamp = false; - } - - if (type instanceof DateType) { - isDate = true; - } else { - isDate = false; - } - - baseData = vector; - if (vector instanceof LongColumnVector) { - longData = (LongColumnVector) vector; - } else if (vector instanceof DoubleColumnVector) { - doubleData = (DoubleColumnVector) vector; - } else if (vector instanceof BytesColumnVector) { - bytesData = (BytesColumnVector) vector; - } else if (vector instanceof DecimalColumnVector) { - decimalData = (DecimalColumnVector) vector; - } else if (vector instanceof TimestampColumnVector) { - timestampData = (TimestampColumnVector) vector; - } else { - throw new UnsupportedOperationException(); - } - } - - public void setBatchSize(int batchSize) { - this.batchSize = batchSize; - } - - @Override - public void close() { - - } - - @Override - public boolean hasNull() { - return !baseData.noNulls; - } - - @Override - public int numNulls() { - if (baseData.isRepeating) { - if (baseData.isNull[0]) { - return batchSize; - } else { - return 0; - } - } else if (baseData.noNulls) { - return 0; - } else { - int count = 0; - for (int i = 0; i < batchSize; i++) { - if (baseData.isNull[i]) count++; - } - return count; - } - } - - /* A helper method to get the row index in a column. */ - private int getRowIndex(int rowId) { - return baseData.isRepeating ? 
0 : rowId; - } - - @Override - public boolean isNullAt(int rowId) { - return baseData.isNull[getRowIndex(rowId)]; - } - - @Override - public boolean getBoolean(int rowId) { - return longData.vector[getRowIndex(rowId)] == 1; - } - - @Override - public byte getByte(int rowId) { - return (byte) longData.vector[getRowIndex(rowId)]; - } - - @Override - public short getShort(int rowId) { - return (short) longData.vector[getRowIndex(rowId)]; - } - - @Override - public int getInt(int rowId) { - int value = (int) longData.vector[getRowIndex(rowId)]; - if (isDate) { - return RebaseDateTime.rebaseJulianToGregorianDays(value); - } else { - return value; - } - } - - @Override - public long getLong(int rowId) { - int index = getRowIndex(rowId); - if (isTimestamp) { - return DateTimeUtils.fromJavaTimestamp(timestampData.asScratchTimestamp(index)); - } else { - return longData.vector[index]; - } - } - - @Override - public float getFloat(int rowId) { - return (float) doubleData.vector[getRowIndex(rowId)]; - } - - @Override - public double getDouble(int rowId) { - return doubleData.vector[getRowIndex(rowId)]; - } - - @Override - public Decimal getDecimal(int rowId, int precision, int scale) { - if (isNullAt(rowId)) return null; - BigDecimal data = decimalData.vector[getRowIndex(rowId)].getHiveDecimal().bigDecimalValue(); - return Decimal.apply(data, precision, scale); - } - - @Override - public UTF8String getUTF8String(int rowId) { - if (isNullAt(rowId)) return null; - int index = getRowIndex(rowId); - BytesColumnVector col = bytesData; - return UTF8String.fromBytes(col.vector[index], col.start[index], col.length[index]); - } - - @Override - public byte[] getBinary(int rowId) { - if (isNullAt(rowId)) return null; - int index = getRowIndex(rowId); - byte[] binary = new byte[bytesData.length[index]]; - System.arraycopy(bytesData.vector[index], bytesData.start[index], binary, 0, binary.length); - return binary; - } - - @Override - public ColumnarArray getArray(int rowId) { - throw new UnsupportedOperationException(); - } - - @Override - public ColumnarMap getMap(int rowId) { - throw new UnsupportedOperationException(); - } - - @Override - public org.apache.spark.sql.vectorized.ColumnVector getChild(int ordinal) { - throw new UnsupportedOperationException(); - } -} diff --git a/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/DaysWritable.scala b/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/DaysWritable.scala deleted file mode 100644 index 1dccf0ca1faef..0000000000000 --- a/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/DaysWritable.scala +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.sql.execution.datasources.orc - -import java.io.{DataInput, DataOutput, IOException} -import java.sql.Date - -import org.apache.hadoop.io.WritableUtils -import org.apache.orc.storage.serde2.io.DateWritable - -import org.apache.spark.sql.catalyst.util.RebaseDateTime.{rebaseGregorianToJulianDays, rebaseJulianToGregorianDays} - -/** - * The class accepts/returns days in Gregorian calendar and rebase them - * via conversion to local date in Julian calendar for dates before 1582-10-15 - * in read/write for backward compatibility with Spark 2.4 and earlier versions. - * - * This is a clone of `org.apache.spark.sql.execution.datasources.DaysWritable`. - * The class is cloned because Hive ORC v1.2 uses different `DateWritable`: - * - v1.2: `org.apache.orc.storage.serde2.io.DateWritable` - * - v2.3 and `HiveInspectors`: `org.apache.hadoop.hive.serde2.io.DateWritable` - * - * @param gregorianDays The number of days since the epoch 1970-01-01 in - * Gregorian calendar. - * @param julianDays The number of days since the epoch 1970-01-01 in - * Julian calendar. - */ -class DaysWritable( - var gregorianDays: Int, - var julianDays: Int) - extends DateWritable { - - def this() = this(0, 0) - def this(gregorianDays: Int) = - this(gregorianDays, rebaseGregorianToJulianDays(gregorianDays)) - def this(dateWritable: DateWritable) = { - this( - gregorianDays = dateWritable match { - case daysWritable: DaysWritable => daysWritable.gregorianDays - case dateWritable: DateWritable => - rebaseJulianToGregorianDays(dateWritable.getDays) - }, - julianDays = dateWritable.getDays) - } - - override def getDays: Int = julianDays - override def get(): Date = new Date(DateWritable.daysToMillis(julianDays)) - - override def set(d: Int): Unit = { - gregorianDays = d - julianDays = rebaseGregorianToJulianDays(d) - } - - @throws[IOException] - override def write(out: DataOutput): Unit = { - WritableUtils.writeVInt(out, julianDays) - } - - @throws[IOException] - override def readFields(in: DataInput): Unit = { - julianDays = WritableUtils.readVInt(in) - gregorianDays = rebaseJulianToGregorianDays(julianDays) - } -} diff --git a/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala b/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala deleted file mode 100644 index 0e657bfe66238..0000000000000 --- a/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilters.scala +++ /dev/null @@ -1,275 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
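The DaysWritable scaladoc above describes the Gregorian/Julian day rebasing but never shows a value. A tiny standalone check, using only the `RebaseDateTime` helpers the deleted file already imports (the object name is made up for this note):

```scala
import java.time.LocalDate

import org.apache.spark.sql.catalyst.util.RebaseDateTime.{
  rebaseGregorianToJulianDays, rebaseJulianToGregorianDays}

object RebaseDaysSketch {
  def main(args: Array[String]): Unit = {
    // 1000-01-01 is well before the 1582-10-15 cutover, so the proleptic
    // Gregorian and hybrid Julian calendars disagree on days since 1970-01-01.
    val gregorianDays = LocalDate.of(1000, 1, 1).toEpochDay.toInt
    val julianDays = rebaseGregorianToJulianDays(gregorianDays)
    println(s"gregorian=$gregorianDays julian=$julianDays shift=${julianDays - gregorianDays}")

    // The rebase is reversible, which is what read/write round trips rely on.
    assert(rebaseJulianToGregorianDays(julianDays) == gregorianDays)
  }
}
```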
- */ - -package org.apache.spark.sql.execution.datasources.orc - -import java.time.{Instant, LocalDate} - -import org.apache.orc.storage.common.`type`.HiveDecimal -import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument} -import org.apache.orc.storage.ql.io.sarg.SearchArgument.Builder -import org.apache.orc.storage.ql.io.sarg.SearchArgumentFactory.newBuilder -import org.apache.orc.storage.serde2.io.HiveDecimalWritable - -import org.apache.spark.SparkException -import org.apache.spark.sql.catalyst.util.DateTimeUtils.{instantToMicros, localDateToDays, toJavaDate, toJavaTimestamp} -import org.apache.spark.sql.internal.SQLConf -import org.apache.spark.sql.sources.Filter -import org.apache.spark.sql.types._ - -/** - * Helper object for building ORC `SearchArgument`s, which are used for ORC predicate push-down. - * - * Due to limitation of ORC `SearchArgument` builder, we had to implement separate checking and - * conversion passes through the Filter to make sure we only convert predicates that are known - * to be convertible. - * - * An ORC `SearchArgument` must be built in one pass using a single builder. For example, you can't - * build `a = 1` and `b = 2` first, and then combine them into `a = 1 AND b = 2`. This is quite - * different from the cases in Spark SQL or Parquet, where complex filters can be easily built using - * existing simpler ones. - * - * The annoying part is that, `SearchArgument` builder methods like `startAnd()`, `startOr()`, and - * `startNot()` mutate internal state of the builder instance. This forces us to translate all - * convertible filters with a single builder instance. However, if we try to translate a filter - * before checking whether it can be converted or not, we may end up with a builder whose internal - * state is inconsistent in the case of an inconvertible filter. - * - * For example, to convert an `And` filter with builder `b`, we call `b.startAnd()` first, and then - * try to convert its children. Say we convert `left` child successfully, but find that `right` - * child is inconvertible. Alas, `b.startAnd()` call can't be rolled back, and `b` is inconsistent - * now. - * - * The workaround employed here is to trim the Spark filters before trying to convert them. This - * way, we can only do the actual conversion on the part of the Filter that is known to be - * convertible. - * - * P.S.: Hive seems to use `SearchArgument` together with `ExprNodeGenericFuncDesc` only. Usage of - * builder methods mentioned above can only be found in test code, where all tested filters are - * known to be convertible. - */ -private[sql] object OrcFilters extends OrcFiltersBase { - - /** - * Create ORC filter as a SearchArgument instance. - */ - def createFilter(schema: StructType, filters: Seq[Filter]): Option[SearchArgument] = { - val dataTypeMap = OrcFilters.getSearchableTypeMap(schema, SQLConf.get.caseSensitiveAnalysis) - // Combines all convertible filters using `And` to produce a single conjunction - val conjunctionOptional = buildTree(convertibleFilters(schema, dataTypeMap, filters)) - conjunctionOptional.map { conjunction => - // Then tries to build a single ORC `SearchArgument` for the conjunction predicate. - // The input predicate is fully convertible. There should not be any empty result in the - // following recursive method call `buildSearchArgument`. 
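The object-level comment above is the central constraint in this file: a `SearchArgument` cannot be assembled from separately built pieces, because `startAnd()`/`startOr()`/`startNot()` mutate a single builder. A self-contained sketch of that one-pass style, using the same shaded `org.apache.orc.storage` sarg API the deleted file imports (the column names and the predicate `a < 10 AND NOT(b = 2)` are invented):

```scala
import org.apache.orc.storage.ql.io.sarg.PredicateLeaf
import org.apache.orc.storage.ql.io.sarg.SearchArgumentFactory.newBuilder

object SearchArgumentSketch {
  def main(args: Array[String]): Unit = {
    // One builder, one pass: there is no way to build `a < 10` and `NOT(b = 2)`
    // independently and merge them afterwards.
    val sarg = newBuilder()
      .startAnd()
        .lessThan("a", PredicateLeaf.Type.LONG, 10L)
        .startNot()
          .equals("b", PredicateLeaf.Type.LONG, 2L)
        .end()
      .end()
      .build()

    // Prints roughly:
    //   leaf-0 = (LESS_THAN a 10), leaf-1 = (EQUALS b 2), expr = (and leaf-0 (not leaf-1))
    println(sarg)
  }
}
```

This is exactly why `createFilter` first trims the Spark filters down to the convertible subset and only then walks them with `buildSearchArgument`: a conversion that fails halfway through would leave the builder in an inconsistent state.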
- buildSearchArgument(dataTypeMap, conjunction, newBuilder).build() - } - } - - def convertibleFilters( - schema: StructType, - dataTypeMap: Map[String, OrcPrimitiveField], - filters: Seq[Filter]): Seq[Filter] = { - import org.apache.spark.sql.sources._ - - def convertibleFiltersHelper( - filter: Filter, - canPartialPushDown: Boolean): Option[Filter] = filter match { - // At here, it is not safe to just convert one side and remove the other side - // if we do not understand what the parent filters are. - // - // Here is an example used to explain the reason. - // Let's say we have NOT(a = 2 AND b in ('1')) and we do not understand how to - // convert b in ('1'). If we only convert a = 2, we will end up with a filter - // NOT(a = 2), which will generate wrong results. - // - // Pushing one side of AND down is only safe to do at the top level or in the child - // AND before hitting NOT or OR conditions, and in this case, the unsupported predicate - // can be safely removed. - case And(left, right) => - val leftResultOptional = convertibleFiltersHelper(left, canPartialPushDown) - val rightResultOptional = convertibleFiltersHelper(right, canPartialPushDown) - (leftResultOptional, rightResultOptional) match { - case (Some(leftResult), Some(rightResult)) => Some(And(leftResult, rightResult)) - case (Some(leftResult), None) if canPartialPushDown => Some(leftResult) - case (None, Some(rightResult)) if canPartialPushDown => Some(rightResult) - case _ => None - } - - // The Or predicate is convertible when both of its children can be pushed down. - // That is to say, if one/both of the children can be partially pushed down, the Or - // predicate can be partially pushed down as well. - // - // Here is an example used to explain the reason. - // Let's say we have - // (a1 AND a2) OR (b1 AND b2), - // a1 and b1 is convertible, while a2 and b2 is not. - // The predicate can be converted as - // (a1 OR b1) AND (a1 OR b2) AND (a2 OR b1) AND (a2 OR b2) - // As per the logical in And predicate, we can push down (a1 OR b1). - case Or(left, right) => - for { - lhs <- convertibleFiltersHelper(left, canPartialPushDown) - rhs <- convertibleFiltersHelper(right, canPartialPushDown) - } yield Or(lhs, rhs) - case Not(pred) => - val childResultOptional = convertibleFiltersHelper(pred, canPartialPushDown = false) - childResultOptional.map(Not) - case other => - for (_ <- buildLeafSearchArgument(dataTypeMap, other, newBuilder())) yield other - } - filters.flatMap { filter => - convertibleFiltersHelper(filter, true) - } - } - - /** - * Get PredicateLeafType which is corresponding to the given DataType. - */ - def getPredicateLeafType(dataType: DataType): PredicateLeaf.Type = dataType match { - case BooleanType => PredicateLeaf.Type.BOOLEAN - case ByteType | ShortType | IntegerType | LongType => PredicateLeaf.Type.LONG - case FloatType | DoubleType => PredicateLeaf.Type.FLOAT - case StringType => PredicateLeaf.Type.STRING - case DateType => PredicateLeaf.Type.DATE - case TimestampType => PredicateLeaf.Type.TIMESTAMP - case _: DecimalType => PredicateLeaf.Type.DECIMAL - case _ => throw new UnsupportedOperationException(s"DataType: ${dataType.catalogString}") - } - - /** - * Cast literal values for filters. - * - * We need to cast to long because ORC raises exceptions - * at 'checkLiteralType' of SearchArgumentImpl.java. 
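The comment inside `convertibleFiltersHelper` above argues that, underneath a `Not`, keeping only one side of an `And` changes the query's meaning. Its own example is easy to verify with plain collections, no Spark required:

```scala
// NOT(a = 2 AND b IN ('1')) versus the partially pushed NOT(a = 2),
// evaluated over a handful of (a, b) rows.
object PartialNotPushdownSketch {
  def main(args: Array[String]): Unit = {
    val rows = Seq((1, "1"), (2, "1"), (2, "2"), (3, "3"))
    val full    = rows.filter { case (a, b) => !(a == 2 && Set("1").contains(b)) }
    val partial = rows.filter { case (a, _) => !(a == 2) }
    println(full)     // List((1,1), (2,2), (3,3))
    println(partial)  // List((1,1), (3,3))  -- (2,"2") is wrongly dropped
  }
}
```

Hence partial pushdown of `And` is only attempted at the top level, or inside `And` chains that have not yet passed under a `Not` or `Or`.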
- */ - private def castLiteralValue(value: Any, dataType: DataType): Any = dataType match { - case ByteType | ShortType | IntegerType | LongType => - value.asInstanceOf[Number].longValue - case FloatType | DoubleType => - value.asInstanceOf[Number].doubleValue() - case _: DecimalType => - new HiveDecimalWritable(HiveDecimal.create(value.asInstanceOf[java.math.BigDecimal])) - case _: DateType if value.isInstanceOf[LocalDate] => - toJavaDate(localDateToDays(value.asInstanceOf[LocalDate])) - case _: TimestampType if value.isInstanceOf[Instant] => - toJavaTimestamp(instantToMicros(value.asInstanceOf[Instant])) - case _ => value - } - - /** - * Build a SearchArgument and return the builder so far. - * - * @param dataTypeMap a map from the attribute name to its data type. - * @param expression the input predicates, which should be fully convertible to SearchArgument. - * @param builder the input SearchArgument.Builder. - * @return the builder so far. - */ - private def buildSearchArgument( - dataTypeMap: Map[String, OrcPrimitiveField], - expression: Filter, - builder: Builder): Builder = { - import org.apache.spark.sql.sources._ - - expression match { - case And(left, right) => - val lhs = buildSearchArgument(dataTypeMap, left, builder.startAnd()) - val rhs = buildSearchArgument(dataTypeMap, right, lhs) - rhs.end() - - case Or(left, right) => - val lhs = buildSearchArgument(dataTypeMap, left, builder.startOr()) - val rhs = buildSearchArgument(dataTypeMap, right, lhs) - rhs.end() - - case Not(child) => - buildSearchArgument(dataTypeMap, child, builder.startNot()).end() - - case other => - buildLeafSearchArgument(dataTypeMap, other, builder).getOrElse { - throw new SparkException( - "The input filter of OrcFilters.buildSearchArgument should be fully convertible.") - } - } - } - - /** - * Build a SearchArgument for a leaf predicate and return the builder so far. - * - * @param dataTypeMap a map from the attribute name to its data type. - * @param expression the input filter predicates. - * @param builder the input SearchArgument.Builder. - * @return the builder so far. - */ - private def buildLeafSearchArgument( - dataTypeMap: Map[String, OrcPrimitiveField], - expression: Filter, - builder: Builder): Option[Builder] = { - def getType(attribute: String): PredicateLeaf.Type = - getPredicateLeafType(dataTypeMap(attribute).fieldType) - - import org.apache.spark.sql.sources._ - - // NOTE: For all case branches dealing with leaf predicates below, the additional `startAnd()` - // call is mandatory. ORC `SearchArgument` builder requires that all leaf predicates must be - // wrapped by a "parent" predicate (`And`, `Or`, or `Not`). 
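The NOTE just above, and the `GreaterThan`/`GreaterThanOrEqual` branches that follow, read more easily with a concrete build in hand: every leaf needs a wrapping parent (`startAnd`, `startOr`, or `startNot`), and since the builder has no greater-than primitive, `a > 3` is expressed as `NOT(a <= 3)`. The column name and type below are illustrative only:

```scala
import org.apache.orc.storage.ql.io.sarg.PredicateLeaf
import org.apache.orc.storage.ql.io.sarg.SearchArgumentFactory.newBuilder

object LeafWrappingSketch {
  def main(args: Array[String]): Unit = {
    // IS NULL: a single leaf still gets a parent via startAnd().
    val isNull = newBuilder().startAnd()
      .isNull("a", PredicateLeaf.Type.LONG)
      .end().build()

    // a > 3: negation of lessThanEquals, wrapped by startNot().
    val greaterThan = newBuilder().startNot()
      .lessThanEquals("a", PredicateLeaf.Type.LONG, 3L)
      .end().build()

    println(isNull)       // roughly: leaf-0 = (IS_NULL a), expr = leaf-0
    println(greaterThan)  // roughly: leaf-0 = (LESS_THAN_EQUALS a 3), expr = (not leaf-0)
  }
}
```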
- expression match { - case EqualTo(name, value) if dataTypeMap.contains(name) => - val castedValue = castLiteralValue(value, dataTypeMap(name).fieldType) - Some(builder.startAnd() - .equals(dataTypeMap(name).fieldName, getType(name), castedValue).end()) - - case EqualNullSafe(name, value) if dataTypeMap.contains(name) => - val castedValue = castLiteralValue(value, dataTypeMap(name).fieldType) - Some(builder.startAnd() - .nullSafeEquals(dataTypeMap(name).fieldName, getType(name), castedValue).end()) - - case LessThan(name, value) if dataTypeMap.contains(name) => - val castedValue = castLiteralValue(value, dataTypeMap(name).fieldType) - Some(builder.startAnd() - .lessThan(dataTypeMap(name).fieldName, getType(name), castedValue).end()) - - case LessThanOrEqual(name, value) if dataTypeMap.contains(name) => - val castedValue = castLiteralValue(value, dataTypeMap(name).fieldType) - Some(builder.startAnd() - .lessThanEquals(dataTypeMap(name).fieldName, getType(name), castedValue).end()) - - case GreaterThan(name, value) if dataTypeMap.contains(name) => - val castedValue = castLiteralValue(value, dataTypeMap(name).fieldType) - Some(builder.startNot() - .lessThanEquals(dataTypeMap(name).fieldName, getType(name), castedValue).end()) - - case GreaterThanOrEqual(name, value) if dataTypeMap.contains(name) => - val castedValue = castLiteralValue(value, dataTypeMap(name).fieldType) - Some(builder.startNot() - .lessThan(dataTypeMap(name).fieldName, getType(name), castedValue).end()) - - case IsNull(name) if dataTypeMap.contains(name) => - Some(builder.startAnd().isNull(dataTypeMap(name).fieldName, getType(name)).end()) - - case IsNotNull(name) if dataTypeMap.contains(name) => - Some(builder.startNot().isNull(dataTypeMap(name).fieldName, getType(name)).end()) - - case In(name, values) if dataTypeMap.contains(name) => - val castedValues = values.map(v => castLiteralValue(v, dataTypeMap(name).fieldType)) - Some(builder.startAnd().in(dataTypeMap(name).fieldName, getType(name), - castedValues.map(_.asInstanceOf[AnyRef]): _*).end()) - - case _ => None - } - } -} - diff --git a/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcShimUtils.scala b/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcShimUtils.scala deleted file mode 100644 index 7fbc1cd205b13..0000000000000 --- a/sql/core/v1.2/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcShimUtils.scala +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.spark.sql.execution.datasources.orc - -import org.apache.orc.storage.common.`type`.HiveDecimal -import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch -import org.apache.orc.storage.ql.io.sarg.{SearchArgument => OrcSearchArgument} -import org.apache.orc.storage.ql.io.sarg.PredicateLeaf.{Operator => OrcOperator} -import org.apache.orc.storage.serde2.io.{DateWritable, HiveDecimalWritable} - -import org.apache.spark.sql.catalyst.expressions.SpecializedGetters -import org.apache.spark.sql.types.Decimal - -/** - * Various utilities for ORC used to upgrade the built-in Hive. - */ -private[sql] object OrcShimUtils { - - class VectorizedRowBatchWrap(val batch: VectorizedRowBatch) {} - - private[sql] type Operator = OrcOperator - private[sql] type SearchArgument = OrcSearchArgument - - def getGregorianDays(value: Any): Int = { - new DaysWritable(value.asInstanceOf[DateWritable]).gregorianDays - } - - def getDecimal(value: Any): Decimal = { - val decimal = value.asInstanceOf[HiveDecimalWritable].getHiveDecimal() - Decimal(decimal.bigDecimalValue, decimal.precision(), decimal.scale()) - } - - def getDateWritable(reuseObj: Boolean): (SpecializedGetters, Int) => DateWritable = { - if (reuseObj) { - val result = new DaysWritable() - (getter, ordinal) => - result.set(getter.getInt(ordinal)) - result - } else { - (getter: SpecializedGetters, ordinal: Int) => - new DaysWritable(getter.getInt(ordinal)) - } - } - - def getHiveDecimalWritable(precision: Int, scale: Int): - (SpecializedGetters, Int) => HiveDecimalWritable = { - (getter, ordinal) => - val d = getter.getDecimal(ordinal, precision, scale) - new HiveDecimalWritable(HiveDecimal.create(d.toJavaBigDecimal)) - } -} diff --git a/sql/core/v1.2/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala b/sql/core/v1.2/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala deleted file mode 100644 index e159a0588dfff..0000000000000 --- a/sql/core/v1.2/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala +++ /dev/null @@ -1,676 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
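`OrcShimUtils.getDateWritable` above captures a small recurring design choice: with `reuseObj = true` the returned closure mutates one shared `DaysWritable` and allocates nothing per row, while `reuseObj = false` allocates per call so callers may safely retain the results. The same idea stripped of the Hive types (every name below is invented for this note):

```scala
// A mutable holder standing in for DaysWritable.
final class MutableDays(var days: Int = 0)

object WritableFactorySketch {
  def daysGetter(reuseObj: Boolean): Int => MutableDays = {
    if (reuseObj) {
      val shared = new MutableDays()
      days => { shared.days = days; shared }   // hot path: zero allocation per row
    } else {
      days => new MutableDays(days)            // safe if callers keep references
    }
  }

  def main(args: Array[String]): Unit = {
    val reusing = daysGetter(reuseObj = true)
    val fresh   = daysGetter(reuseObj = false)
    println(reusing(1) eq reusing(2))  // true: same instance, mutated in place
    println(fresh(1) eq fresh(2))      // false: a new instance per call
  }
}
```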
- */ - -package org.apache.spark.sql.execution.datasources.orc - -import java.math.MathContext -import java.nio.charset.StandardCharsets -import java.sql.{Date, Timestamp} - -import scala.collection.JavaConverters._ - -import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument} -import org.apache.orc.storage.ql.io.sarg.SearchArgumentFactory.newBuilder - -import org.apache.spark.{SparkConf, SparkException} -import org.apache.spark.sql.{AnalysisException, Column, DataFrame, Row} -import org.apache.spark.sql.catalyst.dsl.expressions._ -import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.planning.PhysicalOperation -import org.apache.spark.sql.execution.datasources.v2.DataSourceV2ScanRelation -import org.apache.spark.sql.execution.datasources.v2.orc.OrcScan -import org.apache.spark.sql.internal.SQLConf -import org.apache.spark.sql.test.SharedSparkSession -import org.apache.spark.sql.types._ - -/** - * A test suite that tests Apache ORC filter API based filter pushdown optimization. - * OrcFilterSuite and HiveOrcFilterSuite is logically duplicated to provide the same test coverage. - * The difference are the packages containing 'Predicate' and 'SearchArgument' classes. - * - OrcFilterSuite uses 'org.apache.orc.storage.ql.io.sarg' package. - * - HiveOrcFilterSuite uses 'org.apache.hadoop.hive.ql.io.sarg' package. - */ -class OrcFilterSuite extends OrcTest with SharedSparkSession { - - override protected def sparkConf: SparkConf = - super - .sparkConf - .set(SQLConf.USE_V1_SOURCE_LIST, "") - - protected def checkFilterPredicate( - df: DataFrame, - predicate: Predicate, - checker: (SearchArgument) => Unit): Unit = { - val output = predicate.collect { case a: Attribute => a }.distinct - val query = df - .select(output.map(e => Column(e)): _*) - .where(Column(predicate)) - - query.queryExecution.optimizedPlan match { - case PhysicalOperation(_, filters, DataSourceV2ScanRelation(_, o: OrcScan, _)) => - assert(filters.nonEmpty, "No filter is analyzed from the given query") - assert(o.pushedFilters.nonEmpty, "No filter is pushed down") - val maybeFilter = OrcFilters.createFilter(query.schema, o.pushedFilters) - assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for ${o.pushedFilters}") - checker(maybeFilter.get) - - case _ => - throw new AnalysisException("Can not match OrcTable in the query.") - } - } - - protected def checkFilterPredicate - (predicate: Predicate, filterOperator: PredicateLeaf.Operator) - (implicit df: DataFrame): Unit = { - def checkComparisonOperator(filter: SearchArgument) = { - val operator = filter.getLeaves.asScala - assert(operator.map(_.getOperator).contains(filterOperator)) - } - checkFilterPredicate(df, predicate, checkComparisonOperator) - } - - protected def checkFilterPredicate - (predicate: Predicate, stringExpr: String) - (implicit df: DataFrame): Unit = { - def checkLogicalOperator(filter: SearchArgument) = { - assert(filter.toString == stringExpr) - } - checkFilterPredicate(df, predicate, checkLogicalOperator) - } - - test("filter pushdown - integer") { - withNestedOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { case (inputDF, colName, _) => - implicit val df: DataFrame = inputDF - - val intAttr = df(colName).expr - assert(df(colName).expr.dataType === IntegerType) - - checkFilterPredicate(intAttr.isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate(intAttr === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(intAttr <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - 
checkFilterPredicate(intAttr < 2, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(intAttr > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(intAttr <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(intAttr >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === intAttr, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> intAttr, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > intAttr, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < intAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= intAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= intAttr, PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - long") { - withNestedOrcDataFrame( - (1 to 4).map(i => Tuple1(Option(i.toLong)))) { case (inputDF, colName, _) => - implicit val df: DataFrame = inputDF - - val longAttr = df(colName).expr - assert(df(colName).expr.dataType === LongType) - - checkFilterPredicate(longAttr.isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate(longAttr === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(longAttr <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate(longAttr < 2, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(longAttr > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(longAttr <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(longAttr >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === longAttr, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> longAttr, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > longAttr, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < longAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= longAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= longAttr, PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - float") { - withNestedOrcDataFrame( - (1 to 4).map(i => Tuple1(Option(i.toFloat)))) { case (inputDF, colName, _) => - implicit val df: DataFrame = inputDF - - val floatAttr = df(colName).expr - assert(df(colName).expr.dataType === FloatType) - - checkFilterPredicate(floatAttr.isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate(floatAttr === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(floatAttr <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate(floatAttr < 2, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(floatAttr > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(floatAttr <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(floatAttr >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === floatAttr, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> floatAttr, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > floatAttr, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < floatAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= floatAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= floatAttr, PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - double") { - withNestedOrcDataFrame( - (1 to 4).map(i => Tuple1(Option(i.toDouble)))) { 
case (inputDF, colName, _) => - implicit val df: DataFrame = inputDF - - val doubleAttr = df(colName).expr - assert(df(colName).expr.dataType === DoubleType) - - checkFilterPredicate(doubleAttr.isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate(doubleAttr === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(doubleAttr <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate(doubleAttr < 2, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(doubleAttr > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(doubleAttr <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(doubleAttr >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === doubleAttr, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> doubleAttr, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > doubleAttr, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < doubleAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= doubleAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= doubleAttr, PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - string") { - withNestedOrcDataFrame((1 to 4).map(i => Tuple1(i.toString))) { case (inputDF, colName, _) => - implicit val df: DataFrame = inputDF - - val strAttr = df(colName).expr - assert(df(colName).expr.dataType === StringType) - - checkFilterPredicate(strAttr.isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate(strAttr === "1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(strAttr <=> "1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate(strAttr < "2", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(strAttr > "3", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(strAttr <= "1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(strAttr >= "4", PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal("1") === strAttr, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal("1") <=> strAttr, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal("2") > strAttr, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal("3") < strAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal("1") >= strAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal("4") <= strAttr, PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - boolean") { - withNestedOrcDataFrame( - (true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { case (inputDF, colName, _) => - implicit val df: DataFrame = inputDF - - val booleanAttr = df(colName).expr - assert(df(colName).expr.dataType === BooleanType) - - checkFilterPredicate(booleanAttr.isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate(booleanAttr === true, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(booleanAttr <=> true, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate(booleanAttr < true, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(booleanAttr > false, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(booleanAttr <= false, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(booleanAttr >= false, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(false) === booleanAttr, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(false) <=> booleanAttr, - 
PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(false) > booleanAttr, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(true) < booleanAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(true) >= booleanAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(true) <= booleanAttr, PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - decimal") { - withNestedOrcDataFrame( - (1 to 4).map(i => Tuple1.apply(BigDecimal.valueOf(i)))) { case (inputDF, colName, _) => - implicit val df: DataFrame = inputDF - - val decimalAttr = df(colName).expr - assert(df(colName).expr.dataType === DecimalType(38, 18)) - - checkFilterPredicate(decimalAttr.isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate(decimalAttr === BigDecimal.valueOf(1), PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(decimalAttr <=> BigDecimal.valueOf(1), - PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate(decimalAttr < BigDecimal.valueOf(2), PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(decimalAttr > BigDecimal.valueOf(3), - PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(decimalAttr <= BigDecimal.valueOf(1), - PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(decimalAttr >= BigDecimal.valueOf(4), PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate( - Literal(BigDecimal.valueOf(1)) === decimalAttr, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(1)) <=> decimalAttr, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(2)) > decimalAttr, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate( - Literal(BigDecimal.valueOf(3)) < decimalAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(1)) >= decimalAttr, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(4)) <= decimalAttr, PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - timestamp") { - val input = Seq( - "1000-01-01 01:02:03", - "1582-10-01 00:11:22", - "1900-01-01 23:59:59", - "2020-05-25 10:11:12").map(Timestamp.valueOf) - - withOrcFile(input.map(Tuple1(_))) { path => - Seq(false, true).foreach { java8Api => - withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> java8Api.toString) { - readFile(path) { implicit df => - val timestamps = input.map(Literal(_)) - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === timestamps(0), PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> timestamps(0), PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < timestamps(1), PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > timestamps(2), PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= timestamps(0), PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= timestamps(3), PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(timestamps(0)) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate( - Literal(timestamps(0)) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(timestamps(1)) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate( - Literal(timestamps(2)) < $"_1", - PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate( - Literal(timestamps(0)) >= $"_1", - PredicateLeaf.Operator.LESS_THAN_EQUALS) - 
checkFilterPredicate(Literal(timestamps(3)) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - } - } - } - - test("filter pushdown - combinations with logical operators") { - withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df => - checkFilterPredicate( - $"_1".isNotNull, - "leaf-0 = (IS_NULL _1), expr = (not leaf-0)" - ) - checkFilterPredicate( - $"_1" =!= 1, - "leaf-0 = (IS_NULL _1), leaf-1 = (EQUALS _1 1), expr = (and (not leaf-0) (not leaf-1))" - ) - checkFilterPredicate( - !($"_1" < 4), - "leaf-0 = (IS_NULL _1), leaf-1 = (LESS_THAN _1 4), expr = (and (not leaf-0) (not leaf-1))" - ) - checkFilterPredicate( - $"_1" < 2 || $"_1" > 3, - "leaf-0 = (LESS_THAN _1 2), leaf-1 = (LESS_THAN_EQUALS _1 3), " + - "expr = (or leaf-0 (not leaf-1))" - ) - checkFilterPredicate( - $"_1" < 2 && $"_1" > 3, - "leaf-0 = (IS_NULL _1), leaf-1 = (LESS_THAN _1 2), leaf-2 = (LESS_THAN_EQUALS _1 3), " + - "expr = (and (not leaf-0) leaf-1 (not leaf-2))" - ) - } - } - - test("filter pushdown - date") { - val input = Seq("2017-08-18", "2017-08-19", "2017-08-20", "2017-08-21").map { day => - Date.valueOf(day) - } - withOrcFile(input.map(Tuple1(_))) { path => - Seq(false, true).foreach { java8Api => - withSQLConf(SQLConf.DATETIME_JAVA8API_ENABLED.key -> java8Api.toString) { - readFile(path) { implicit df => - val dates = input.map(Literal(_)) - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === dates(0), PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> dates(0), PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < dates(1), PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > dates(2), PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= dates(0), PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= dates(3), PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(dates(0) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(dates(0) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(dates(1) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(dates(2) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(dates(0) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(dates(3) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - } - } - } - - test("no filter pushdown - non-supported types") { - implicit class IntToBinary(int: Int) { - def b: Array[Byte] = int.toString.getBytes(StandardCharsets.UTF_8) - } - // ArrayType - withOrcDataFrame((1 to 4).map(i => Tuple1(Array(i)))) { implicit df => - checkNoFilterPredicate($"_1".isNull, noneSupported = true) - } - // BinaryType - withOrcDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df => - checkNoFilterPredicate($"_1" <=> 1.b, noneSupported = true) - } - // MapType - withOrcDataFrame((1 to 4).map(i => Tuple1(Map(i -> i)))) { implicit df => - checkNoFilterPredicate($"_1".isNotNull, noneSupported = true) - } - } - - test("SPARK-12218 and SPARK-25699 Converting conjunctions into ORC SearchArguments") { - import org.apache.spark.sql.sources._ - // The `LessThan` should be converted while the `StringContains` shouldn't - val schema = new StructType( - Array( - StructField("a", IntegerType, nullable = true), - StructField("b", StringType, nullable = true))) - assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") { - OrcFilters.createFilter(schema, Array( - LessThan("a", 10), - StringContains("b", "prefix") - )).get.toString - 
} - - // The `LessThan` should be converted while the whole inner `And` shouldn't - assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") { - OrcFilters.createFilter(schema, Array( - LessThan("a", 10), - Not(And( - GreaterThan("a", 1), - StringContains("b", "prefix") - )) - )).get.toString - } - - // Safely remove unsupported `StringContains` predicate and push down `LessThan` - assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") { - OrcFilters.createFilter(schema, Array( - And( - LessThan("a", 10), - StringContains("b", "prefix") - ) - )).get.toString - } - - // Safely remove unsupported `StringContains` predicate, push down `LessThan` and `GreaterThan`. - assertResult("leaf-0 = (LESS_THAN a 10), leaf-1 = (LESS_THAN_EQUALS a 1)," + - " expr = (and leaf-0 (not leaf-1))") { - OrcFilters.createFilter(schema, Array( - And( - And( - LessThan("a", 10), - StringContains("b", "prefix") - ), - GreaterThan("a", 1) - ) - )).get.toString - } - } - - test("SPARK-27699 Converting disjunctions into ORC SearchArguments") { - import org.apache.spark.sql.sources._ - // The `LessThan` should be converted while the `StringContains` shouldn't - val schema = new StructType( - Array( - StructField("a", IntegerType, nullable = true), - StructField("b", StringType, nullable = true))) - - // The predicate `StringContains` predicate is not able to be pushed down. - assertResult("leaf-0 = (LESS_THAN_EQUALS a 10), leaf-1 = (LESS_THAN a 1)," + - " expr = (or (not leaf-0) leaf-1)") { - OrcFilters.createFilter(schema, Array( - Or( - GreaterThan("a", 10), - And( - StringContains("b", "prefix"), - LessThan("a", 1) - ) - ) - )).get.toString - } - - assertResult("leaf-0 = (LESS_THAN_EQUALS a 10), leaf-1 = (LESS_THAN a 1)," + - " expr = (or (not leaf-0) leaf-1)") { - OrcFilters.createFilter(schema, Array( - Or( - And( - GreaterThan("a", 10), - StringContains("b", "foobar") - ), - And( - StringContains("b", "prefix"), - LessThan("a", 1) - ) - ) - )).get.toString - } - - assert(OrcFilters.createFilter(schema, Array( - Or( - StringContains("b", "foobar"), - And( - StringContains("b", "prefix"), - LessThan("a", 1) - ) - ) - )).isEmpty) - } - - test("SPARK-27160: Fix casting of the DecimalType literal") { - import org.apache.spark.sql.sources._ - val schema = StructType(Array(StructField("a", DecimalType(3, 2)))) - assertResult("leaf-0 = (LESS_THAN a 3.14), expr = leaf-0") { - OrcFilters.createFilter(schema, Array( - LessThan( - "a", - new java.math.BigDecimal(3.14, MathContext.DECIMAL64).setScale(2))) - ).get.toString - } - } - - test("SPARK-32622: case sensitivity in predicate pushdown") { - withTempPath { dir => - val count = 10 - val tableName = "spark_32622" - val tableDir1 = dir.getAbsoluteFile + "/table1" - - // Physical ORC files have both `A` and `a` fields. - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { - spark.range(count).repartition(count).selectExpr("id - 1 as A", "id as a") - .write.mode("overwrite").orc(tableDir1) - } - - // Metastore table has both `A` and `a` fields too. 
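The SPARK-32622 scenario being set up here layers metastore tables over ORC files that physically contain both `A` and `a`; the underlying `spark.sql.caseSensitive` behaviour can also be seen with nothing more than an in-memory frame. The session setup below is assumed and not part of the suite, and the exact error message may differ across versions:

```scala
import org.apache.spark.sql.{AnalysisException, SparkSession}

object CaseSensitivitySketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[2]").appName("sketch").getOrCreate()

    spark.conf.set("spark.sql.caseSensitive", "true")
    val df = spark.range(5).selectExpr("id - 1 as A", "id as a")
    df.select("A", "a").show()            // both columns resolve independently

    spark.conf.set("spark.sql.caseSensitive", "false")
    try df.select("a").show()             // expected to fail: 'a' now matches both columns
    catch { case e: AnalysisException => println(e.getMessage) }

    spark.stop()
  }
}
```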
- withTable(tableName) { - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { - sql( - s""" - |CREATE TABLE $tableName (A LONG, a LONG) USING ORC LOCATION '$tableDir1' - """.stripMargin) - - checkAnswer(sql(s"select a, A from $tableName"), (0 until count).map(c => Row(c, c - 1))) - - val actual1 = stripSparkFilter(sql(s"select A from $tableName where A < 0")) - assert(actual1.count() == 1) - - val actual2 = stripSparkFilter(sql(s"select A from $tableName where a < 0")) - assert(actual2.count() == 0) - } - - // Exception thrown for ambiguous case. - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { - val e = intercept[AnalysisException] { - sql(s"select a from $tableName where a < 0").collect() - } - assert(e.getMessage.contains( - "Reference 'a' is ambiguous")) - } - } - - // Metastore table has only `A` field. - withTable(tableName) { - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { - sql( - s""" - |CREATE TABLE $tableName (A LONG) USING ORC LOCATION '$tableDir1' - """.stripMargin) - - val e = intercept[SparkException] { - sql(s"select A from $tableName where A < 0").collect() - } - assert(e.getCause.isInstanceOf[RuntimeException] && e.getCause.getMessage.contains( - """Found duplicate field(s) "A": [A, a] in case-insensitive mode""")) - } - } - - // Physical ORC files have only `A` field. - val tableDir2 = dir.getAbsoluteFile + "/table2" - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { - spark.range(count).repartition(count).selectExpr("id - 1 as A") - .write.mode("overwrite").orc(tableDir2) - } - - withTable(tableName) { - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") { - sql( - s""" - |CREATE TABLE $tableName (a LONG) USING ORC LOCATION '$tableDir2' - """.stripMargin) - - checkAnswer(sql(s"select a from $tableName"), (0 until count).map(c => Row(c - 1))) - - val actual = stripSparkFilter(sql(s"select a from $tableName where a < 0")) - assert(actual.count() == 1) - } - } - - withTable(tableName) { - withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") { - sql( - s""" - |CREATE TABLE $tableName (A LONG) USING ORC LOCATION '$tableDir2' - """.stripMargin) - - checkAnswer(sql(s"select A from $tableName"), (0 until count).map(c => Row(c - 1))) - - val actual = stripSparkFilter(sql(s"select A from $tableName where A < 0")) - assert(actual.count() == 1) - } - } - } - } - - test("SPARK-32646: Case-insensitive field resolution for pushdown when reading ORC") { - import org.apache.spark.sql.sources._ - - def getOrcFilter( - schema: StructType, - filters: Seq[Filter], - caseSensitive: String): Option[SearchArgument] = { - var orcFilter: Option[SearchArgument] = None - withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) { - orcFilter = - OrcFilters.createFilter(schema, filters) - } - orcFilter - } - - def testFilter( - schema: StructType, - filters: Seq[Filter], - expected: SearchArgument): Unit = { - val caseSensitiveFilters = getOrcFilter(schema, filters, "true") - val caseInsensitiveFilters = getOrcFilter(schema, filters, "false") - - assert(caseSensitiveFilters.isEmpty) - assert(caseInsensitiveFilters.isDefined) - - assert(caseInsensitiveFilters.get.getLeaves().size() > 0) - assert(caseInsensitiveFilters.get.getLeaves().size() == expected.getLeaves().size()) - (0 until expected.getLeaves().size()).foreach { index => - assert(caseInsensitiveFilters.get.getLeaves().get(index) == expected.getLeaves().get(index)) - } - } - - val schema1 = StructType(Seq(StructField("cint", IntegerType))) - testFilter(schema1, Seq(GreaterThan("CINT", 1)), - newBuilder.startNot() - 
.lessThanEquals("cint", OrcFilters.getPredicateLeafType(IntegerType), 1L).`end`().build()) - testFilter(schema1, Seq( - And(GreaterThan("CINT", 1), EqualTo("Cint", 2))), - newBuilder.startAnd() - .startNot() - .lessThanEquals("cint", OrcFilters.getPredicateLeafType(IntegerType), 1L).`end`() - .equals("cint", OrcFilters.getPredicateLeafType(IntegerType), 2L) - .`end`().build()) - - // Nested column case - val schema2 = StructType(Seq(StructField("a", - StructType(Seq(StructField("cint", IntegerType)))))) - - testFilter(schema2, Seq(GreaterThan("A.CINT", 1)), - newBuilder.startNot() - .lessThanEquals("a.cint", OrcFilters.getPredicateLeafType(IntegerType), 1L).`end`().build()) - testFilter(schema2, Seq(GreaterThan("a.CINT", 1)), - newBuilder.startNot() - .lessThanEquals("a.cint", OrcFilters.getPredicateLeafType(IntegerType), 1L).`end`().build()) - testFilter(schema2, Seq(GreaterThan("A.cint", 1)), - newBuilder.startNot() - .lessThanEquals("a.cint", OrcFilters.getPredicateLeafType(IntegerType), 1L).`end`().build()) - testFilter(schema2, Seq( - And(GreaterThan("a.CINT", 1), EqualTo("a.Cint", 2))), - newBuilder.startAnd() - .startNot() - .lessThanEquals("a.cint", OrcFilters.getPredicateLeafType(IntegerType), 1L).`end`() - .equals("a.cint", OrcFilters.getPredicateLeafType(IntegerType), 2L) - .`end`().build()) - } -} - diff --git a/sql/create-docs.sh b/sql/create-docs.sh index 6614c714e90c7..8721df874ee73 100755 --- a/sql/create-docs.sh +++ b/sql/create-docs.sh @@ -27,14 +27,14 @@ set -e FWDIR="$(cd "`dirname "${BASH_SOURCE[0]}"`"; pwd)" SPARK_HOME="$(cd "`dirname "${BASH_SOURCE[0]}"`"/..; pwd)" -if ! hash python 2>/dev/null; then - echo "Missing python in your path, skipping SQL documentation generation." +if ! hash python3 2>/dev/null; then + echo "Missing python3 in your path, skipping SQL documentation generation." exit 0 fi if ! hash mkdocs 2>/dev/null; then echo "Missing mkdocs in your path, trying to install mkdocs for SQL documentation generation." - pip install mkdocs + pip3 install mkdocs fi pushd "$FWDIR" > /dev/null diff --git a/sql/gen-sql-api-docs.py b/sql/gen-sql-api-docs.py index 61328997c1c58..2f734093b106c 100644 --- a/sql/gen-sql-api-docs.py +++ b/sql/gen-sql-api-docs.py @@ -195,6 +195,7 @@ def generate_sql_api_markdown(jvm, path): """ with open(path, 'w') as mdfile: + mdfile.write("# Built-in Functions\n\n") for info in _list_function_infos(jvm): name = info.name usage = _make_pretty_usage(info.usage) diff --git a/sql/hive-thriftserver/pom.xml b/sql/hive-thriftserver/pom.xml index 5bf20b209aff7..dd6d21e3cbdac 100644 --- a/sql/hive-thriftserver/pom.xml +++ b/sql/hive-thriftserver/pom.xml @@ -22,7 +22,7 @@ org.apache.spark spark-parent_2.12 - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT ../../pom.xml @@ -77,6 +77,10 @@ ${hive.group} hive-beeline
+ + ${hive.group} + hive-service-rpc + org.eclipse.jetty jetty-server @@ -133,27 +137,5 @@ target/scala-${scala.binary.version}/classes target/scala-${scala.binary.version}/test-classes - - - org.codehaus.mojo - build-helper-maven-plugin - - - add-source - generate-sources - - add-source - - - - v${hive.version.short}/src/gen/java - v${hive.version.short}/src/main/java - v${hive.version.short}/src/main/scala - - - - - - diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/AbstractService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/AbstractService.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/AbstractService.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/AbstractService.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/CompositeService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CompositeService.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/CompositeService.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/CompositeService.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/CookieSigner.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/CookieSigner.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/CookieSigner.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/CookieSigner.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/ServiceOperations.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceOperations.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/ServiceOperations.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceOperations.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/ServiceUtils.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceUtils.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/ServiceUtils.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/ServiceUtils.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/KerberosSaslHelper.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/KerberosSaslHelper.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/KerberosSaslHelper.java rename to 
sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/KerberosSaslHelper.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/CLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/CLIService.java similarity index 98% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/CLIService.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/CLIService.java index bdc1e6251e560..68f044c6a0f28 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/CLIService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/CLIService.java @@ -45,6 +45,7 @@ import org.apache.hive.service.cli.operation.Operation; import org.apache.hive.service.cli.session.HiveSession; import org.apache.hive.service.cli.session.SessionManager; +import org.apache.hive.service.rpc.thrift.TOperationHandle; import org.apache.hive.service.rpc.thrift.TProtocolVersion; import org.apache.hive.service.server.HiveServer2; import org.slf4j.Logger; @@ -567,6 +568,15 @@ public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory au LOG.info(sessionHandle + ": renewDelegationToken()"); } + @Override + public String getQueryId(TOperationHandle opHandle) throws HiveSQLException { + Operation operation = sessionManager.getOperationManager().getOperation( + new OperationHandle(opHandle)); + final String queryId = operation.getParentSession().getHiveConf().getVar(ConfVars.HIVEQUERYID); + LOG.debug(opHandle + ": getQueryId() " + queryId); + return queryId; + } + public SessionManager getSessionManager() { return sessionManager; } diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java similarity index 97% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java index d8e61a87e7f62..b2ef1c7722ef8 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java +++ 
b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java @@ -49,7 +49,7 @@ public ColumnDescriptor(TColumnDesc tColumnDesc) { public static ColumnDescriptor newPrimitiveColumnDescriptor(String name, String comment, Type type, int position) { // Current usage looks like it's only for metadata columns, but if that changes then - // this method may need to require a type qualifiers aruments. + // this method may need to require a type qualifiers arguments. return new ColumnDescriptor(name, comment, new TypeDescriptor(type), position); } diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ColumnValue.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnValue.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ColumnValue.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ColumnValue.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/FetchOrientation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/FetchOrientation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/FetchOrientation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/FetchOrientation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/GetInfoType.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoType.java similarity index 97% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/GetInfoType.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoType.java index a64d262a8f301..575dff8f8f47b 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/GetInfoType.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoType.java @@ -72,7 +72,8 @@ public enum GetInfoType { CLI_DESCRIBE_PARAMETER(TGetInfoType.CLI_DESCRIBE_PARAMETER), CLI_CATALOG_NAME(TGetInfoType.CLI_CATALOG_NAME), CLI_COLLATION_SEQ(TGetInfoType.CLI_COLLATION_SEQ), - CLI_MAX_IDENTIFIER_LEN(TGetInfoType.CLI_MAX_IDENTIFIER_LEN); + CLI_MAX_IDENTIFIER_LEN(TGetInfoType.CLI_MAX_IDENTIFIER_LEN), + CLI_ODBC_KEYWORDS(TGetInfoType.CLI_ODBC_KEYWORDS); private final TGetInfoType tInfoType; diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/GetInfoValue.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoValue.java similarity index 96% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/GetInfoValue.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoValue.java index 2b2359cc13c0f..bf3c6b27ea81d 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/GetInfoValue.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/GetInfoValue.java @@ -52,7 +52,7 @@ public GetInfoValue(TGetInfoValue tGetInfoValue) { stringValue = tGetInfoValue.getStringValue(); break; default: - throw new IllegalArgumentException("Unreconigzed TGetInfoValue"); + throw new IllegalArgumentException("Unrecognized TGetInfoValue"); } } diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/Handle.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/Handle.java similarity index 100% rename from 
sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/Handle.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/Handle.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/HandleIdentifier.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/HandleIdentifier.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/HandleIdentifier.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/HandleIdentifier.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/HiveSQLException.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/HiveSQLException.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/HiveSQLException.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/HiveSQLException.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ICLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ICLIService.java similarity index 96% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ICLIService.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ICLIService.java index 3200909477821..a87c6691ebac7 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/ICLIService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/ICLIService.java @@ -24,6 +24,7 @@ import org.apache.hive.service.auth.HiveAuthFactory; +import org.apache.hive.service.rpc.thrift.TOperationHandle; public interface ICLIService { @@ -98,6 +99,8 @@ RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String owner, String renewer) throws HiveSQLException; + String getQueryId(TOperationHandle operationHandle) throws HiveSQLException; + void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException; diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/OperationHandle.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/OperationHandle.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/OperationHandle.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/OperationHandle.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/OperationState.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/OperationState.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/OperationState.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/OperationState.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/OperationType.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/OperationType.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/OperationType.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/OperationType.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/RowBasedSet.java 
b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/RowBasedSet.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/RowBasedSet.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/RowBasedSet.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/RowSet.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/RowSet.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/RowSet.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/RowSet.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/RowSetFactory.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/RowSetFactory.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/RowSetFactory.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/RowSetFactory.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/SessionHandle.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/SessionHandle.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/SessionHandle.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/SessionHandle.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/TableSchema.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TableSchema.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/TableSchema.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TableSchema.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java rename to 
sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java similarity index 99% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java index c25c742d392b3..59630672847e4 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java @@ -103,7 +103,7 @@ public class GetColumnsOperation extends MetadataOperation { "Schema of table that is the scope of a reference attribute " + "(null if the DATA_TYPE isn't REF)") .addPrimitiveColumn("SCOPE_TABLE", Type.STRING_TYPE, - "Table name that this the scope of a reference attribure " + "Table name that this the scope of a reference attribute " + "(null if the DATA_TYPE isn't REF)") .addPrimitiveColumn("SOURCE_DATA_TYPE", Type.SMALLINT_TYPE, "Source type of a distinct type or user-generated Ref type, " diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetPrimaryKeysOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetPrimaryKeysOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetPrimaryKeysOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetPrimaryKeysOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java similarity index 100% rename from 
sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/Operation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/Operation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/Operation.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java 
b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java similarity index 99% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java index 75edc5763ce44..3df842d2b4af9 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java @@ -97,7 +97,8 @@ public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parent public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession, String statement, Map confOverlay, boolean runAsync, long queryTimeout) throws HiveSQLException { - return newExecuteStatementOperation(parentSession, statement, confOverlay, runAsync); + return newExecuteStatementOperation(parentSession, statement, confOverlay, runAsync, + queryTimeout); } public GetTypeInfoOperation newGetTypeInfoOperation(HiveSession parentSession) { @@ -207,6 +208,7 @@ public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { Operation operation = getOperation(opHandle); OperationState opState = operation.getStatus().getState(); if (opState == OperationState.CANCELED || + opState == OperationState.TIMEDOUT || opState == OperationState.CLOSED || opState == OperationState.FINISHED || opState == OperationState.ERROR || diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java similarity index 98% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java index e2ac1ea78c1ab..894793152f409 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java @@ -155,11 +155,12 @@ private void runQuery(HiveConf sqlOperationConf) throws HiveSQLException { throw toSQLException("Error while processing statement", response); } } catch (HiveSQLException e) { - // If the operation was cancelled by another thread, + // If the operation was cancelled by another thread or timed out, // Driver#run will return a non-zero response code. 
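For context on the queryTimeout argument that OperationManager now forwards: SparkExecuteStatementOperation, later in this diff, combines it with the global SQLConf.THRIFTSERVER_QUERY_TIMEOUT setting so that the smaller positive value wins. A minimal standalone restatement of that precedence rule, for illustration only (the helper name effectiveTimeout is not part of the patch):

    // Sketch of the timeout precedence applied by SparkExecuteStatementOperation below:
    // a positive global timeout wins unless the client asked for a smaller positive one.
    def effectiveTimeout(globalTimeout: Long, queryTimeout: Long): Long =
      if (globalTimeout > 0 && (queryTimeout <= 0 || globalTimeout < queryTimeout)) globalTimeout
      else queryTimeout

    assert(effectiveTimeout(globalTimeout = 0,  queryTimeout = 30) == 30) // only the client value is set
    assert(effectiveTimeout(globalTimeout = 60, queryTimeout = 0)  == 60) // only the global value is set
    assert(effectiveTimeout(globalTimeout = 60, queryTimeout = 30) == 30) // smaller client value wins
    assert(effectiveTimeout(globalTimeout = 30, queryTimeout = 60) == 30) // smaller global value wins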
- // We will simply return if the operation state is CANCELED, + // We will simply return if the operation state is CANCELED or TIMEDOUT, // otherwise throw an exception - if (getStatus().getState() == OperationState.CANCELED) { + if (getStatus().getState() == OperationState.CANCELED || + getStatus().getState() == OperationState.TIMEDOUT) { return; } else { diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSession.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSession.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSession.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSession.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionBase.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionBase.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionBase.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionBase.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContext.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContextImpl.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContextImpl.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContextImpl.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionHookContextImpl.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java similarity index 99% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index 1b3e8fe6bfb9d..f47a4388f7bea 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -175,9 +175,9 @@ protected BufferedReader loadFile(String fileName) throws IOException { @Override protected int processCmd(String cmd) { int rc = 0; - String cmd_trimed = cmd.trim(); + String cmd_trimmed = cmd.trim(); try { - 
executeStatementInternal(cmd_trimed, null, false, 0); + executeStatementInternal(cmd_trimmed, null, false, 0); } catch (HiveSQLException e) { rc = -1; LOG.warn("Failed to execute HQL command in global .hiverc file.", e); diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/SessionManager.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/SessionManager.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/session/SessionManager.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/session/SessionManager.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java similarity index 92% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java index ce79e3c8228a6..ffca1070d0047 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java @@ -32,7 +32,11 @@ import org.apache.hive.service.ServiceException; import org.apache.hive.service.auth.HiveAuthFactory; import org.apache.hive.service.cli.CLIService; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.rpc.thrift.TGetQueryIdReq; +import org.apache.hive.service.rpc.thrift.TGetQueryIdResp; import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; +import org.apache.thrift.TException; import org.apache.thrift.TProcessorFactory; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.server.TThreadPoolServer; @@ -107,6 +111,15 @@ protected void initializeServer() { } } + @Override + public TGetQueryIdResp GetQueryId(TGetQueryIdReq req) throws TException { + try { + return new TGetQueryIdResp(cliService.getQueryId(req.getOperationHandle())); + } catch (HiveSQLException e) { + throw new TException(e); + } + } + @Override public void run() { try { diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java similarity index 95% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java index ea9ed57410045..150f1d60fc466 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java @@ -262,6 +262,28 @@ public TOpenSessionResp OpenSession(TOpenSessionReq req) 
throws TException { return resp; } + @Override + public TSetClientInfoResp SetClientInfo(TSetClientInfoReq req) throws TException { + // TODO: We don't do anything for now, just log this for debugging. + // We may be able to make use of this later, e.g. for workload management. + if (req.isSetConfiguration()) { + StringBuilder sb = null; + for (Map.Entry e : req.getConfiguration().entrySet()) { + if (sb == null) { + SessionHandle sh = new SessionHandle(req.getSessionHandle()); + sb = new StringBuilder("Client information for ").append(sh).append(": "); + } else { + sb.append(", "); + } + sb.append(e.getKey()).append(" = ").append(e.getValue()); + } + if (sb != null) { + LOG.info("{}", sb); + } + } + return new TSetClientInfoResp(OK_STATUS); + } + private String getIpAddress() { String clientIpAddress; // Http transport mode. @@ -674,6 +696,15 @@ public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req) protected abstract void initializeServer(); + @Override + public TGetQueryIdResp GetQueryId(TGetQueryIdReq req) throws TException { + try { + return new TGetQueryIdResp(cliService.getQueryId(req.getOperationHandle())); + } catch (HiveSQLException e) { + throw new TException(e); + } + } + @Override public abstract void run(); diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java similarity index 98% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java index b13ddf72f77e7..0e81e4446caac 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java @@ -490,4 +490,13 @@ public OperationHandle getCrossReference(SessionHandle sessionHandle, throw new HiveSQLException(e); } } + + @Override + public String getQueryId(TOperationHandle operationHandle) throws HiveSQLException { + try { + return cliService.GetQueryId(new TGetQueryIdReq(operationHandle)).getQueryId(); + } catch (TException e) { + throw new HiveSQLException(e); + } + } } diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java similarity index 99% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java index ab9ed5b1f371e..13fc552a9a42e 100644 --- a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java +++ b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java @@ -137,7 +137,7 @@ protected void initializeServer() { httpServer.setHandler(context); context.addServlet(new ServletHolder(thriftHttpServlet), httpPath); - // TODO: check defaults: maxTimeout, keepalive, maxBodySize, bodyRecieveDuration, etc. + // TODO: check defaults: maxTimeout, keepalive, maxBodySize, bodyReceiveDuration, etc. 
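A few hunks below, this patch adds FetchIterator.scala, which replaces the resultList / previousFetchStartOffset bookkeeping that SparkExecuteStatementOperation previously used to honour FETCH_FIRST, FETCH_PRIOR and FETCH_NEXT. A small sketch of how its entry points behave, assuming the ArrayFetchIterator defined in that new file is on the classpath (the fetch helper below is illustrative only):

    import scala.collection.mutable.ArrayBuffer

    val it = new ArrayFetchIterator[Int]((0 until 10).toArray)

    // Drain up to n rows from the current position, roughly one getNextRowSet call.
    def fetch(n: Int): Seq[Int] = {
      val out = ArrayBuffer.empty[Int]
      while (out.size < n && it.hasNext) out += it.next()
      out.toSeq
    }

    it.fetchNext();      assert(fetch(4) == Seq(0, 1, 2, 3)) // FETCH_NEXT
    it.fetchNext();      assert(fetch(4) == Seq(4, 5, 6, 7)) // FETCH_NEXT keeps moving forward
    it.fetchPrior(4);    assert(fetch(4) == Seq(0, 1, 2, 3)) // FETCH_PRIOR rewinds one fetch block
    it.fetchAbsolute(0); assert(fetch(4) == Seq(0, 1, 2, 3)) // FETCH_FIRST restarts at offset 0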
// Finally, start the server httpServer.start(); // In case HIVE_SERVER2_THRIFT_HTTP_PORT or hive.server2.thrift.http.port is configured with diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/server/HiveServer2.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/HiveServer2.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/server/HiveServer2.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/HiveServer2.java diff --git a/sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java b/sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java similarity index 100% rename from sql/hive-thriftserver/v2.3/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java rename to sql/hive-thriftserver/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/FetchIterator.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/FetchIterator.scala new file mode 100644 index 0000000000000..b9db657952b56 --- /dev/null +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/FetchIterator.scala @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.thriftserver + +private[hive] sealed trait FetchIterator[A] extends Iterator[A] { + /** + * Begin a fetch block, forward from the current position. + * Resets the fetch start offset. + */ + def fetchNext(): Unit + + /** + * Begin a fetch block, moving the iterator back by offset from the start of the previous fetch + * block start. + * Resets the fetch start offset. + * + * @param offset the amount to move a fetch start position toward the prior direction. + */ + def fetchPrior(offset: Long): Unit = fetchAbsolute(getFetchStart - offset) + + /** + * Begin a fetch block, moving the iterator to the given position. + * Resets the fetch start offset. + * + * @param pos index to move a position of iterator. 
+ */ + def fetchAbsolute(pos: Long): Unit + + def getFetchStart: Long + + def getPosition: Long +} + +private[hive] class ArrayFetchIterator[A](src: Array[A]) extends FetchIterator[A] { + private var fetchStart: Long = 0 + + private var position: Long = 0 + + override def fetchNext(): Unit = fetchStart = position + + override def fetchAbsolute(pos: Long): Unit = { + position = (pos max 0) min src.length + fetchStart = position + } + + override def getFetchStart: Long = fetchStart + + override def getPosition: Long = position + + override def hasNext: Boolean = position < src.length + + override def next(): A = { + position += 1 + src(position.toInt - 1) + } +} + +private[hive] class IterableFetchIterator[A](iterable: Iterable[A]) extends FetchIterator[A] { + private var iter: Iterator[A] = iterable.iterator + + private var fetchStart: Long = 0 + + private var position: Long = 0 + + override def fetchNext(): Unit = fetchStart = position + + override def fetchAbsolute(pos: Long): Unit = { + val newPos = pos max 0 + if (newPos < position) resetPosition() + while (position < newPos && hasNext) next() + fetchStart = position + } + + override def getFetchStart: Long = fetchStart + + override def getPosition: Long = position + + override def hasNext: Boolean = iter.hasNext + + override def next(): A = { + position += 1 + iter.next() + } + + private def resetPosition(): Unit = { + if (position != 0) { + iter = iterable.iterator + position = 0 + fetchStart = 0 + } + } +} diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala index 4e6729faced43..a1f2d62a0b72c 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2.scala @@ -116,7 +116,7 @@ object HiveThriftServer2 extends Logging { } private[thriftserver] object ExecutionState extends Enumeration { - val STARTED, COMPILED, CANCELED, FAILED, FINISHED, CLOSED = Value + val STARTED, COMPILED, CANCELED, TIMEDOUT, FAILED, FINISHED, CLOSED = Value type ExecutionState = Value } } diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala index d30951f89cf6b..8ca0ab91a73f7 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala @@ -19,7 +19,7 @@ package org.apache.spark.sql.hive.thriftserver import java.security.PrivilegedExceptionAction import java.util.{Arrays, Map => JMap} -import java.util.concurrent.RejectedExecutionException +import java.util.concurrent.{Executors, RejectedExecutionException, TimeUnit} import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer @@ -45,20 +45,33 @@ private[hive] class SparkExecuteStatementOperation( parentSession: HiveSession, statement: String, confOverlay: JMap[String, String], - runInBackground: Boolean = true) + runInBackground: Boolean = true, + queryTimeout: Long) extends ExecuteStatementOperation(parentSession, statement, confOverlay, runInBackground) with SparkOperation with Logging { + // If a timeout value 
`queryTimeout` is specified by users and it is smaller than + // a global timeout value, we use the user-specified value. + // This code follows the Hive timeout behaviour (See #29933 for details). + private val timeout = { + val globalTimeout = sqlContext.conf.getConf(SQLConf.THRIFTSERVER_QUERY_TIMEOUT) + if (globalTimeout > 0 && (queryTimeout <= 0 || globalTimeout < queryTimeout)) { + globalTimeout + } else { + queryTimeout + } + } + + private val forceCancel = sqlContext.conf.getConf(SQLConf.THRIFTSERVER_FORCE_CANCEL) + + private val substitutorStatement = SQLConf.withExistingConf(sqlContext.conf) { + new VariableSubstitution().substitute(statement) + } + private var result: DataFrame = _ - // We cache the returned rows to get iterators again in case the user wants to use FETCH_FIRST. - // This is only used when `spark.sql.thriftServer.incrementalCollect` is set to `false`. - // In case of `true`, this will be `None` and FETCH_FIRST will trigger re-execution. - private var resultList: Option[Array[SparkRow]] = _ - private var previousFetchEndOffset: Long = 0 - private var previousFetchStartOffset: Long = 0 - private var iter: Iterator[SparkRow] = _ + private var iter: FetchIterator[SparkRow] = _ private var dataTypes: Array[DataType] = _ private lazy val resultSchema: TableSchema = { @@ -113,51 +126,32 @@ private[hive] class SparkExecuteStatementOperation( } def getNextRowSet(order: FetchOrientation, maxRowsL: Long): RowSet = withLocalProperties { + try { + sqlContext.sparkContext.setJobGroup(statementId, substitutorStatement, forceCancel) + getNextRowSetInternal(order, maxRowsL) + } finally { + sqlContext.sparkContext.clearJobGroup() + } + } + + private def getNextRowSetInternal( + order: FetchOrientation, + maxRowsL: Long): RowSet = withLocalProperties { log.info(s"Received getNextRowSet request order=${order} and maxRowsL=${maxRowsL} " + s"with ${statementId}") validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) - val resultRowSet: RowSet = - ThriftserverShimUtils.resultRowSet(getResultSetSchema, getProtocolVersion) - - // Reset iter when FETCH_FIRST or FETCH_PRIOR - if ((order.equals(FetchOrientation.FETCH_FIRST) || - order.equals(FetchOrientation.FETCH_PRIOR)) && previousFetchEndOffset != 0) { - // Reset the iterator to the beginning of the query. - iter = if (sqlContext.getConf(SQLConf.THRIFTSERVER_INCREMENTAL_COLLECT.key).toBoolean) { - resultList = None - result.toLocalIterator.asScala - } else { - if (resultList.isEmpty) { - resultList = Some(result.collect()) - } - resultList.get.iterator - } - } + val resultRowSet: RowSet = RowSetFactory.create(getResultSetSchema, getProtocolVersion, false) - var resultOffset = { - if (order.equals(FetchOrientation.FETCH_FIRST)) { - logInfo(s"FETCH_FIRST request with $statementId. Resetting to resultOffset=0") - 0 - } else if (order.equals(FetchOrientation.FETCH_PRIOR)) { - // TODO: FETCH_PRIOR should be handled more efficiently than rewinding to beginning and - // reiterating. - val targetOffset = math.max(previousFetchStartOffset - maxRowsL, 0) - logInfo(s"FETCH_PRIOR request with $statementId. 
Resetting to resultOffset=$targetOffset") - var off = 0 - while (off < targetOffset && iter.hasNext) { - iter.next() - off += 1 - } - off - } else { // FETCH_NEXT - previousFetchEndOffset - } + if (order.equals(FetchOrientation.FETCH_FIRST)) { + iter.fetchAbsolute(0) + } else if (order.equals(FetchOrientation.FETCH_PRIOR)) { + iter.fetchPrior(maxRowsL) + } else { + iter.fetchNext() } - - resultRowSet.setStartOffset(resultOffset) - previousFetchStartOffset = resultOffset + resultRowSet.setStartOffset(iter.getPosition) if (!iter.hasNext) { resultRowSet } else { @@ -179,11 +173,9 @@ private[hive] class SparkExecuteStatementOperation( } resultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]]) curRow += 1 - resultOffset += 1 } - previousFetchEndOffset = resultOffset log.info(s"Returning result set with ${curRow} rows from offsets " + - s"[$previousFetchStartOffset, $previousFetchEndOffset) with $statementId") + s"[${iter.getFetchStart}, ${iter.getPosition}) with $statementId") resultRowSet } } @@ -201,6 +193,23 @@ private[hive] class SparkExecuteStatementOperation( parentSession.getUsername) setHasResultSet(true) // avoid no resultset for async run + if (timeout > 0) { + val timeoutExecutor = Executors.newSingleThreadScheduledExecutor() + timeoutExecutor.schedule(new Runnable { + override def run(): Unit = { + try { + timeoutCancel() + } catch { + case NonFatal(e) => + setOperationException(new HiveSQLException(e)) + logError(s"Error cancelling the query after timeout: $timeout seconds") + } finally { + timeoutExecutor.shutdown() + } + } + }, timeout, TimeUnit.SECONDS) + } + if (!runInBackground) { execute() } else { @@ -277,20 +286,17 @@ private[hive] class SparkExecuteStatementOperation( parentSession.getSessionState.getConf.setClassLoader(executionHiveClassLoader) } - val substitutorStatement = new VariableSubstitution(sqlContext.conf).substitute(statement) - sqlContext.sparkContext.setJobGroup(statementId, substitutorStatement) + sqlContext.sparkContext.setJobGroup(statementId, substitutorStatement, forceCancel) result = sqlContext.sql(statement) logDebug(result.queryExecution.toString()) HiveThriftServer2.eventManager.onStatementParsed(statementId, result.queryExecution.toString()) - iter = { - if (sqlContext.getConf(SQLConf.THRIFTSERVER_INCREMENTAL_COLLECT.key).toBoolean) { - resultList = None - result.toLocalIterator.asScala - } else { - resultList = Some(result.collect()) - resultList.get.iterator - } + iter = if (sqlContext.getConf(SQLConf.THRIFTSERVER_INCREMENTAL_COLLECT.key).toBoolean) { + new IterableFetchIterator[SparkRow](new Iterable[SparkRow] { + override def iterator: Iterator[SparkRow] = result.toLocalIterator.asScala + }) + } else { + new ArrayFetchIterator[SparkRow](result.collect()) } dataTypes = result.schema.fields.map(_.dataType) } catch { @@ -329,6 +335,17 @@ private[hive] class SparkExecuteStatementOperation( } } + def timeoutCancel(): Unit = { + synchronized { + if (!getStatus.getState.isTerminal) { + logInfo(s"Query with $statementId timed out after $timeout seconds") + setState(OperationState.TIMEDOUT) + cleanup() + HiveThriftServer2.eventManager.onStatementTimeout(statementId) + } + } + } + override def cancel(): Unit = { synchronized { if (!getStatus.getState.isTerminal) { diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala index 88aebb36633f6..1f9c05c330ace 100644 --- 
a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetColumnsOperation.scala @@ -105,7 +105,7 @@ private[hive] class SparkGetColumnsOperation( val databasePattern = Pattern.compile(CLIServiceUtils.patternToRegex(schemaName)) if (databasePattern.matcher(globalTempViewDb).matches()) { catalog.globalTempViewManager.listViewNames(tablePattern).foreach { globalTempView => - catalog.globalTempViewManager.get(globalTempView).foreach { plan => + catalog.getGlobalTempView(globalTempView).foreach { plan => addToRowSet(columnPattern, globalTempViewDb, globalTempView, plan.schema) } } @@ -133,6 +133,7 @@ private[hive] class SparkGetColumnsOperation( case dt @ (BooleanType | _: NumericType | DateType | TimestampType | CalendarIntervalType | NullType) => Some(dt.defaultSize) + case CharType(n) => Some(n) case StructType(fields) => val sizeArr = fields.map(f => getColumnSize(f.dataType)) if (sizeArr.contains(None)) { @@ -176,6 +177,8 @@ private[hive] class SparkGetColumnsOperation( case DoubleType => java.sql.Types.DOUBLE case _: DecimalType => java.sql.Types.DECIMAL case StringType => java.sql.Types.VARCHAR + case VarcharType(_) => java.sql.Types.VARCHAR + case CharType(_) => java.sql.Types.CHAR case BinaryType => java.sql.Types.BINARY case DateType => java.sql.Types.DATE case TimestampType => java.sql.Types.TIMESTAMP diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetSchemasOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetSchemasOperation.scala index 16fd502048e80..45cfa86ba9343 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetSchemasOperation.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetSchemasOperation.scala @@ -17,10 +17,8 @@ package org.apache.spark.sql.hive.thriftserver -import java.util.UUID import java.util.regex.Pattern -import org.apache.commons.lang3.exception.ExceptionUtils import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType import org.apache.hive.service.cli._ import org.apache.hive.service.cli.operation.GetSchemasOperation @@ -29,7 +27,6 @@ import org.apache.hive.service.cli.session.HiveSession import org.apache.spark.internal.Logging import org.apache.spark.sql.SQLContext -import org.apache.spark.util.{Utils => SparkUtils} /** * Spark's own GetSchemasOperation @@ -77,7 +74,8 @@ private[hive] class SparkGetSchemasOperation( val globalTempViewDb = sqlContext.sessionState.catalog.globalTempViewManager.database val databasePattern = Pattern.compile(CLIServiceUtils.patternToRegex(schemaName)) - if (databasePattern.matcher(globalTempViewDb).matches()) { + if (schemaName == null || schemaName.isEmpty || + databasePattern.matcher(globalTempViewDb).matches()) { rowSet.addRow(Array[AnyRef](globalTempViewDb, DEFAULT_HIVE_CATALOG)) } setState(OperationState.FINISHED) diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTablesOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTablesOperation.scala index 0d4b9b392f074..bddf5eb82012f 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTablesOperation.scala +++ 
b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTablesOperation.scala @@ -30,7 +30,6 @@ import org.apache.hive.service.cli.session.HiveSession import org.apache.spark.internal.Logging import org.apache.spark.sql.SQLContext import org.apache.spark.sql.catalyst.catalog.CatalogTableType._ -import org.apache.spark.sql.hive.HiveUtils /** * Spark's own GetTablesOperation @@ -125,10 +124,6 @@ private[hive] class SparkGetTablesOperation( tableType, comment.getOrElse("")) // Since HIVE-7575(Hive 2.0.0), adds 5 additional columns to the ResultSet of GetTables. - if (HiveUtils.isHive23) { - rowSet.addRow(rowData ++ Array(null, null, null, null, null)) - } else { - rowSet.addRow(rowData) - } + rowSet.addRow(rowData ++ Array(null, null, null, null, null)) } } diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTypeInfoOperation.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTypeInfoOperation.scala index c2568ad4ada0a..bd6feeaff08e8 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTypeInfoOperation.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkGetTypeInfoOperation.scala @@ -20,6 +20,8 @@ package org.apache.spark.sql.hive.thriftserver import java.util.UUID import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType +import org.apache.hadoop.hive.serde2.thrift.Type +import org.apache.hadoop.hive.serde2.thrift.Type._ import org.apache.hive.service.cli.OperationState import org.apache.hive.service.cli.operation.GetTypeInfoOperation import org.apache.hive.service.cli.session.HiveSession @@ -61,7 +63,7 @@ private[hive] class SparkGetTypeInfoOperation( parentSession.getUsername) try { - ThriftserverShimUtils.supportedType().foreach(typeInfo => { + SparkGetTypeInfoUtil.supportedType.foreach(typeInfo => { val rowData = Array[AnyRef]( typeInfo.getName, // TYPE_NAME typeInfo.toJavaSQLType.asInstanceOf[AnyRef], // DATA_TYPE @@ -90,3 +92,13 @@ private[hive] class SparkGetTypeInfoOperation( HiveThriftServer2.eventManager.onStatementFinish(statementId) } } + +private[hive] object SparkGetTypeInfoUtil { + val supportedType: Seq[Type] = { + Seq(NULL_TYPE, BOOLEAN_TYPE, STRING_TYPE, BINARY_TYPE, + TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE, + FLOAT_TYPE, DOUBLE_TYPE, DECIMAL_TYPE, + DATE_TYPE, TIMESTAMP_TYPE, + ARRAY_TYPE, MAP_TYPE, STRUCT_TYPE, CHAR_TYPE, VARCHAR_TYPE) + } +} diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala index 6676223af4fce..8606aaab1cae2 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.ql.session.SessionState import org.apache.hadoop.security.{Credentials, UserGroupInformation} import org.apache.log4j.Level import org.apache.thrift.transport.TSocket +import org.slf4j.LoggerFactory import sun.misc.{Signal, SignalHandler} import org.apache.spark.SparkConf @@ -198,6 +199,8 @@ private[hive] object SparkSQLCLIDriver extends Logging { SparkSQLEnv.sqlContext.setConf(k, v) } + cli.printMasterAndAppId + if (sessionState.execString != null) { 
System.exit(cli.processLine(sessionState.execString)) } @@ -267,8 +270,6 @@ private[hive] object SparkSQLCLIDriver extends Logging { def continuedPromptWithDBSpaces: String = continuedPrompt + ReflectionUtils.invokeStatic( classOf[CliDriver], "spacesForString", classOf[String] -> currentDB) - cli.printMasterAndAppId - var currentPrompt = promptWithCurrentDB var line = reader.readLine(currentPrompt + "> ") @@ -307,7 +308,9 @@ private[hive] object SparkSQLCLIDriver extends Logging { private[hive] class SparkSQLCLIDriver extends CliDriver with Logging { private val sessionState = SessionState.get().asInstanceOf[CliSessionState] - private val console = ThriftserverShimUtils.getConsole + private val LOG = LoggerFactory.getLogger(classOf[SparkSQLCLIDriver]) + + private val console = new SessionState.LogHelper(LOG) private val isRemoteMode = { SparkSQLCLIDriver.isRemoteMode(sessionState) @@ -462,7 +465,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging { oldSignal = Signal.handle(interruptSignal, new SignalHandler() { private var interruptRequested: Boolean = false - override def handle(signal: Signal) { + override def handle(signal: Signal): Unit = { val initialRequest = !interruptRequested interruptRequested = true @@ -500,7 +503,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging { val ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS) if (ret != 0 && !ignoreErrors) { CommandProcessorFactory.clean(conf.asInstanceOf[HiveConf]) - ret + return ret } } } @@ -519,15 +522,32 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging { // Note: [SPARK-31595] if there is a `'` in a double quoted string, or a `"` in a single quoted // string, the origin implementation from Hive will not drop the trailing semicolon as expected, // hence we refined this function a little bit. + // Note: [SPARK-33100] Ignore a semicolon inside a bracketed comment in spark-sql. private def splitSemiColon(line: String): JList[String] = { var insideSingleQuote = false var insideDoubleQuote = false - var insideComment = false + var insideSimpleComment = false + var bracketedCommentLevel = 0 var escape = false var beginIndex = 0 + var leavingBracketedComment = false + var isStatement = false val ret = new JArrayList[String] + def insideBracketedComment: Boolean = bracketedCommentLevel > 0 + def insideComment: Boolean = insideSimpleComment || insideBracketedComment + def statementInProgress(index: Int): Boolean = isStatement || (!insideComment && + index > beginIndex && !s"${line.charAt(index)}".trim.isEmpty) + for (index <- 0 until line.length) { + // Checks if we need to decrement a bracketed comment level; the last character '/' of + // bracketed comments is still inside the comment, so `insideBracketedComment` must keep true + // in the previous loop and we decrement the level here if needed. + if (leavingBracketedComment) { + bracketedCommentLevel -= 1 + leavingBracketedComment = false + } + if (line.charAt(index) == '\'' && !insideComment) { // take a look to see if it is escaped // See the comment above about SPARK-31595 @@ -550,21 +570,34 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging { // Sample query: select "quoted value --" // ^^ avoids starting a comment if it's inside quotes. 
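// The rewritten splitSemiColon above is essentially a small state machine over quotes,
// `--` line comments, and (possibly nested) /* */ bracketed comments. The following
// self-contained sketch is illustrative only, not the patched method: it omits the
// backslash-escape handling, trims the emitted statements, and returns a Scala Seq
// instead of a java.util.List, but it drops comment-only chunks in the same spirit as
// the new `isStatement` bookkeeping.
object SemicolonSplitSketch {
  def split(line: String): Seq[String] = {
    val out = scala.collection.mutable.ArrayBuffer.empty[String]
    val current = new StringBuilder
    var hasStatementText = false            // mirrors the role of `isStatement`
    var inSingle, inDouble, inLineComment = false
    var bracketLevel = 0                    // nesting depth of /* ... */
    var i = 0
    def emit(): Unit = {
      if (hasStatementText) out += current.toString.trim
      current.clear()
      hasStatementText = false
    }
    while (i < line.length) {
      val c = line.charAt(i)
      val next = if (i + 1 < line.length) Some(line.charAt(i + 1)) else None
      if (inLineComment) {                  // `-- ...` runs to the end of the line
        if (c == '\n') inLineComment = false
        current.append(c); i += 1
      } else if (bracketLevel > 0) {        // inside /* ... */, possibly nested
        if (c == '*' && next.contains('/')) { bracketLevel -= 1; current.append("*/"); i += 2 }
        else if (c == '/' && next.contains('*')) { bracketLevel += 1; current.append("/*"); i += 2 }
        else { current.append(c); i += 1 }
      } else if (inSingle || inDouble) {    // quoted text is statement text, ';' included
        if (inSingle && c == '\'') inSingle = false
        if (inDouble && c == '"') inDouble = false
        current.append(c); hasStatementText = true; i += 1
      } else if (c == '-' && next.contains('-')) {
        inLineComment = true; current.append("--"); i += 2
      } else if (c == '/' && next.contains('*')) {
        bracketLevel += 1; current.append("/*"); i += 2
      } else if (c == ';') {                // top-level terminator: split here
        emit(); i += 1
      } else {
        if (c == '\'') inSingle = true
        if (c == '"') inDouble = true
        current.append(c)
        if (!c.isWhitespace) hasStatementText = true
        i += 1
      }
    }
    emit()
    out.toSeq
  }
}
// For example, split("/* SELECT 'a';*/ SELECT 'b'; -- trailing;") yields a single
// element "/* SELECT 'a';*/ SELECT 'b'": the bracketed comment stays attached to the
// statement, and the trailing comment-only chunk is dropped.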
} else if (hasNext && line.charAt(index + 1) == '-') { - // ignore quotes and ; - insideComment = true + // ignore quotes and ; in simple comment + insideSimpleComment = true } } else if (line.charAt(index) == ';') { if (insideSingleQuote || insideDoubleQuote || insideComment) { // do not split } else { - // split, do not include ; itself - ret.add(line.substring(beginIndex, index)) + if (isStatement) { + // split, do not include ; itself + ret.add(line.substring(beginIndex, index)) + } beginIndex = index + 1 + isStatement = false } } else if (line.charAt(index) == '\n') { - // with a new line the inline comment should end. + // with a new line the inline simple comment should end. if (!escape) { - insideComment = false + insideSimpleComment = false + } + } else if (line.charAt(index) == '/' && !insideSimpleComment) { + val hasNext = index + 1 < line.length + if (insideSingleQuote || insideDoubleQuote) { + // Ignores '/' in any case of quotes + } else if (insideBracketedComment && line.charAt(index - 1) == '*' ) { + // Decrements `bracketedCommentLevel` at the beginning of the next loop + leavingBracketedComment = true + } else if (hasNext && !insideBracketedComment && line.charAt(index + 1) == '*') { + bracketedCommentLevel += 1 } } // set the escape @@ -573,8 +606,12 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging { } else if (line.charAt(index) == '\\') { escape = true } + + isStatement = statementInProgress(index) + } + if (isStatement) { + ret.add(line.substring(beginIndex)) } - ret.add(line.substring(beginIndex)) ret } } diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala index 984625c76e057..e9420ad21bebd 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala @@ -24,7 +24,6 @@ import javax.security.auth.login.LoginException import scala.collection.JavaConverters._ import scala.util.control.NonFatal -import org.apache.commons.logging.Log import org.apache.hadoop.hive.conf.HiveConf import org.apache.hadoop.hive.conf.HiveConf.ConfVars import org.apache.hadoop.hive.shims.Utils @@ -37,7 +36,6 @@ import org.apache.hive.service.server.HiveServer2 import org.slf4j.Logger import org.apache.spark.sql.SQLContext -import org.apache.spark.sql.hive.HiveUtils import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._ private[hive] class SparkSQLCLIService(hiveServer: HiveServer2, sqlContext: SQLContext) @@ -106,6 +104,7 @@ private[hive] class SparkSQLCLIService(hiveServer: HiveServer2, sqlContext: SQLC case GetInfoType.CLI_SERVER_NAME => new GetInfoValue("Spark SQL") case GetInfoType.CLI_DBMS_NAME => new GetInfoValue("Spark SQL") case GetInfoType.CLI_DBMS_VER => new GetInfoValue(sqlContext.sparkContext.version) + case GetInfoType.CLI_ODBC_KEYWORDS => new GetInfoValue("Unimplemented") case _ => super.getInfo(sessionHandle, getInfoType) } } @@ -113,17 +112,10 @@ private[hive] class SparkSQLCLIService(hiveServer: HiveServer2, sqlContext: SQLC private[thriftserver] trait ReflectedCompositeService { this: AbstractService => - private val logInfo = (msg: String) => if (HiveUtils.isHive23) { - getAncestorField[Logger](this, 3, "LOG").info(msg) - } else { - getAncestorField[Log](this, 3, "LOG").info(msg) - } + private val logInfo = (msg: String) => 
getAncestorField[Logger](this, 3, "LOG").info(msg) - private val logError = (msg: String, e: Throwable) => if (HiveUtils.isHive23) { + private val logError = (msg: String, e: Throwable) => getAncestorField[Logger](this, 3, "LOG").error(msg, e) - } else { - getAncestorField[Log](this, 3, "LOG").error(msg, e) - } def initCompositeService(hiveConf: HiveConf): Unit = { // Emulating `CompositeService.init(hiveConf)` diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala index 8faeee523d983..8bc762fe99233 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLDriver.scala @@ -30,7 +30,7 @@ import org.apache.spark.internal.Logging import org.apache.spark.sql.{AnalysisException, SQLContext} import org.apache.spark.sql.execution.{QueryExecution, SQLExecution} import org.apache.spark.sql.execution.HiveResult.hiveResultString -import org.apache.spark.sql.internal.VariableSubstitution +import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution} private[hive] class SparkSQLDriver(val context: SQLContext = SparkSQLEnv.sqlContext) @@ -60,7 +60,9 @@ private[hive] class SparkSQLDriver(val context: SQLContext = SparkSQLEnv.sqlCont override def run(command: String): CommandProcessorResponse = { // TODO unify the error code try { - val substitutorCommand = new VariableSubstitution(context.conf).substitute(command) + val substitutorCommand = SQLConf.withExistingConf(context.conf) { + new VariableSubstitution().substitute(command) + } context.sparkContext.setJobDescription(substitutorCommand) val execution = context.sessionState.executePlan(context.sql(command).logicalPlan) hiveResponse = SQLExecution.withNewExecutionId(execution) { diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLSessionManager.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLSessionManager.scala index 806b6146b2db1..89aaa31c35790 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLSessionManager.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLSessionManager.scala @@ -17,15 +17,15 @@ package org.apache.spark.sql.hive.thriftserver -import java.util.concurrent.Executors +import scala.util.control.NonFatal -import org.apache.commons.logging.Log import org.apache.hadoop.hive.conf.HiveConf -import org.apache.hadoop.hive.conf.HiveConf.ConfVars -import org.apache.hive.service.cli.SessionHandle +import org.apache.hive.service.cli.{HiveSQLException, SessionHandle} import org.apache.hive.service.cli.session.SessionManager +import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.apache.hive.service.server.HiveServer2 +import org.apache.spark.internal.Logging import org.apache.spark.sql.SQLContext import org.apache.spark.sql.hive.HiveUtils import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._ @@ -35,7 +35,7 @@ import org.apache.spark.sql.internal.SQLConf private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: SQLContext) extends SessionManager(hiveServer) - with ReflectedCompositeService { + with ReflectedCompositeService with Logging { private lazy val sparkSqlOperationManager = new SparkSQLOperationManager() @@ -45,7 +45,7 
@@ private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: } override def openSession( - protocol: ThriftserverShimUtils.TProtocolVersion, + protocol: TProtocolVersion, username: String, passwd: String, ipAddress: String, @@ -55,24 +55,35 @@ private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: val sessionHandle = super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation, delegationToken) - val session = super.getSession(sessionHandle) - HiveThriftServer2.eventManager.onSessionCreated( - session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername) - val ctx = if (sqlContext.conf.hiveThriftServerSingleSession) { - sqlContext - } else { - sqlContext.newSession() + try { + val session = super.getSession(sessionHandle) + HiveThriftServer2.eventManager.onSessionCreated( + session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername) + val ctx = if (sqlContext.conf.hiveThriftServerSingleSession) { + sqlContext + } else { + sqlContext.newSession() + } + ctx.setConf(HiveUtils.FAKE_HIVE_VERSION.key, HiveUtils.builtinHiveVersion) + ctx.setConf(SQLConf.DATETIME_JAVA8API_ENABLED, true) + val hiveSessionState = session.getSessionState + setConfMap(ctx, hiveSessionState.getOverriddenConfigurations) + setConfMap(ctx, hiveSessionState.getHiveVariables) + if (sessionConf != null && sessionConf.containsKey("use:database")) { + ctx.sql(s"use ${sessionConf.get("use:database")}") + } + sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx) + sessionHandle + } catch { + case NonFatal(e) => + try { + closeSession(sessionHandle) + } catch { + case NonFatal(inner) => + logWarning("Error closing session", inner) + } + throw new HiveSQLException("Failed to open new session: " + e, e) } - ctx.setConf(HiveUtils.FAKE_HIVE_VERSION.key, HiveUtils.builtinHiveVersion) - ctx.setConf(SQLConf.DATETIME_JAVA8API_ENABLED, true) - val hiveSessionState = session.getSessionState - setConfMap(ctx, hiveSessionState.getOverriddenConfigurations) - setConfMap(ctx, hiveSessionState.getHiveVariables) - if (sessionConf != null && sessionConf.containsKey("use:database")) { - ctx.sql(s"use ${sessionConf.get("use:database")}") - } - sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx) - sessionHandle } override def closeSession(sessionHandle: SessionHandle): Unit = { diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala index bc9c13eb0d4f8..ba42eefed2a22 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/server/SparkSQLOperationManager.scala @@ -44,14 +44,15 @@ private[thriftserver] class SparkSQLOperationManager() parentSession: HiveSession, statement: String, confOverlay: JMap[String, String], - async: Boolean): ExecuteStatementOperation = synchronized { + async: Boolean, + queryTimeout: Long): ExecuteStatementOperation = synchronized { val sqlContext = sessionToContexts.get(parentSession.getSessionHandle) require(sqlContext != null, s"Session handle: ${parentSession.getSessionHandle} has not been" + s" initialized or had already closed.") val conf = sqlContext.sessionState.conf val runInBackground = async && conf.getConf(HiveUtils.HIVE_THRIFT_SERVER_ASYNC) 
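// The openSession rewrite above follows a "clean up the half-opened session on failure"
// shape: if any step after super.openSession fails, the session is closed on a
// best-effort basis before the error is rethrown as a HiveSQLException. A generic,
// hypothetical sketch of that pattern (RuntimeException stands in for HiveSQLException;
// acquire/setup/close are placeholders, not Spark or Hive APIs):
import scala.util.control.NonFatal

object OpenWithCleanupSketch {
  def openWithCleanup[A](acquire: => A)(setup: A => Unit)(close: A => Unit): A = {
    val resource = acquire                  // e.g. the handle from super.openSession(...)
    try {
      setup(resource)                       // e.g. wire up a SQLContext, apply "use <db>"
      resource
    } catch {
      case NonFatal(e) =>
        try close(resource) catch {
          case NonFatal(_) => ()            // best-effort cleanup; keep the original error
        }
        throw new RuntimeException(s"Failed to open new session: $e", e)
    }
  }
}
// Usage shape (names hypothetical): openWithCleanup(openRawSession())(initContext)(closeRawSession)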
val operation = new SparkExecuteStatementOperation( - sqlContext, parentSession, statement, confOverlay, runInBackground) + sqlContext, parentSession, statement, confOverlay, runInBackground, queryTimeout) handleToOperation.put(operation.getHandle, operation) logDebug(s"Created Operation for $statement with session=$parentSession, " + s"runInBackground=$runInBackground") diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2AppStatusStore.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2AppStatusStore.scala index 5cb78f6e64650..8bd8f29a4b9ec 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2AppStatusStore.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2AppStatusStore.scala @@ -119,6 +119,7 @@ private[thriftserver] class ExecutionInfo( def isExecutionActive: Boolean = { !(state == ExecutionState.FAILED || state == ExecutionState.CANCELED || + state == ExecutionState.TIMEDOUT || state == ExecutionState.CLOSED) } diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2EventManager.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2EventManager.scala index fa04c67896a69..202fdf33c0dd9 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2EventManager.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2EventManager.scala @@ -57,6 +57,10 @@ private[thriftserver] class HiveThriftServer2EventManager(sc: SparkContext) { postLiveListenerBus(SparkListenerThriftServerOperationCanceled(id, System.currentTimeMillis())) } + def onStatementTimeout(id: String): Unit = { + postLiveListenerBus(SparkListenerThriftServerOperationTimeout(id, System.currentTimeMillis())) + } + def onStatementError(id: String, errorMsg: String, errorTrace: String): Unit = { postLiveListenerBus(SparkListenerThriftServerOperationError(id, errorMsg, errorTrace, System.currentTimeMillis())) @@ -96,6 +100,9 @@ private[thriftserver] case class SparkListenerThriftServerOperationParsed( private[thriftserver] case class SparkListenerThriftServerOperationCanceled( id: String, finishTime: Long) extends SparkListenerEvent +private[thriftserver] case class SparkListenerThriftServerOperationTimeout( + id: String, finishTime: Long) extends SparkListenerEvent + private[thriftserver] case class SparkListenerThriftServerOperationError( id: String, errorMsg: String, diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2Listener.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2Listener.scala index 6b7e5ee611417..4cf672e3d9d9e 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2Listener.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2Listener.scala @@ -119,6 +119,7 @@ private[thriftserver] class HiveThriftServer2Listener( case e: SparkListenerThriftServerOperationStart => onOperationStart(e) case e: SparkListenerThriftServerOperationParsed => onOperationParsed(e) case e: SparkListenerThriftServerOperationCanceled => onOperationCanceled(e) + case e: SparkListenerThriftServerOperationTimeout 
=> onOperationTimeout(e) case e: SparkListenerThriftServerOperationError => onOperationError(e) case e: SparkListenerThriftServerOperationFinish => onOperationFinished(e) case e: SparkListenerThriftServerOperationClosed => onOperationClosed(e) @@ -181,6 +182,15 @@ private[thriftserver] class HiveThriftServer2Listener( case None => logWarning(s"onOperationCanceled called with unknown operation id: ${e.id}") } + private def onOperationTimeout(e: SparkListenerThriftServerOperationTimeout): Unit = + Option(executionList.get(e.id)) match { + case Some(executionData) => + executionData.finishTimestamp = e.finishTime + executionData.state = ExecutionState.TIMEDOUT + updateLiveStore(executionData) + case None => logWarning(s"onOperationCanceled called with unknown operation id: ${e.id}") + } + private def onOperationError(e: SparkListenerThriftServerOperationError): Unit = Option(executionList.get(e.id)) match { case Some(executionData) => diff --git a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala index 8efbdb30c605c..54a40e3990f09 100644 --- a/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala +++ b/sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/ThriftServerPage.scala @@ -19,7 +19,6 @@ package org.apache.spark.sql.hive.thriftserver.ui import java.net.URLEncoder import java.nio.charset.StandardCharsets.UTF_8 -import java.util.Calendar import javax.servlet.http.HttpServletRequest import scala.xml.Node diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreLazyInitializationSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreLazyInitializationSuite.scala index 277df548aefd0..951f92793732f 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreLazyInitializationSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreLazyInitializationSuite.scala @@ -31,6 +31,7 @@ class HiveMetastoreLazyInitializationSuite extends SparkFunSuite { .config("spark.hadoop.hive.metastore.uris", "thrift://127.0.0.1:11111") .getOrCreate() val originalLevel = org.apache.log4j.Logger.getRootLogger().getLevel + val originalClassLoader = Thread.currentThread().getContextClassLoader try { // Avoid outputting a lot of expected warning logs spark.sparkContext.setLogLevel("error") @@ -64,6 +65,7 @@ class HiveMetastoreLazyInitializationSuite extends SparkFunSuite { exceptionString.contains(msg) } } finally { + Thread.currentThread().setContextClassLoader(originalClassLoader) spark.sparkContext.setLogLevel(originalLevel.toString) spark.stop() } diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala index 2064a99137bf9..1a96012a0b4e9 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala @@ -27,7 +27,7 @@ import scala.concurrent.Promise import scala.concurrent.duration._ import org.apache.hadoop.hive.conf.HiveConf.ConfVars -import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach} +import org.scalatest.BeforeAndAfterAll import org.apache.spark.SparkFunSuite import 
org.apache.spark.internal.Logging @@ -98,10 +98,8 @@ class CliSuite extends SparkFunSuite with BeforeAndAfterAll with Logging { Seq(answer) } else { // spark-sql echoes the submitted queries - val queryEcho = query.split("\n").toList match { - case firstLine :: tail => - s"spark-sql> $firstLine" :: tail.map(l => s" > $l") - } + val xs = query.split("\n").toList + val queryEcho = s"spark-sql> ${xs.head}" :: xs.tail.map(l => s" > $l") // longer lines sometimes get split in the output, // match the first 60 characters of each query line queryEcho.map(_.take(60)) :+ answer @@ -573,4 +571,27 @@ class CliSuite extends SparkFunSuite with BeforeAndAfterAll with Logging { // the date formatter for `java.sql.LocalDate` must output negative years with sign. runCliWithin(1.minute)("SELECT MAKE_DATE(-44, 3, 15);" -> "-0044-03-15") } + + test("SPARK-33100: Ignore a semicolon inside a bracketed comment in spark-sql") { + runCliWithin(4.minute)( + "/* SELECT 'test';*/ SELECT 'test';" -> "test", + ";;/* SELECT 'test';*/ SELECT 'test';" -> "test", + "/* SELECT 'test';*/;; SELECT 'test';" -> "test", + "SELECT 'test'; -- SELECT 'test';" -> "test", + "SELECT 'test'; /* SELECT 'test';*/;" -> "test", + "/*$meta chars{^\\;}*/ SELECT 'test';" -> "test", + "/*\nmulti-line\n*/ SELECT 'test';" -> "test", + "/*/* multi-level bracketed*/ SELECT 'test';" -> "test" + ) + } + + test("SPARK-33100: test sql statements with hint in bracketed comment") { + runCliWithin(2.minute)( + "CREATE TEMPORARY VIEW t1 AS SELECT * FROM VALUES(1, 2) AS t1(k, v);" -> "", + "CREATE TEMPORARY VIEW t2 AS SELECT * FROM VALUES(2, 1) AS t2(k, v);" -> "", + "EXPLAIN SELECT /*+ MERGEJOIN(t1) */ t1.* FROM t1 JOIN t2 ON t1.k = t2.v;" -> "SortMergeJoin", + "EXPLAIN SELECT /* + MERGEJOIN(t1) */ t1.* FROM t1 JOIN t2 ON t1.k = t2.v;" + -> "BroadcastHashJoin" + ) + } } diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala index 4564c2209a931..820859b65925b 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/DummyListeners.scala @@ -17,7 +17,7 @@ /** * These classes in this package are intentionally placed to the outer package of spark, - * because IsolatedClientLoader leverages Spark classloader for shared classess including + * because IsolatedClientLoader leverages Spark classloader for shared classes including * spark package, and the test should fail if Spark initializes these listeners with * IsolatedClientLoader. */ diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/FetchIteratorSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/FetchIteratorSuite.scala new file mode 100644 index 0000000000000..0fbdb8a9050c8 --- /dev/null +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/FetchIteratorSuite.scala @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.thriftserver + +import org.apache.spark.SparkFunSuite + +class FetchIteratorSuite extends SparkFunSuite { + + private def getRows(fetchIter: FetchIterator[Int], maxRowCount: Int): Seq[Int] = { + for (_ <- 0 until maxRowCount if fetchIter.hasNext) yield fetchIter.next() + } + + test("SPARK-33655: Test fetchNext and fetchPrior") { + val testData = 0 until 10 + + def iteratorTest(fetchIter: FetchIterator[Int]): Unit = { + fetchIter.fetchNext() + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 0) + assertResult(0 until 2)(getRows(fetchIter, 2)) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 2) + + fetchIter.fetchNext() + assert(fetchIter.getFetchStart == 2) + assert(fetchIter.getPosition == 2) + assertResult(2 until 3)(getRows(fetchIter, 1)) + assert(fetchIter.getFetchStart == 2) + assert(fetchIter.getPosition == 3) + + fetchIter.fetchPrior(2) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 0) + assertResult(0 until 3)(getRows(fetchIter, 3)) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 3) + + fetchIter.fetchNext() + assert(fetchIter.getFetchStart == 3) + assert(fetchIter.getPosition == 3) + assertResult(3 until 8)(getRows(fetchIter, 5)) + assert(fetchIter.getFetchStart == 3) + assert(fetchIter.getPosition == 8) + + fetchIter.fetchPrior(2) + assert(fetchIter.getFetchStart == 1) + assert(fetchIter.getPosition == 1) + assertResult(1 until 4)(getRows(fetchIter, 3)) + assert(fetchIter.getFetchStart == 1) + assert(fetchIter.getPosition == 4) + + fetchIter.fetchNext() + assert(fetchIter.getFetchStart == 4) + assert(fetchIter.getPosition == 4) + assertResult(4 until 10)(getRows(fetchIter, 10)) + assert(fetchIter.getFetchStart == 4) + assert(fetchIter.getPosition == 10) + + fetchIter.fetchNext() + assert(fetchIter.getFetchStart == 10) + assert(fetchIter.getPosition == 10) + assertResult(Seq.empty[Int])(getRows(fetchIter, 10)) + assert(fetchIter.getFetchStart == 10) + assert(fetchIter.getPosition == 10) + + fetchIter.fetchPrior(20) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 0) + assertResult(0 until 3)(getRows(fetchIter, 3)) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 3) + } + iteratorTest(new ArrayFetchIterator[Int](testData.toArray)) + iteratorTest(new IterableFetchIterator[Int](testData)) + } + + test("SPARK-33655: Test fetchAbsolute") { + val testData = 0 until 10 + + def iteratorTest(fetchIter: FetchIterator[Int]): Unit = { + fetchIter.fetchNext() + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 0) + assertResult(0 until 5)(getRows(fetchIter, 5)) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 5) + + fetchIter.fetchAbsolute(2) + assert(fetchIter.getFetchStart == 2) + assert(fetchIter.getPosition == 2) + assertResult(2 until 5)(getRows(fetchIter, 3)) + assert(fetchIter.getFetchStart == 2) + assert(fetchIter.getPosition == 5) + + fetchIter.fetchAbsolute(7) + assert(fetchIter.getFetchStart == 7) + assert(fetchIter.getPosition == 7) + 
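// The new FetchIteratorSuite above pins down the iterator contract: fetchNext() starts
// a new fetch block at the current position, fetchPrior(n) rewinds n rows from the
// start of the previous fetch block, and fetchAbsolute(k) jumps to offset k, all
// clamped to [0, size]. A hypothetical array-backed sketch that satisfies those
// assertions (the actual FetchIterator/ArrayFetchIterator added by this patch are
// defined elsewhere and may differ in detail):
class ArrayFetchIteratorSketch[A](values: Array[A]) extends Iterator[A] {
  private var fetchStart: Long = 0L   // offset where the current fetch block began
  private var position: Long = 0L     // offset of the next row to be returned

  def fetchNext(): Unit = { fetchStart = position }
  def fetchPrior(offset: Long): Unit = fetchAbsolute(fetchStart - offset)
  def fetchAbsolute(pos: Long): Unit = {
    val target = math.max(0L, math.min(pos, values.length.toLong))
    fetchStart = target
    position = target
  }
  def getFetchStart: Long = fetchStart
  def getPosition: Long = position

  override def hasNext: Boolean = position < values.length
  override def next(): A = {
    val v = values(position.toInt)
    position += 1
    v
  }
}
// e.g. after fetchNext() and reading rows 3..7 (fetchStart == 3), fetchPrior(2)
// repositions both fetchStart and position at offset 1, exactly as the suite asserts.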
assertResult(7 until 8)(getRows(fetchIter, 1)) + assert(fetchIter.getFetchStart == 7) + assert(fetchIter.getPosition == 8) + + fetchIter.fetchAbsolute(20) + assert(fetchIter.getFetchStart == 10) + assert(fetchIter.getPosition == 10) + assertResult(Seq.empty[Int])(getRows(fetchIter, 1)) + assert(fetchIter.getFetchStart == 10) + assert(fetchIter.getPosition == 10) + + fetchIter.fetchAbsolute(0) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 0) + assertResult(0 until 3)(getRows(fetchIter, 3)) + assert(fetchIter.getFetchStart == 0) + assert(fetchIter.getPosition == 3) + } + iteratorTest(new ArrayFetchIterator[Int](testData.toArray)) + iteratorTest(new IterableFetchIterator[Int](testData)) + } +} diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/GetCatalogsOperationMock.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/GetCatalogsOperationMock.scala index 764f1690d5a66..1bc9aaf672c3b 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/GetCatalogsOperationMock.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/GetCatalogsOperationMock.scala @@ -22,8 +22,7 @@ import java.util.UUID import org.apache.hive.service.cli.OperationHandle import org.apache.hive.service.cli.operation.GetCatalogsOperation import org.apache.hive.service.cli.session.HiveSession - -import org.apache.spark.sql.hive.thriftserver.ThriftserverShimUtils.{THandleIdentifier, TOperationHandle, TOperationType} +import org.apache.hive.service.rpc.thrift.{THandleIdentifier, TOperationHandle, TOperationType} class GetCatalogsOperationMock(parentSession: HiveSession) extends GetCatalogsOperation(parentSession) { diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveSessionImplSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveSessionImplSuite.scala index 47db7e34a5a2c..7c42348f74453 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveSessionImplSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveSessionImplSuite.scala @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.conf.HiveConf import org.apache.hive.service.cli.OperationHandle import org.apache.hive.service.cli.operation.{GetCatalogsOperation, Operation, OperationManager} import org.apache.hive.service.cli.session.{HiveSession, HiveSessionImpl, SessionManager} +import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.apache.spark.SparkFunSuite @@ -32,14 +33,14 @@ class HiveSessionImplSuite extends SparkFunSuite { private var session: HiveSessionImpl = _ private var operationManager: OperationManagerMock = _ - override def beforeAll() { + override def beforeAll(): Unit = { super.beforeAll() val sessionManager = new SessionManager(null) operationManager = new OperationManagerMock() session = new HiveSessionImpl( - ThriftserverShimUtils.testedProtocolVersions.head, + TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1, "", "", new HiveConf(), diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala index ad0f97cae3f8e..bd0db743b8d4c 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala +++ 
b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala @@ -29,7 +29,7 @@ import scala.collection.mutable.ArrayBuffer import scala.concurrent.{ExecutionContext, Future, Promise} import scala.concurrent.duration._ import scala.io.Source -import scala.util.{Random, Try} +import scala.util.Try import com.google.common.io.Files import org.apache.hadoop.hive.conf.HiveConf.ConfVars @@ -37,14 +37,17 @@ import org.apache.hive.jdbc.HiveDriver import org.apache.hive.service.auth.PlainSaslHelper import org.apache.hive.service.cli.{FetchOrientation, FetchType, GetInfoType, RowSet} import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient +import org.apache.hive.service.rpc.thrift.TCLIService.Client import org.apache.thrift.protocol.TBinaryProtocol import org.apache.thrift.transport.TSocket import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.Eventually._ import org.apache.spark.{SparkException, SparkFunSuite} import org.apache.spark.internal.Logging import org.apache.spark.sql.hive.HiveUtils import org.apache.spark.sql.hive.test.HiveTestJars +import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.StaticSQLConf.HIVE_THRIFT_SERVER_SINGLESESSION import org.apache.spark.sql.test.ProcessTestUtils.ProcessOutputCapturer import org.apache.spark.util.{ThreadUtils, Utils} @@ -58,7 +61,7 @@ object TestData { val smallKvWithNull = getTestDataFilePath("small_kv_with_null.txt") } -class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { +class HiveThriftBinaryServerSuite extends HiveThriftServer2Test { override def mode: ServerMode.Value = ServerMode.binary private def withCLIServiceClient(f: ThriftCLIServiceClient => Unit): Unit = { @@ -67,7 +70,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { val user = System.getProperty("user.name") val transport = PlainSaslHelper.getPlainTransport(user, "anonymous", rawTransport) val protocol = new TBinaryProtocol(transport) - val client = new ThriftCLIServiceClient(new ThriftserverShimUtils.Client(protocol)) + val client = new ThriftCLIServiceClient(new Client(protocol)) transport.open() try f(client) finally transport.close() @@ -284,7 +287,6 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { } test("test multiple session") { - import org.apache.spark.sql.internal.SQLConf var defaultV1: String = null var defaultV2: String = null var data: ArrayBuffer[Int] = null @@ -304,7 +306,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { val plan = statement.executeQuery("explain select * from test_table") plan.next() plan.next() - assert(plan.getString(1).contains("Scan In-memory table `test_table`")) + assert(plan.getString(1).contains("Scan In-memory table test_table")) val rs1 = statement.executeQuery("SELECT key FROM test_table ORDER BY KEY DESC") val buf1 = new collection.mutable.ArrayBuffer[Int]() @@ -390,7 +392,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { val plan = statement.executeQuery("explain select key from test_map ORDER BY key DESC") plan.next() plan.next() - assert(plan.getString(1).contains("Scan In-memory table `test_table`")) + assert(plan.getString(1).contains("Scan In-memory table test_table")) val rs = statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC") val buf = new collection.mutable.ArrayBuffer[Int]() @@ -544,11 +546,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { conf += resultSet.getString(1) -> resultSet.getString(2) } - if 
(HiveUtils.isHive23) { - assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.7")) - } else { - assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("1.2.1")) - } + assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.7")) } } @@ -561,11 +559,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { conf += resultSet.getString(1) -> resultSet.getString(2) } - if (HiveUtils.isHive23) { - assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.7")) - } else { - assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("1.2.1")) - } + assert(conf.get(HiveUtils.FAKE_HIVE_VERSION.key) === Some("2.3.7")) } } @@ -643,11 +637,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { val sessionHandle = client.openSession(user, "") val sessionID = sessionHandle.getSessionId - if (HiveUtils.isHive23) { - assert(pipeoutFileList(sessionID).length == 2) - } else { - assert(pipeoutFileList(sessionID).length == 1) - } + assert(pipeoutFileList(sessionID).length == 2) client.closeSession(sessionHandle) @@ -891,9 +881,62 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest { assert(rs.getString(1) === expected.toString) } } + + test("SPARK-26533: Support query auto timeout cancel on thriftserver - setQueryTimeout") { + withJdbcStatement() { statement => + statement.setQueryTimeout(1) + val e = intercept[SQLException] { + statement.execute("select java_method('java.lang.Thread', 'sleep', 10000L)") + }.getMessage + assert(e.contains("Query timed out after")) + + statement.setQueryTimeout(0) + val rs1 = statement.executeQuery( + "select 'test', java_method('java.lang.Thread', 'sleep', 3000L)") + rs1.next() + assert(rs1.getString(1) == "test") + + statement.setQueryTimeout(-1) + val rs2 = statement.executeQuery( + "select 'test', java_method('java.lang.Thread', 'sleep', 3000L)") + rs2.next() + assert(rs2.getString(1) == "test") + } + } + + test("SPARK-26533: Support query auto timeout cancel on thriftserver - SQLConf") { + withJdbcStatement() { statement => + statement.execute(s"SET ${SQLConf.THRIFTSERVER_QUERY_TIMEOUT.key}=1") + val e1 = intercept[SQLException] { + statement.execute("select java_method('java.lang.Thread', 'sleep', 10000L)") + }.getMessage + assert(e1.contains("Query timed out after")) + + statement.execute(s"SET ${SQLConf.THRIFTSERVER_QUERY_TIMEOUT.key}=0") + val rs = statement.executeQuery( + "select 'test', java_method('java.lang.Thread', 'sleep', 3000L)") + rs.next() + assert(rs.getString(1) == "test") + + // Uses a smaller timeout value of a config value and an a user-specified one + statement.execute(s"SET ${SQLConf.THRIFTSERVER_QUERY_TIMEOUT.key}=1") + statement.setQueryTimeout(30) + val e2 = intercept[SQLException] { + statement.execute("select java_method('java.lang.Thread', 'sleep', 10000L)") + }.getMessage + assert(e2.contains("Query timed out after")) + + statement.execute(s"SET ${SQLConf.THRIFTSERVER_QUERY_TIMEOUT.key}=30") + statement.setQueryTimeout(1) + val e3 = intercept[SQLException] { + statement.execute("select java_method('java.lang.Thread', 'sleep', 10000L)") + }.getMessage + assert(e3.contains("Query timed out after")) + } + } } -class SingleSessionSuite extends HiveThriftJdbcTest { +class SingleSessionSuite extends HiveThriftServer2TestBase { override def mode: ServerMode.Value = ServerMode.binary override protected def extraConf: Seq[String] = @@ -1004,7 +1047,7 @@ class SingleSessionSuite extends HiveThriftJdbcTest { } } -class HiveThriftCleanUpScratchDirSuite extends HiveThriftJdbcTest{ +class 
HiveThriftCleanUpScratchDirSuite extends HiveThriftServer2TestBase { var tempScratchDir: File = _ override protected def beforeAll(): Unit = { @@ -1037,7 +1080,7 @@ class HiveThriftCleanUpScratchDirSuite extends HiveThriftJdbcTest{ } } -class HiveThriftHttpServerSuite extends HiveThriftJdbcTest { +class HiveThriftHttpServerSuite extends HiveThriftServer2Test { override def mode: ServerMode.Value = ServerMode.http test("JDBC query execution") { @@ -1080,63 +1123,7 @@ object ServerMode extends Enumeration { val binary, http = Value } -abstract class HiveThriftJdbcTest extends HiveThriftServer2Test { - Utils.classForName(classOf[HiveDriver].getCanonicalName) - - private def jdbcUri = if (mode == ServerMode.http) { - s"""jdbc:hive2://localhost:$serverPort/ - |default? - |hive.server2.transport.mode=http; - |hive.server2.thrift.http.path=cliservice; - |${hiveConfList}#${hiveVarList} - """.stripMargin.split("\n").mkString.trim - } else { - s"jdbc:hive2://localhost:$serverPort/?${hiveConfList}#${hiveVarList}" - } - - def withMultipleConnectionJdbcStatement(tableNames: String*)(fs: (Statement => Unit)*): Unit = { - val user = System.getProperty("user.name") - val connections = fs.map { _ => DriverManager.getConnection(jdbcUri, user, "") } - val statements = connections.map(_.createStatement()) - - try { - statements.zip(fs).foreach { case (s, f) => f(s) } - } finally { - tableNames.foreach { name => - // TODO: Need a better way to drop the view. - if (name.toUpperCase(Locale.ROOT).startsWith("VIEW")) { - statements(0).execute(s"DROP VIEW IF EXISTS $name") - } else { - statements(0).execute(s"DROP TABLE IF EXISTS $name") - } - } - statements.foreach(_.close()) - connections.foreach(_.close()) - } - } - - def withDatabase(dbNames: String*)(fs: (Statement => Unit)*): Unit = { - val user = System.getProperty("user.name") - val connections = fs.map { _ => DriverManager.getConnection(jdbcUri, user, "") } - val statements = connections.map(_.createStatement()) - - try { - statements.zip(fs).foreach { case (s, f) => f(s) } - } finally { - dbNames.foreach { name => - statements(0).execute(s"DROP DATABASE IF EXISTS $name") - } - statements.foreach(_.close()) - connections.foreach(_.close()) - } - } - - def withJdbcStatement(tableNames: String*)(f: Statement => Unit): Unit = { - withMultipleConnectionJdbcStatement(tableNames: _*)(f) - } -} - -abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAll with Logging { +abstract class HiveThriftServer2TestBase extends SparkFunSuite with BeforeAndAfterAll with Logging { def mode: ServerMode.Value private val CLASS_NAME = HiveThriftServer2.getClass.getCanonicalName.stripSuffix("$") @@ -1165,7 +1152,7 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl protected def extraConf: Seq[String] = Nil - protected def serverStartCommand(port: Int) = { + protected def serverStartCommand(): Seq[String] = { val portConf = if (mode == ServerMode.binary) { ConfVars.HIVE_SERVER2_THRIFT_PORT } else { @@ -1178,7 +1165,7 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl val tempLog4jConf = Utils.createTempDir().getCanonicalPath Files.write( - """log4j.rootCategory=DEBUG, console + """log4j.rootCategory=INFO, console |log4j.appender.console=org.apache.log4j.ConsoleAppender |log4j.appender.console.target=System.err |log4j.appender.console.layout=org.apache.log4j.PatternLayout @@ -1198,7 +1185,7 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl | --hiveconf 
${ConfVars.HIVE_SERVER2_TRANSPORT_MODE}=$mode | --hiveconf ${ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION}=$operationLogPath | --hiveconf ${ConfVars.LOCALSCRATCHDIR}=$lScratchDir - | --hiveconf $portConf=$port + | --hiveconf $portConf=0 | --driver-class-path $driverClassPath | --driver-java-options -Dlog4j.debug | --conf spark.ui.enabled=false @@ -1220,7 +1207,7 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl val SERVER_STARTUP_TIMEOUT = 3.minutes - private def startThriftServer(port: Int, attempt: Int) = { + private def startThriftServer(attempt: Int) = { warehousePath = Utils.createTempDir() warehousePath.delete() metastorePath = Utils.createTempDir() @@ -1232,18 +1219,16 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl logPath = null logTailingProcess = null - val command = serverStartCommand(port) + val command = serverStartCommand() diagnosisBuffer ++= s""" |### Attempt $attempt ### |HiveThriftServer2 command line: $command - |Listening port: $port + |Listening port: 0 |System user: $user """.stripMargin.split("\n") - logInfo(s"Trying to start HiveThriftServer2: port=$port, mode=$mode, attempt=$attempt") - logPath = { val lines = Utils.executeAndGetOutput( command = command, @@ -1270,7 +1255,11 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl // Ensures that the following "tail" command won't fail. logPath.createNewFile() - val successLines = Seq(THRIFT_BINARY_SERVICE_LIVE, THRIFT_HTTP_SERVICE_LIVE) + val successLine = if (mode == ServerMode.http) { + THRIFT_HTTP_SERVICE_LIVE + } else { + THRIFT_BINARY_SERVICE_LIVE + } logTailingProcess = { val command = s"/usr/bin/env tail -n +0 -f ${logPath.getCanonicalPath}".split(" ") @@ -1279,14 +1268,15 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl val captureOutput = (line: String) => diagnosisBuffer.synchronized { diagnosisBuffer += line - successLines.foreach { r => - if (line.contains(r)) { - serverStarted.trySuccess(()) - } + if (line.contains(successLine)) { + listeningPort = line.split(" on port ")(1).split(' ').head.toInt + logInfo(s"Started HiveThriftServer2: port=$listeningPort, mode=$mode, attempt=$attempt") + serverStarted.trySuccess(()) + () } } - val process = builder.start() + val process = builder.start() new ProcessOutputCapturer(process.getInputStream, captureOutput).start() new ProcessOutputCapturer(process.getErrorStream, captureOutput).start() @@ -1337,16 +1327,18 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl override protected def beforeAll(): Unit = { super.beforeAll() - // Chooses a random port between 10000 and 19999 - listeningPort = 10000 + Random.nextInt(10000) diagnosisBuffer.clear() // Retries up to 3 times with different port numbers if the server fails to start - (1 to 3).foldLeft(Try(startThriftServer(listeningPort, 0))) { case (started, attempt) => + (1 to 3).foldLeft(Try(startThriftServer(0))) { case (started, attempt) => started.orElse { - listeningPort += 1 stopThriftServer() - Try(startThriftServer(listeningPort, attempt)) + Try { + startThriftServer(attempt) + eventually(timeout(30.seconds), interval(1.seconds)) { + withJdbcStatement() { _.execute("SELECT 1") } + } + } } }.recover { case cause: Throwable => @@ -1365,4 +1357,91 @@ abstract class HiveThriftServer2Test extends SparkFunSuite with BeforeAndAfterAl super.afterAll() } } + + Utils.classForName(classOf[HiveDriver].getCanonicalName) + + protected def 
jdbcUri(database: String = "default"): String = if (mode == ServerMode.http) { + s"""jdbc:hive2://localhost:$serverPort/ + |$database? + |hive.server2.transport.mode=http; + |hive.server2.thrift.http.path=cliservice; + |${hiveConfList}#${hiveVarList} + """.stripMargin.split("\n").mkString.trim + } else { + s"jdbc:hive2://localhost:$serverPort/$database?${hiveConfList}#${hiveVarList}" + } + + private def tryCaptureSysLog(f: => Unit): Unit = { + try f catch { + case e: Exception => + // Dump the HiveThriftServer2 log if error occurs, e.g. getConnection failure. + dumpLogs() + throw e + } + } + + def withMultipleConnectionJdbcStatement( + tableNames: String*)(fs: (Statement => Unit)*): Unit = tryCaptureSysLog { + val user = System.getProperty("user.name") + val connections = fs.map { _ => DriverManager.getConnection(jdbcUri(), user, "") } + val statements = connections.map(_.createStatement()) + + try { + statements.zip(fs).foreach { case (s, f) => f(s) } + } finally { + tableNames.foreach { name => + // TODO: Need a better way to drop the view. + if (name.toUpperCase(Locale.ROOT).startsWith("VIEW")) { + statements(0).execute(s"DROP VIEW IF EXISTS $name") + } else { + statements(0).execute(s"DROP TABLE IF EXISTS $name") + } + } + statements.foreach(_.close()) + connections.foreach(_.close()) + } + } + + def withDatabase(dbNames: String*)(fs: (Statement => Unit)*): Unit = tryCaptureSysLog { + val user = System.getProperty("user.name") + val connections = fs.map { _ => DriverManager.getConnection(jdbcUri(), user, "") } + val statements = connections.map(_.createStatement()) + + try { + statements.zip(fs).foreach { case (s, f) => f(s) } + } finally { + dbNames.foreach { name => + statements(0).execute(s"DROP DATABASE IF EXISTS $name") + } + statements.foreach(_.close()) + connections.foreach(_.close()) + } + } + + def withJdbcStatement(tableNames: String*)(f: Statement => Unit): Unit = { + withMultipleConnectionJdbcStatement(tableNames: _*)(f) + } +} + +/** + * Common tests for both binary and http mode thrift server + * TODO: SPARK-31914: Move common tests from subclasses to this trait + */ +abstract class HiveThriftServer2Test extends HiveThriftServer2TestBase { + test("SPARK-17819: Support default database in connection URIs") { + withDatabase("spark17819") { statement => + statement.execute(s"CREATE DATABASE IF NOT EXISTS spark17819") + val jdbcStr = jdbcUri("spark17819") + val connection = DriverManager.getConnection(jdbcStr, user, "") + val statementN = connection.createStatement() + try { + val resultSet = statementN.executeQuery("select current_database()") + resultSet.next() + assert(resultSet.getString(1) === "spark17819") + } finally { + statementN.close() + connection.close() + } + } + } } diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JdbcConnectionUriSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JdbcConnectionUriSuite.scala deleted file mode 100644 index fb8a7e273ae44..0000000000000 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/JdbcConnectionUriSuite.scala +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.hive.thriftserver - -import java.sql.DriverManager - -import org.apache.hive.jdbc.HiveDriver - -import org.apache.spark.util.Utils - -class JdbcConnectionUriSuite extends HiveThriftServer2Test { - Utils.classForName(classOf[HiveDriver].getCanonicalName) - - override def mode: ServerMode.Value = ServerMode.binary - - val JDBC_TEST_DATABASE = "jdbc_test_database" - val USER = System.getProperty("user.name") - val PASSWORD = "" - - override protected def beforeAll(): Unit = { - super.beforeAll() - - val jdbcUri = s"jdbc:hive2://localhost:$serverPort/" - val connection = DriverManager.getConnection(jdbcUri, USER, PASSWORD) - val statement = connection.createStatement() - statement.execute(s"CREATE DATABASE $JDBC_TEST_DATABASE") - connection.close() - } - - override protected def afterAll(): Unit = { - try { - val jdbcUri = s"jdbc:hive2://localhost:$serverPort/" - val connection = DriverManager.getConnection(jdbcUri, USER, PASSWORD) - val statement = connection.createStatement() - statement.execute(s"DROP DATABASE $JDBC_TEST_DATABASE") - connection.close() - } finally { - super.afterAll() - } - } - - test("SPARK-17819 Support default database in connection URIs") { - val jdbcUri = s"jdbc:hive2://localhost:$serverPort/$JDBC_TEST_DATABASE" - val connection = DriverManager.getConnection(jdbcUri, USER, PASSWORD) - val statement = connection.createStatement() - try { - val resultSet = statement.executeQuery("select current_database()") - resultSet.next() - assert(resultSet.getString(1) === JDBC_TEST_DATABASE) - } finally { - statement.close() - connection.close() - } - } -} diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SharedThriftServer.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SharedThriftServer.scala index 5f17607585521..8f61268c838fe 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SharedThriftServer.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SharedThriftServer.scala @@ -31,6 +31,7 @@ import org.apache.hadoop.hive.ql.session.SessionState import org.apache.hive.jdbc.HttpBasicAuthInterceptor import org.apache.hive.service.auth.PlainSaslHelper import org.apache.hive.service.cli.thrift.{ThriftCLIService, ThriftCLIServiceClient} +import org.apache.hive.service.rpc.thrift.TCLIService.Client import org.apache.http.impl.client.HttpClientBuilder import org.apache.thrift.protocol.TBinaryProtocol import org.apache.thrift.transport.{THttpClient, TSocket} @@ -115,7 +116,7 @@ trait SharedThriftServer extends SharedSparkSession { } val protocol = new TBinaryProtocol(transport) - val client = new ThriftCLIServiceClient(new ThriftserverShimUtils.Client(protocol)) + val client = new ThriftCLIServiceClient(new Client(protocol)) transport.open() try f(client) finally transport.close() diff --git 
a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperationSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperationSuite.scala index 4c2f29e0bf394..c8bb6d9ee0821 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperationSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperationSuite.scala @@ -25,6 +25,7 @@ import scala.concurrent.duration._ import org.apache.hadoop.hive.conf.HiveConf import org.apache.hive.service.cli.OperationState import org.apache.hive.service.cli.session.{HiveSession, HiveSessionImpl} +import org.apache.hive.service.rpc.thrift.TProtocolVersion import org.mockito.Mockito.{doReturn, mock, spy, when, RETURNS_DEEP_STUBS} import org.mockito.invocation.InvocationOnMock @@ -60,11 +61,12 @@ class SparkExecuteStatementOperationSuite extends SparkFunSuite with SharedSpark Seq( (OperationState.CANCELED, (_: SparkExecuteStatementOperation).cancel()), + (OperationState.TIMEDOUT, (_: SparkExecuteStatementOperation).timeoutCancel()), (OperationState.CLOSED, (_: SparkExecuteStatementOperation).close()) ).foreach { case (finalState, transition) => test("SPARK-32057 SparkExecuteStatementOperation should not transiently become ERROR " + s"before being set to $finalState") { - val hiveSession = new HiveSessionImpl(ThriftserverShimUtils.testedProtocolVersions.head, + val hiveSession = new HiveSessionImpl(TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1, "username", "password", new HiveConf, "ip address") hiveSession.open(new util.HashMap) @@ -108,7 +110,7 @@ class SparkExecuteStatementOperationSuite extends SparkFunSuite with SharedSpark signal: Semaphore, finalState: OperationState) extends SparkExecuteStatementOperation(sqlContext, hiveSession, statement, - new util.HashMap, false) { + new util.HashMap, false, 0) { override def cleanup(): Unit = { super.cleanup() diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala index 7369dbfcf7a51..897ea00975a05 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkMetadataOperationSuite.scala @@ -17,34 +17,56 @@ package org.apache.spark.sql.hive.thriftserver -import java.sql.{DatabaseMetaData, ResultSet} +import java.sql.{DatabaseMetaData, ResultSet, SQLFeatureNotSupportedException} +import org.apache.hive.common.util.HiveVersionInfo +import org.apache.hive.service.cli.HiveSQLException + +import org.apache.spark.SPARK_VERSION import org.apache.spark.sql.catalyst.analysis.FunctionRegistry import org.apache.spark.sql.types._ +import org.apache.spark.util.VersionUtils -class SparkMetadataOperationSuite extends HiveThriftJdbcTest { +class SparkMetadataOperationSuite extends HiveThriftServer2TestBase { override def mode: ServerMode.Value = ServerMode.binary test("Spark's own GetSchemasOperation(SparkGetSchemasOperation)") { def checkResult(rs: ResultSet, dbNames: Seq[String]): Unit = { - for (i <- dbNames.indices) { - assert(rs.next()) - assert(rs.getString("TABLE_SCHEM") === dbNames(i)) + val expected = dbNames.iterator + while(rs.next() || expected.hasNext) { + 
assert(rs.getString("TABLE_SCHEM") === expected.next) + assert(rs.getString("TABLE_CATALOG").isEmpty) } // Make sure there are no more elements assert(!rs.next()) + assert(!expected.hasNext, "All expected schemas should be visited") } - withDatabase("db1", "db2") { statement => - Seq("CREATE DATABASE db1", "CREATE DATABASE db2").foreach(statement.execute) - + val dbs = Seq("db1", "db2", "db33", "db44") + val dbDflts = Seq("default", "global_temp") + withDatabase(dbs: _*) { statement => + dbs.foreach( db => statement.execute(s"CREATE DATABASE IF NOT EXISTS $db")) val metaData = statement.getConnection.getMetaData - checkResult(metaData.getSchemas(null, "%"), Seq("db1", "db2", "default", "global_temp")) + Seq("", "%", null, ".*", "_*", "_%", ".%") foreach { pattern => + checkResult(metaData.getSchemas(null, pattern), dbs ++ dbDflts) + } + + Seq("db%", "db*") foreach { pattern => + checkResult(metaData.getSchemas(null, pattern), dbs) + } + + Seq("db_", "db.") foreach { pattern => + checkResult(metaData.getSchemas(null, pattern), dbs.take(2)) + } + checkResult(metaData.getSchemas(null, "db1"), Seq("db1")) checkResult(metaData.getSchemas(null, "db_not_exist"), Seq.empty) - checkResult(metaData.getSchemas(null, "db*"), Seq("db1", "db2")) + + val e = intercept[HiveSQLException](metaData.getSchemas(null, "*")) + assert(e.getCause.getMessage === + "Error operating GET_SCHEMAS Dangling meta character '*' near index 0\n*\n^") } } @@ -236,7 +258,7 @@ class SparkMetadataOperationSuite extends HiveThriftJdbcTest { withJdbcStatement() { statement => val metaData = statement.getConnection.getMetaData - checkResult(metaData.getTypeInfo, ThriftserverShimUtils.supportedType().map(_.getName)) + checkResult(metaData.getTypeInfo, SparkGetTypeInfoUtil.supportedType.map(_.getName)) } } @@ -261,6 +283,8 @@ class SparkMetadataOperationSuite extends HiveThriftJdbcTest { .add("c14", "timestamp", nullable = false, "14") .add("c15", "struct", nullable = true, "15") .add("c16", "binary", nullable = false, "16") + .add("c17", "char(255)", nullable = true, "17") + .add("c18", "varchar(1024)", nullable = false, "18") val ddl = s""" @@ -277,7 +301,8 @@ class SparkMetadataOperationSuite extends HiveThriftJdbcTest { import java.sql.Types._ val expectedJavaTypes = Seq(BOOLEAN, TINYINT, SMALLINT, INTEGER, BIGINT, FLOAT, DOUBLE, - DECIMAL, DECIMAL, VARCHAR, ARRAY, ARRAY, JAVA_OBJECT, DATE, TIMESTAMP, STRUCT, BINARY) + DECIMAL, DECIMAL, VARCHAR, ARRAY, ARRAY, JAVA_OBJECT, DATE, TIMESTAMP, STRUCT, BINARY, + CHAR, VARCHAR) var pos = 0 @@ -291,7 +316,8 @@ class SparkMetadataOperationSuite extends HiveThriftJdbcTest { val colSize = rowSet.getInt("COLUMN_SIZE") schema(pos).dataType match { - case StringType | BinaryType | _: ArrayType | _: MapType => assert(colSize === 0) + case StringType | BinaryType | _: ArrayType | _: MapType | _: VarcharType => + assert(colSize === 0) case o => assert(colSize === o.defaultSize) } @@ -320,7 +346,7 @@ class SparkMetadataOperationSuite extends HiveThriftJdbcTest { pos += 1 } - assert(pos === 17, "all columns should have been verified") + assert(pos === 19, "all columns should have been verified") } } @@ -377,4 +403,205 @@ class SparkMetadataOperationSuite extends HiveThriftJdbcTest { } } } + + test("Hive ThriftServer JDBC Database MetaData API Auditing - Method not supported") { + // These APIs belong to the upstream Apache Hive's hive-jdbc artifact where defines the hive + // behavior. Users can also use it to interact with Spark ThriftServer directly. 
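// The getSchemas assertions above exercise JDBC-style schema patterns: '%' behaves like
// the regex ".*", '_' like ".", and anything else is handed to java.util.regex as-is,
// which is why legacy "db*"/"db." still work and a bare "*" fails with "Dangling meta
// character". The sketch below is a rough, illustrative approximation of that
// behaviour, not Hive's CLIServiceUtils.patternToRegex (it ignores escaped wildcards,
// for instance).
object SchemaPatternSketch {
  import java.util.regex.Pattern

  def toRegex(pattern: String): Pattern = {
    val regex =
      if (pattern == null || pattern.isEmpty) ".*"
      else pattern.replace("%", ".*").replace("_", ".")
    Pattern.compile(regex)
  }

  def matches(pattern: String, schema: String): Boolean =
    toRegex(pattern).matcher(schema).matches()
}
// matches("db%", "db33") == true, matches("db_", "db33") == false,
// matches("%", "default") == true, and toRegex("*") throws the
// "Dangling meta character '*' near index 0" PatternSyntaxException seen in the test.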
Some behaviors + // are not fully consistent with Spark e.g. we support correlated subqueries but the hive-jdbc + // now fail directly at client side. There is nothing we can do but accept the current + // condition and highlight the difference and make it perspective in future changes both from + // upstream and inside Spark. + withJdbcStatement() { statement => + val metaData = statement.getConnection.getMetaData + Seq( + () => metaData.allProceduresAreCallable, + () => metaData.getURL, + () => metaData.getUserName, + () => metaData.isReadOnly, + () => metaData.nullsAreSortedHigh, + () => metaData.nullsAreSortedLow, + () => metaData.nullsAreSortedAtStart, + () => metaData.nullsAreSortedAtEnd, + () => metaData.usesLocalFiles, + () => metaData.usesLocalFilePerTable, + () => metaData.supportsMixedCaseIdentifiers, + () => metaData.supportsMixedCaseQuotedIdentifiers, + () => metaData.storesUpperCaseIdentifiers, + () => metaData.storesUpperCaseQuotedIdentifiers, + () => metaData.storesLowerCaseIdentifiers, + () => metaData.storesLowerCaseQuotedIdentifiers, + () => metaData.storesMixedCaseIdentifiers, + () => metaData.storesMixedCaseQuotedIdentifiers, + () => metaData.getSQLKeywords, + () => metaData.nullPlusNonNullIsNull, + () => metaData.supportsConvert, + () => metaData.supportsTableCorrelationNames, + () => metaData.supportsDifferentTableCorrelationNames, + () => metaData.supportsExpressionsInOrderBy, + () => metaData.supportsOrderByUnrelated, + () => metaData.supportsGroupByUnrelated, + () => metaData.supportsGroupByBeyondSelect, + () => metaData.supportsLikeEscapeClause, + () => metaData.supportsMultipleTransactions, + () => metaData.supportsMinimumSQLGrammar, + () => metaData.supportsCoreSQLGrammar, + () => metaData.supportsExtendedSQLGrammar, + () => metaData.supportsANSI92EntryLevelSQL, + () => metaData.supportsANSI92IntermediateSQL, + () => metaData.supportsANSI92FullSQL, + () => metaData.supportsIntegrityEnhancementFacility, + () => metaData.isCatalogAtStart, + () => metaData.supportsSubqueriesInComparisons, + () => metaData.supportsSubqueriesInExists, + () => metaData.supportsSubqueriesInIns, + () => metaData.supportsSubqueriesInQuantifieds, + // Spark support this, see https://issues.apache.org/jira/browse/SPARK-18455 + () => metaData.supportsCorrelatedSubqueries, + () => metaData.supportsOpenCursorsAcrossCommit, + () => metaData.supportsOpenCursorsAcrossRollback, + () => metaData.supportsOpenStatementsAcrossCommit, + () => metaData.supportsOpenStatementsAcrossRollback, + () => metaData.getMaxBinaryLiteralLength, + () => metaData.getMaxCharLiteralLength, + () => metaData.getMaxColumnsInGroupBy, + () => metaData.getMaxColumnsInIndex, + () => metaData.getMaxColumnsInOrderBy, + () => metaData.getMaxColumnsInSelect, + () => metaData.getMaxColumnsInTable, + () => metaData.getMaxConnections, + () => metaData.getMaxCursorNameLength, + () => metaData.getMaxIndexLength, + () => metaData.getMaxSchemaNameLength, + () => metaData.getMaxProcedureNameLength, + () => metaData.getMaxCatalogNameLength, + () => metaData.getMaxRowSize, + () => metaData.doesMaxRowSizeIncludeBlobs, + () => metaData.getMaxStatementLength, + () => metaData.getMaxStatements, + () => metaData.getMaxTableNameLength, + () => metaData.getMaxTablesInSelect, + () => metaData.getMaxUserNameLength, + () => metaData.supportsTransactionIsolationLevel(1), + () => metaData.supportsDataDefinitionAndDataManipulationTransactions, + () => metaData.supportsDataManipulationTransactionsOnly, + () => 
metaData.dataDefinitionCausesTransactionCommit, + () => metaData.dataDefinitionIgnoredInTransactions, + () => metaData.getColumnPrivileges("", "%", "%", "%"), + () => metaData.getTablePrivileges("", "%", "%"), + () => metaData.getBestRowIdentifier("", "%", "%", 0, true), + () => metaData.getVersionColumns("", "%", "%"), + () => metaData.getExportedKeys("", "default", ""), + () => metaData.supportsResultSetConcurrency(ResultSet.TYPE_FORWARD_ONLY, 2), + () => metaData.ownUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.ownDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.ownInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.othersUpdatesAreVisible(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.othersDeletesAreVisible(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.othersInsertsAreVisible(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.updatesAreDetected(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.deletesAreDetected(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.insertsAreDetected(ResultSet.TYPE_FORWARD_ONLY), + () => metaData.supportsNamedParameters, + () => metaData.supportsMultipleOpenResults, + () => metaData.supportsGetGeneratedKeys, + () => metaData.getSuperTypes("", "%", "%"), + () => metaData.getSuperTables("", "%", "%"), + () => metaData.getAttributes("", "%", "%", "%"), + () => metaData.getResultSetHoldability, + () => metaData.locatorsUpdateCopy, + () => metaData.supportsStatementPooling, + () => metaData.getRowIdLifetime, + () => metaData.supportsStoredFunctionsUsingCallSyntax, + () => metaData.autoCommitFailureClosesAllResultSets, + () => metaData.getClientInfoProperties, + () => metaData.getFunctionColumns("", "%", "%", "%"), + () => metaData.getPseudoColumns("", "%", "%", "%"), + () => metaData.generatedKeyAlwaysReturned).foreach { func => + val e = intercept[SQLFeatureNotSupportedException](func()) + assert(e.getMessage === "Method not supported") + } + } + } + + test("Hive ThriftServer JDBC Database MetaData API Auditing - Method supported") { + // These APIs belong to the upstream Apache Hive's hive-jdbc artifact where defines the hive + // behavior. Users can also use it to interact with Spark ThriftServer directly. Some behaviors + // are not fully consistent with Spark e.g. we can work with multiple catalogs. + // There is nothing we can do but accept the current condition and highlight the difference + // and make it perspective in future changes both from upstream and inside Spark. 
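+    // For reference, a rough sketch (placeholder URL, port and credentials, not taken from this
+    // suite) of how an external JDBC application would observe the same hive-jdbc answers:
+    //
+    //   import java.sql.DriverManager
+    //   val conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "user", "")
+    //   val md = conn.getMetaData
+    //   assert(md.getDatabaseProductName == "Spark SQL")  // answered by Spark ThriftServer
+    //   assert(!md.supportsUnion)                         // hard-coded to false in hive-jdbc
+    //   conn.close()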
+ withJdbcStatement() { statement => + val metaData = statement.getConnection.getMetaData + assert(metaData.allTablesAreSelectable) + assert(metaData.getDatabaseProductName === "Spark SQL") + assert(metaData.getDatabaseProductVersion === SPARK_VERSION) + assert(metaData.getDriverName === "Hive JDBC") + assert(metaData.getDriverVersion === HiveVersionInfo.getVersion) + assert(metaData.getDatabaseMajorVersion === VersionUtils.majorVersion(SPARK_VERSION)) + assert(metaData.getDatabaseMinorVersion === VersionUtils.minorVersion(SPARK_VERSION)) + assert(metaData.getIdentifierQuoteString === " ", + "This method returns a space \" \" if identifier quoting is not supported") + assert(metaData.getNumericFunctions === "") + assert(metaData.getStringFunctions === "") + assert(metaData.getSystemFunctions === "") + assert(metaData.getTimeDateFunctions === "") + assert(metaData.getSearchStringEscape === "\\") + assert(metaData.getExtraNameCharacters === "") + assert(metaData.supportsAlterTableWithAddColumn()) + assert(!metaData.supportsAlterTableWithDropColumn()) + assert(metaData.supportsColumnAliasing()) + assert(metaData.supportsGroupBy) + assert(!metaData.supportsMultipleResultSets) + assert(!metaData.supportsNonNullableColumns) + assert(metaData.supportsOuterJoins) + assert(metaData.supportsFullOuterJoins) + assert(metaData.supportsLimitedOuterJoins) + assert(metaData.getSchemaTerm === "database") + assert(metaData.getProcedureTerm === "UDF") + assert(metaData.getCatalogTerm === "instance") + assert(metaData.getCatalogSeparator === ".") + assert(metaData.supportsSchemasInDataManipulation) + assert(!metaData.supportsSchemasInProcedureCalls) + assert(metaData.supportsSchemasInTableDefinitions) + assert(!metaData.supportsSchemasInIndexDefinitions) + assert(!metaData.supportsSchemasInPrivilegeDefinitions) + // This is actually supported, but hive jdbc package return false + assert(!metaData.supportsCatalogsInDataManipulation) + assert(!metaData.supportsCatalogsInProcedureCalls) + // This is actually supported, but hive jdbc package return false + assert(!metaData.supportsCatalogsInTableDefinitions) + assert(!metaData.supportsCatalogsInIndexDefinitions) + assert(!metaData.supportsCatalogsInPrivilegeDefinitions) + assert(!metaData.supportsPositionedDelete) + assert(!metaData.supportsPositionedUpdate) + assert(!metaData.supportsSelectForUpdate) + assert(!metaData.supportsStoredProcedures) + // This is actually supported, but hive jdbc package return false + assert(!metaData.supportsUnion) + assert(metaData.supportsUnionAll) + assert(metaData.getMaxColumnNameLength === 128) + assert(metaData.getDefaultTransactionIsolation === java.sql.Connection.TRANSACTION_NONE) + assert(!metaData.supportsTransactions) + assert(!metaData.getProcedureColumns("", "%", "%", "%").next()) + assert(!metaData.getImportedKeys("", "default", "").next()) + + // TODO: SPARK-33219 Disable GetPrimaryKeys and GetCrossReference APIs explicitly + // for Spark ThriftServer + assert(!metaData.getPrimaryKeys("", "default", "").next()) + assert(!metaData.getCrossReference("", "default", "src", "", "default", "src2").next()) + + assert(!metaData.getIndexInfo("", "default", "src", true, true).next()) + assert(metaData.supportsResultSetType(ResultSet.TYPE_FORWARD_ONLY)) + assert(metaData.supportsResultSetType(ResultSet.TYPE_SCROLL_INSENSITIVE)) + assert(metaData.supportsResultSetType(ResultSet.TYPE_SCROLL_SENSITIVE)) + assert(!metaData.supportsBatchUpdates) + assert(!metaData.getUDTs(",", "%", "%", null).next()) + 
assert(!metaData.supportsSavepoints) + assert(!metaData.supportsResultSetHoldability(ResultSet.HOLD_CURSORS_OVER_COMMIT)) + assert(metaData.getJDBCMajorVersion === 3) + assert(metaData.getJDBCMinorVersion === 0) + assert(metaData.getSQLStateType === DatabaseMetaData.sqlStateSQL) + assert(metaData.getMaxLogicalLobSize === 0) + assert(!metaData.supportsRefCursors) + } + } } diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala index f28faea2be868..f2bb337e4a826 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLEnvSuite.scala @@ -42,7 +42,7 @@ class SparkSQLEnvSuite extends SparkFunSuite { QUERY_EXECUTION_LISTENERS.key -> classOf[DummyQueryExecutionListener].getCanonicalName, STREAMING_QUERY_LISTENERS.key -> classOf[DummyStreamingQueryListener].getCanonicalName, WAREHOUSE_PATH.key -> TestHiveContext.makeWarehouseDir().toURI.getPath, - // The issue occured from "maven" and list of custom jars, but providing list of custom + // The issue occurred from "maven" and list of custom jars, but providing list of custom // jars to initialize HiveClient isn't trivial, so just use "maven". HIVE_METASTORE_JARS.key -> "maven", HIVE_METASTORE_VERSION.key -> null, diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkThriftServerProtocolVersionsSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkThriftServerProtocolVersionsSuite.scala index fd45e7a48c0eb..fd4d7231e8989 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkThriftServerProtocolVersionsSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/SparkThriftServerProtocolVersionsSuite.scala @@ -23,31 +23,33 @@ import java.util.{List => JList, Properties} import org.apache.hive.jdbc.{HiveConnection, HiveQueryResultSet} import org.apache.hive.service.auth.PlainSaslHelper import org.apache.hive.service.cli.GetInfoType +import org.apache.hive.service.rpc.thrift.{TExecuteStatementReq, TGetInfoReq, TGetTablesReq, TOpenSessionReq, TProtocolVersion} +import org.apache.hive.service.rpc.thrift.TCLIService.Client import org.apache.thrift.protocol.TBinaryProtocol import org.apache.thrift.transport.TSocket import org.apache.spark.sql.catalyst.util.NumberConverter import org.apache.spark.unsafe.types.UTF8String -class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { +class SparkThriftServerProtocolVersionsSuite extends HiveThriftServer2TestBase { override def mode: ServerMode.Value = ServerMode.binary def testExecuteStatementWithProtocolVersion( - version: ThriftserverShimUtils.TProtocolVersion, + version: TProtocolVersion, sql: String)(f: HiveQueryResultSet => Unit): Unit = { val rawTransport = new TSocket("localhost", serverPort) val connection = new HiveConnection(s"jdbc:hive2://localhost:$serverPort", new Properties) val user = System.getProperty("user.name") val transport = PlainSaslHelper.getPlainTransport(user, "anonymous", rawTransport) - val client = new ThriftserverShimUtils.Client(new TBinaryProtocol(transport)) + val client = new Client(new TBinaryProtocol(transport)) transport.open() var rs: HiveQueryResultSet = null try { - val clientProtocol = new 
ThriftserverShimUtils.TOpenSessionReq(version) + val clientProtocol = new TOpenSessionReq(version) val openResp = client.OpenSession(clientProtocol) val sessHandle = openResp.getSessionHandle - val execReq = new ThriftserverShimUtils.TExecuteStatementReq(sessHandle, sql) + val execReq = new TExecuteStatementReq(sessHandle, sql) val execResp = client.ExecuteStatement(execReq) val stmtHandle = execResp.getOperationHandle @@ -72,23 +74,21 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { } } - def testGetInfoWithProtocolVersion(version: ThriftserverShimUtils.TProtocolVersion): Unit = { + def testGetInfoWithProtocolVersion(version: TProtocolVersion): Unit = { val rawTransport = new TSocket("localhost", serverPort) val connection = new HiveConnection(s"jdbc:hive2://localhost:$serverPort", new Properties) val transport = PlainSaslHelper.getPlainTransport(user, "anonymous", rawTransport) - val client = new ThriftserverShimUtils.Client(new TBinaryProtocol(transport)) + val client = new Client(new TBinaryProtocol(transport)) transport.open() try { - val clientProtocol = new ThriftserverShimUtils.TOpenSessionReq(version) + val clientProtocol = new TOpenSessionReq(version) val openResp = client.OpenSession(clientProtocol) val sessHandle = openResp.getSessionHandle - val dbVersionReq = - new ThriftserverShimUtils.TGetInfoReq(sessHandle, GetInfoType.CLI_DBMS_VER.toTGetInfoType) + val dbVersionReq = new TGetInfoReq(sessHandle, GetInfoType.CLI_DBMS_VER.toTGetInfoType) val dbVersion = client.GetInfo(dbVersionReq).getInfoValue.getStringValue - val dbNameReq = - new ThriftserverShimUtils.TGetInfoReq(sessHandle, GetInfoType.CLI_DBMS_NAME.toTGetInfoType) + val dbNameReq = new TGetInfoReq(sessHandle, GetInfoType.CLI_DBMS_NAME.toTGetInfoType) val dbName = client.GetInfo(dbNameReq).getInfoValue.getStringValue assert(dbVersion === org.apache.spark.SPARK_VERSION) @@ -101,21 +101,21 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { } def testGetTablesWithProtocolVersion( - version: ThriftserverShimUtils.TProtocolVersion, + version: TProtocolVersion, schema: String, tableNamePattern: String, tableTypes: JList[String])(f: HiveQueryResultSet => Unit): Unit = { val rawTransport = new TSocket("localhost", serverPort) val connection = new HiveConnection(s"jdbc:hive2://localhost:$serverPort", new Properties) val transport = PlainSaslHelper.getPlainTransport(user, "anonymous", rawTransport) - val client = new ThriftserverShimUtils.Client(new TBinaryProtocol(transport)) + val client = new Client(new TBinaryProtocol(transport)) transport.open() var rs: HiveQueryResultSet = null try { - val clientProtocol = new ThriftserverShimUtils.TOpenSessionReq(version) + val clientProtocol = new TOpenSessionReq(version) val openResp = client.OpenSession(clientProtocol) val sessHandle = openResp.getSessionHandle - val getTableReq = new ThriftserverShimUtils.TGetTablesReq(sessHandle) + val getTableReq = new TGetTablesReq(sessHandle) getTableReq.setSchemaName(schema) getTableReq.setTableName(tableNamePattern) getTableReq.setTableTypes(tableTypes) @@ -143,11 +143,17 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { } } - ThriftserverShimUtils.testedProtocolVersions.foreach { version => + TProtocolVersion.values().foreach { version => test(s"$version get byte type") { testExecuteStatementWithProtocolVersion(version, "SELECT cast(1 as byte)") { rs => assert(rs.next()) assert(rs.getByte(1) === 1.toByte) + val metaData = rs.getMetaData + 
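+        // For integral types the reported precision is the maximum number of decimal digits of
+        // the type (3 for tinyint, 5 for smallint, 10 for int, 19 for bigint) and the scale is
+        // always 0, which is what this and the following integral-type tests assert.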
assert(metaData.getColumnName(1) === "CAST(1 AS TINYINT)") + assert(metaData.getColumnTypeName(1) === "tinyint") + assert(metaData.getColumnType(1) === java.sql.Types.TINYINT) + assert(metaData.getPrecision(1) === 3) + assert(metaData.getScale(1) === 0) } } @@ -155,6 +161,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT cast(1 as short)") { rs => assert(rs.next()) assert(rs.getShort(1) === 1.toShort) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "CAST(1 AS SMALLINT)") + assert(metaData.getColumnTypeName(1) === "smallint") + assert(metaData.getColumnType(1) === java.sql.Types.SMALLINT) + assert(metaData.getPrecision(1) === 5) + assert(metaData.getScale(1) === 0) } } @@ -162,6 +174,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT 1") { rs => assert(rs.next()) assert(rs.getInt(1) === 1) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "1") + assert(metaData.getColumnTypeName(1) === "int") + assert(metaData.getColumnType(1) === java.sql.Types.INTEGER) + assert(metaData.getPrecision(1) === 10) + assert(metaData.getScale(1) === 0) } } @@ -169,6 +187,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT cast(1 as bigint)") { rs => assert(rs.next()) assert(rs.getLong(1) === 1L) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "CAST(1 AS BIGINT)") + assert(metaData.getColumnTypeName(1) === "bigint") + assert(metaData.getColumnType(1) === java.sql.Types.BIGINT) + assert(metaData.getPrecision(1) === 19) + assert(metaData.getScale(1) === 0) } } @@ -176,6 +200,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT cast(1.2 as float)") { rs => assert(rs.next()) assert(rs.getFloat(1) === 1.2F) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "CAST(1.2 AS FLOAT)") + assert(metaData.getColumnTypeName(1) === "float") + assert(metaData.getColumnType(1) === java.sql.Types.FLOAT) + assert(metaData.getPrecision(1) === 7) + assert(metaData.getScale(1) === 7) } } @@ -183,14 +213,30 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT cast(1.2 as double)") { rs => assert(rs.next()) assert(rs.getDouble(1) === 1.2D) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "CAST(1.2 AS DOUBLE)") + assert(metaData.getColumnTypeName(1) === "double") + assert(metaData.getColumnType(1) === java.sql.Types.DOUBLE) + assert(metaData.getPrecision(1) === 15) + assert(metaData.getScale(1) === 15) } } test(s"$version get decimal type") { testExecuteStatementWithProtocolVersion(version, - "SELECT cast(1 as decimal(18, 2)) as c") { rs => + "SELECT cast(1 as decimal(9, 1)) as col0, 1234.56BD as col1, 0.123 as col2") { rs => assert(rs.next()) - assert(rs.getBigDecimal(1) === new java.math.BigDecimal("1.00")) + assert(rs.getBigDecimal(1) === new java.math.BigDecimal("1.0")) + assert(rs.getBigDecimal("col1") === new java.math.BigDecimal("1234.56")) + assert(rs.getBigDecimal("col2") === new java.math.BigDecimal("0.123")) + val metaData = rs.getMetaData + (1 to 3) foreach { i => + assert(metaData.getColumnName(i) === s"col${i - 1}") + assert(metaData.getColumnTypeName(i) === "decimal") + 
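+          // col0 is DECIMAL(9, 1), while the exact literals 1234.56BD and 0.123 are typed as
+          // DECIMAL(6, 2) and DECIMAL(3, 3), so for i = 1..3 the expected precision is
+          // 12 - 3 * i and the expected scale is i.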
assert(metaData.getColumnType(i) === java.sql.Types.DECIMAL) + assert(metaData.getPrecision(i) == 12 - i * 3) + assert(metaData.getScale(i) == i) + } } testExecuteStatementWithProtocolVersion(version, "SELECT cast(null as decimal) ") { rs => @@ -203,6 +249,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT 'str'") { rs => assert(rs.next()) assert(rs.getString(1) === "str") + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) ==="str") + assert(metaData.getColumnTypeName(1) === "string") + assert(metaData.getColumnType(1) === java.sql.Types.VARCHAR) + assert(metaData.getPrecision(1) === Int.MaxValue) + assert(metaData.getScale(1) === 0) } } @@ -211,6 +263,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { "SELECT cast('char-str' as char(10))") { rs => assert(rs.next()) assert(rs.getString(1) === "char-str") + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) ==="CAST(char-str AS STRING)") + assert(metaData.getColumnTypeName(1) === "string") + assert(metaData.getColumnType(1) === java.sql.Types.VARCHAR) + assert(metaData.getPrecision(1) === Int.MaxValue) + assert(metaData.getScale(1) === 0) } } @@ -219,6 +277,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { "SELECT cast('varchar-str' as varchar(10))") { rs => assert(rs.next()) assert(rs.getString(1) === "varchar-str") + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) ==="CAST(varchar-str AS STRING)") + assert(metaData.getColumnTypeName(1) === "string") + assert(metaData.getColumnType(1) === java.sql.Types.VARCHAR) + assert(metaData.getPrecision(1) === Int.MaxValue) + assert(metaData.getScale(1) === 0) } } @@ -226,6 +290,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT cast('ABC' as binary)") { rs => assert(rs.next()) assert(rs.getString(1) === "ABC") + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "CAST(ABC AS BINARY)") + assert(metaData.getColumnTypeName(1) === "binary") + assert(metaData.getColumnType(1) === java.sql.Types.BINARY) + assert(metaData.getPrecision(1) === Int.MaxValue) + assert(metaData.getScale(1) === 0) } testExecuteStatementWithProtocolVersion(version, "SELECT cast(49960 as binary)") { rs => assert(rs.next()) @@ -241,6 +311,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT true") { rs => assert(rs.next()) assert(rs.getBoolean(1) === true) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "true") + assert(metaData.getColumnTypeName(1) === "boolean") + assert(metaData.getColumnType(1) === java.sql.Types.BOOLEAN) + assert(metaData.getPrecision(1) === 1) + assert(metaData.getScale(1) === 0) } } @@ -248,6 +324,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT cast('2019-07-22' as date)") { rs => assert(rs.next()) assert(rs.getDate(1) === Date.valueOf("2019-07-22")) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "CAST(2019-07-22 AS DATE)") + assert(metaData.getColumnTypeName(1) === "date") + assert(metaData.getColumnType(1) === java.sql.Types.DATE) + assert(metaData.getPrecision(1) === 10) + assert(metaData.getScale(1) === 0) } } @@ -256,6 +338,12 @@ class SparkThriftServerProtocolVersionsSuite extends 
HiveThriftJdbcTest { "SELECT cast('2019-07-22 18:14:00' as timestamp)") { rs => assert(rs.next()) assert(rs.getTimestamp(1) === Timestamp.valueOf("2019-07-22 18:14:00")) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "CAST(2019-07-22 18:14:00 AS TIMESTAMP)") + assert(metaData.getColumnTypeName(1) === "timestamp") + assert(metaData.getColumnType(1) === java.sql.Types.TIMESTAMP) + assert(metaData.getPrecision(1) === 29) + assert(metaData.getScale(1) === 9) } } @@ -263,6 +351,12 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT null") { rs => assert(rs.next()) assert(rs.getString(1) === null) + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "NULL") + assert(metaData.getColumnTypeName(1) === "void") + assert(metaData.getColumnType(1) === java.sql.Types.NULL) + assert(metaData.getPrecision(1) === 0) + assert(metaData.getScale(1) === 0) } } @@ -270,28 +364,67 @@ class SparkThriftServerProtocolVersionsSuite extends HiveThriftJdbcTest { testExecuteStatementWithProtocolVersion(version, "SELECT interval '1' year '2' day") { rs => assert(rs.next()) assert(rs.getString(1) === "1 years 2 days") + val metaData = rs.getMetaData + assert(metaData.getColumnName(1) === "INTERVAL '1 years 2 days'") + assert(metaData.getColumnTypeName(1) === "string") + assert(metaData.getColumnType(1) === java.sql.Types.VARCHAR) + assert(metaData.getPrecision(1) === Int.MaxValue) + assert(metaData.getScale(1) === 0) } } test(s"$version get array type") { - testExecuteStatementWithProtocolVersion(version, "SELECT array(1, 2)") { rs => + testExecuteStatementWithProtocolVersion( + version, "SELECT array() AS col1, array(1, 2) AS col2") { rs => assert(rs.next()) - assert(rs.getString(1) === "[1,2]") + assert(rs.getString(2) === "[1,2]") + assert(rs.getObject("col1") === "[]") + assert(rs.getObject("col2") === "[1,2]") + val metaData = rs.getMetaData + (1 to 2) foreach { i => + assert(metaData.getColumnName(i) === s"col$i") + assert(metaData.getColumnTypeName(i) === "array") + assert(metaData.getColumnType(i) === java.sql.Types.ARRAY) + assert(metaData.getPrecision(i) === Int.MaxValue) + assert(metaData.getScale(i) == 0) + } } } test(s"$version get map type") { - testExecuteStatementWithProtocolVersion(version, "SELECT map(1, 2)") { rs => + testExecuteStatementWithProtocolVersion(version, + "SELECT map(), map(1, 2, 3, 4)") { rs => assert(rs.next()) - assert(rs.getString(1) === "{1:2}") + assert(rs.getObject(1) === "{}") + assert(rs.getObject(2) === "{1:2,3:4}") + assert(rs.getString(2) === "{1:2,3:4}") + val metaData = rs.getMetaData + (1 to 2) foreach { i => + assert(metaData.getColumnName(i).startsWith("map(")) + assert(metaData.getColumnTypeName(1) === "map") + assert(metaData.getColumnType(i) === java.sql.Types.JAVA_OBJECT) + assert(metaData.getPrecision(i) === Int.MaxValue) + assert(metaData.getScale(i) == 0) + } } } test(s"$version get struct type") { testExecuteStatementWithProtocolVersion(version, - "SELECT struct('alpha' AS A, 'beta' AS B)") { rs => + "SELECT struct('alpha' AS A, 'beta' AS B) as col0," + + " struct('1', '2') AS col1, named_struct('a', 2, 'b', 4) AS col2") { rs => assert(rs.next()) assert(rs.getString(1) === """{"A":"alpha","B":"beta"}""") + assert(rs.getObject("col1") === """{"col1":"1","col2":"2"}""") + assert(rs.getObject("col2") === """{"a":2,"b":4}""") + val metaData = rs.getMetaData + (1 to 3) foreach { i => + assert(metaData.getColumnName(i) === s"col${i - 1}") + 
assert(metaData.getColumnTypeName(1) === "struct") + assert(metaData.getColumnType(i) === java.sql.Types.STRUCT) + assert(metaData.getPrecision(i) === Int.MaxValue) + assert(metaData.getScale(i) == 0) + } } } diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala index ecc7ce71d950e..4a87be5f61195 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerQueryTestSuite.scala @@ -23,7 +23,6 @@ import java.util.{Locale, MissingFormatArgumentException} import scala.util.control.NonFatal -import org.apache.commons.io.FileUtils import org.apache.commons.lang3.exception.ExceptionUtils import org.apache.spark.SparkException @@ -39,12 +38,12 @@ import org.apache.spark.sql.types._ * * To run the entire test suite: * {{{ - * build/sbt "hive-thriftserver/test-only *ThriftServerQueryTestSuite" -Phive-thriftserver + * build/sbt "hive-thriftserver/testOnly *ThriftServerQueryTestSuite" -Phive-thriftserver * }}} * * This test suite won't generate golden files. To re-generate golden files for entire suite, run: * {{{ - * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/test-only *SQLQueryTestSuite" + * SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly *SQLQueryTestSuite" * }}} * * TODO: diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala index fd3a638c4fa44..3598f966b6259 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ThriftServerWithSparkContextSuite.scala @@ -18,9 +18,14 @@ package org.apache.spark.sql.hive.thriftserver import java.sql.SQLException +import java.util.concurrent.atomic.AtomicBoolean import org.apache.hive.service.cli.HiveSQLException +import org.apache.spark.TaskKilled +import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd} +import org.apache.spark.sql.internal.SQLConf + trait ThriftServerWithSparkContextSuite extends SharedThriftServer { test("the scratch dir will be deleted during server start but recreated with new operation") { @@ -79,6 +84,39 @@ trait ThriftServerWithSparkContextSuite extends SharedThriftServer { "java.lang.NumberFormatException: invalid input syntax for type numeric: 1.2")) } } + + test("SPARK-33526: Add config to control if cancel invoke interrupt task on thriftserver") { + withJdbcStatement { statement => + val forceCancel = new AtomicBoolean(false) + val listener = new SparkListener { + override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { + assert(taskEnd.reason.isInstanceOf[TaskKilled]) + if (forceCancel.get()) { + assert(System.currentTimeMillis() - taskEnd.taskInfo.launchTime < 1000) + } else { + // avoid accuracy, we check 2s instead of 3s. 
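+            // The timed-out query sleeps for 3 seconds with a 1-second query timeout. Without
+            // forceCancel the task keeps running until the sleep completes (about 3 seconds),
+            // so 2 seconds is used as a flakiness-tolerant lower bound on the task lifetime.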
+ assert(System.currentTimeMillis() - taskEnd.taskInfo.launchTime >= 2000) + } + } + } + + spark.sparkContext.addSparkListener(listener) + try { + Seq(true, false).foreach { force => + statement.setQueryTimeout(0) + statement.execute(s"SET ${SQLConf.THRIFTSERVER_FORCE_CANCEL.key}=$force") + statement.setQueryTimeout(1) + forceCancel.set(force) + val e = intercept[SQLException] { + statement.execute("select java_method('java.lang.Thread', 'sleep', 3000L)") + }.getMessage + assert(e.contains("Query timed out")) + } + } finally { + spark.sparkContext.removeSparkListener(listener) + } + } + } } diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala index d0b829c240327..2d0edb8eb8d48 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/UISeleniumSuite.scala @@ -32,7 +32,7 @@ import org.scalatestplus.selenium.WebBrowser import org.apache.spark.ui.SparkUICssErrorHandler class UISeleniumSuite - extends HiveThriftJdbcTest + extends HiveThriftServer2TestBase with WebBrowser with Matchers with BeforeAndAfterAll { implicit var webDriver: WebDriver = _ @@ -57,7 +57,7 @@ class UISeleniumSuite } } - override protected def serverStartCommand(port: Int) = { + override protected def serverStartCommand(): Seq[String] = { val portConf = if (mode == ServerMode.binary) { ConfVars.HIVE_SERVER2_THRIFT_PORT } else { @@ -71,7 +71,7 @@ class UISeleniumSuite | --hiveconf ${ConfVars.METASTOREWAREHOUSE}=$warehousePath | --hiveconf ${ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST}=localhost | --hiveconf ${ConfVars.HIVE_SERVER2_TRANSPORT_MODE}=$mode - | --hiveconf $portConf=$port + | --hiveconf $portConf=0 | --driver-class-path ${sys.props("java.class.path")} | --conf spark.ui.enabled=true | --conf spark.ui.port=$uiPort diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2ListenerSuite.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2ListenerSuite.scala index 9a9f574153a0a..3f0538dd1c943 100644 --- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2ListenerSuite.scala +++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2ListenerSuite.scala @@ -151,6 +151,7 @@ class HiveThriftServer2ListenerSuite extends SparkFunSuite with BeforeAndAfter { "stmt", "groupId", 0)) listener.onOtherEvent(SparkListenerThriftServerOperationParsed(unknownOperation, "query")) listener.onOtherEvent(SparkListenerThriftServerOperationCanceled(unknownOperation, 0)) + listener.onOtherEvent(SparkListenerThriftServerOperationTimeout(unknownOperation, 0)) listener.onOtherEvent(SparkListenerThriftServerOperationError(unknownOperation, "msg", "trace", 0)) listener.onOtherEvent(SparkListenerThriftServerOperationFinish(unknownOperation, 0)) diff --git a/sql/hive-thriftserver/v1.2/if/TCLIService.thrift b/sql/hive-thriftserver/v1.2/if/TCLIService.thrift deleted file mode 100644 index 225e319737811..0000000000000 --- a/sql/hive-thriftserver/v1.2/if/TCLIService.thrift +++ /dev/null @@ -1,1173 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Coding Conventions for this file: -// -// Structs/Enums/Unions -// * Struct, Enum, and Union names begin with a "T", -// and use a capital letter for each new word, with no underscores. -// * All fields should be declared as either optional or required. -// -// Functions -// * Function names start with a capital letter and have a capital letter for -// each new word, with no underscores. -// * Each function should take exactly one parameter, named TFunctionNameReq, -// and should return either void or TFunctionNameResp. This convention allows -// incremental updates. -// -// Services -// * Service names begin with the letter "T", use a capital letter for each -// new word (with no underscores), and end with the word "Service". - -namespace java org.apache.hive.service.cli.thrift -namespace cpp apache.hive.service.cli.thrift - -// List of protocol versions. A new token should be -// added to the end of this list every time a change is made. -enum TProtocolVersion { - HIVE_CLI_SERVICE_PROTOCOL_V1, - - // V2 adds support for asynchronous execution - HIVE_CLI_SERVICE_PROTOCOL_V2 - - // V3 add varchar type, primitive type qualifiers - HIVE_CLI_SERVICE_PROTOCOL_V3 - - // V4 add decimal precision/scale, char type - HIVE_CLI_SERVICE_PROTOCOL_V4 - - // V5 adds error details when GetOperationStatus returns in error state - HIVE_CLI_SERVICE_PROTOCOL_V5 - - // V6 uses binary type for binary payload (was string) and uses columnar result set - HIVE_CLI_SERVICE_PROTOCOL_V6 - - // V7 adds support for delegation token based connection - HIVE_CLI_SERVICE_PROTOCOL_V7 - - // V8 adds support for interval types - HIVE_CLI_SERVICE_PROTOCOL_V8 -} - -enum TTypeId { - BOOLEAN_TYPE, - TINYINT_TYPE, - SMALLINT_TYPE, - INT_TYPE, - BIGINT_TYPE, - FLOAT_TYPE, - DOUBLE_TYPE, - STRING_TYPE, - TIMESTAMP_TYPE, - BINARY_TYPE, - ARRAY_TYPE, - MAP_TYPE, - STRUCT_TYPE, - UNION_TYPE, - USER_DEFINED_TYPE, - DECIMAL_TYPE, - NULL_TYPE, - DATE_TYPE, - VARCHAR_TYPE, - CHAR_TYPE, - INTERVAL_YEAR_MONTH_TYPE, - INTERVAL_DAY_TIME_TYPE -} - -const set PRIMITIVE_TYPES = [ - TTypeId.BOOLEAN_TYPE, - TTypeId.TINYINT_TYPE, - TTypeId.SMALLINT_TYPE, - TTypeId.INT_TYPE, - TTypeId.BIGINT_TYPE, - TTypeId.FLOAT_TYPE, - TTypeId.DOUBLE_TYPE, - TTypeId.STRING_TYPE, - TTypeId.TIMESTAMP_TYPE, - TTypeId.BINARY_TYPE, - TTypeId.DECIMAL_TYPE, - TTypeId.NULL_TYPE, - TTypeId.DATE_TYPE, - TTypeId.VARCHAR_TYPE, - TTypeId.CHAR_TYPE, - TTypeId.INTERVAL_YEAR_MONTH_TYPE, - TTypeId.INTERVAL_DAY_TIME_TYPE -] - -const set COMPLEX_TYPES = [ - TTypeId.ARRAY_TYPE - TTypeId.MAP_TYPE - TTypeId.STRUCT_TYPE - TTypeId.UNION_TYPE - TTypeId.USER_DEFINED_TYPE -] - -const set COLLECTION_TYPES = [ - TTypeId.ARRAY_TYPE - TTypeId.MAP_TYPE -] - -const map TYPE_NAMES = { - TTypeId.BOOLEAN_TYPE: "BOOLEAN", - TTypeId.TINYINT_TYPE: "TINYINT", - TTypeId.SMALLINT_TYPE: "SMALLINT", - 
TTypeId.INT_TYPE: "INT", - TTypeId.BIGINT_TYPE: "BIGINT", - TTypeId.FLOAT_TYPE: "FLOAT", - TTypeId.DOUBLE_TYPE: "DOUBLE", - TTypeId.STRING_TYPE: "STRING", - TTypeId.TIMESTAMP_TYPE: "TIMESTAMP", - TTypeId.BINARY_TYPE: "BINARY", - TTypeId.ARRAY_TYPE: "ARRAY", - TTypeId.MAP_TYPE: "MAP", - TTypeId.STRUCT_TYPE: "STRUCT", - TTypeId.UNION_TYPE: "UNIONTYPE", - TTypeId.DECIMAL_TYPE: "DECIMAL", - TTypeId.NULL_TYPE: "NULL" - TTypeId.DATE_TYPE: "DATE" - TTypeId.VARCHAR_TYPE: "VARCHAR" - TTypeId.CHAR_TYPE: "CHAR" - TTypeId.INTERVAL_YEAR_MONTH_TYPE: "INTERVAL_YEAR_MONTH" - TTypeId.INTERVAL_DAY_TIME_TYPE: "INTERVAL_DAY_TIME" -} - -// Thrift does not support recursively defined types or forward declarations, -// which makes it difficult to represent Hive's nested types. -// To get around these limitations TTypeDesc employs a type list that maps -// integer "pointers" to TTypeEntry objects. The following examples show -// how different types are represented using this scheme: -// -// "INT": -// TTypeDesc { -// types = [ -// TTypeEntry.primitive_entry { -// type = INT_TYPE -// } -// ] -// } -// -// "ARRAY": -// TTypeDesc { -// types = [ -// TTypeEntry.array_entry { -// object_type_ptr = 1 -// }, -// TTypeEntry.primitive_entry { -// type = INT_TYPE -// } -// ] -// } -// -// "MAP": -// TTypeDesc { -// types = [ -// TTypeEntry.map_entry { -// key_type_ptr = 1 -// value_type_ptr = 2 -// }, -// TTypeEntry.primitive_entry { -// type = INT_TYPE -// }, -// TTypeEntry.primitive_entry { -// type = STRING_TYPE -// } -// ] -// } - -typedef i32 TTypeEntryPtr - -// Valid TTypeQualifiers key names -const string CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength" - -// Type qualifier key name for decimal -const string PRECISION = "precision" -const string SCALE = "scale" - -union TTypeQualifierValue { - 1: optional i32 i32Value - 2: optional string stringValue -} - -// Type qualifiers for primitive type. -struct TTypeQualifiers { - 1: required map qualifiers -} - -// Type entry for a primitive type. -struct TPrimitiveTypeEntry { - // The primitive type token. This must satisfy the condition - // that type is in the PRIMITIVE_TYPES set. - 1: required TTypeId type - 2: optional TTypeQualifiers typeQualifiers -} - -// Type entry for an ARRAY type. -struct TArrayTypeEntry { - 1: required TTypeEntryPtr objectTypePtr -} - -// Type entry for a MAP type. -struct TMapTypeEntry { - 1: required TTypeEntryPtr keyTypePtr - 2: required TTypeEntryPtr valueTypePtr -} - -// Type entry for a STRUCT type. -struct TStructTypeEntry { - 1: required map nameToTypePtr -} - -// Type entry for a UNIONTYPE type. -struct TUnionTypeEntry { - 1: required map nameToTypePtr -} - -struct TUserDefinedTypeEntry { - // The fully qualified name of the class implementing this type. - 1: required string typeClassName -} - -// We use a union here since Thrift does not support inheritance. -union TTypeEntry { - 1: TPrimitiveTypeEntry primitiveEntry - 2: TArrayTypeEntry arrayEntry - 3: TMapTypeEntry mapEntry - 4: TStructTypeEntry structEntry - 5: TUnionTypeEntry unionEntry - 6: TUserDefinedTypeEntry userDefinedTypeEntry -} - -// Type descriptor for columns. -struct TTypeDesc { - // The "top" type is always the first element of the list. - // If the top type is an ARRAY, MAP, STRUCT, or UNIONTYPE - // type, then subsequent elements represent nested types. - 1: required list types -} - -// A result set column descriptor. 
-struct TColumnDesc { - // The name of the column - 1: required string columnName - - // The type descriptor for this column - 2: required TTypeDesc typeDesc - - // The ordinal position of this column in the schema - 3: required i32 position - - 4: optional string comment -} - -// Metadata used to describe the schema (column names, types, comments) -// of result sets. -struct TTableSchema { - 1: required list columns -} - -// A Boolean column value. -struct TBoolValue { - // NULL if value is unset. - 1: optional bool value -} - -// A Byte column value. -struct TByteValue { - // NULL if value is unset. - 1: optional byte value -} - -// A signed, 16 bit column value. -struct TI16Value { - // NULL if value is unset - 1: optional i16 value -} - -// A signed, 32 bit column value -struct TI32Value { - // NULL if value is unset - 1: optional i32 value -} - -// A signed 64 bit column value -struct TI64Value { - // NULL if value is unset - 1: optional i64 value -} - -// A floating point 64 bit column value -struct TDoubleValue { - // NULL if value is unset - 1: optional double value -} - -struct TStringValue { - // NULL if value is unset - 1: optional string value -} - -// A single column value in a result set. -// Note that Hive's type system is richer than Thrift's, -// so in some cases we have to map multiple Hive types -// to the same Thrift type. On the client-side this is -// disambiguated by looking at the Schema of the -// result set. -union TColumnValue { - 1: TBoolValue boolVal // BOOLEAN - 2: TByteValue byteVal // TINYINT - 3: TI16Value i16Val // SMALLINT - 4: TI32Value i32Val // INT - 5: TI64Value i64Val // BIGINT, TIMESTAMP - 6: TDoubleValue doubleVal // FLOAT, DOUBLE - 7: TStringValue stringVal // STRING, LIST, MAP, STRUCT, UNIONTYPE, BINARY, DECIMAL, NULL, INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME -} - -// Represents a row in a rowset. -struct TRow { - 1: required list colVals -} - -struct TBoolColumn { - 1: required list values - 2: required binary nulls -} - -struct TByteColumn { - 1: required list values - 2: required binary nulls -} - -struct TI16Column { - 1: required list values - 2: required binary nulls -} - -struct TI32Column { - 1: required list values - 2: required binary nulls -} - -struct TI64Column { - 1: required list values - 2: required binary nulls -} - -struct TDoubleColumn { - 1: required list values - 2: required binary nulls -} - -struct TStringColumn { - 1: required list values - 2: required binary nulls -} - -struct TBinaryColumn { - 1: required list values - 2: required binary nulls -} - -// Note that Hive's type system is richer than Thrift's, -// so in some cases we have to map multiple Hive types -// to the same Thrift type. On the client-side this is -// disambiguated by looking at the Schema of the -// result set. -union TColumn { - 1: TBoolColumn boolVal // BOOLEAN - 2: TByteColumn byteVal // TINYINT - 3: TI16Column i16Val // SMALLINT - 4: TI32Column i32Val // INT - 5: TI64Column i64Val // BIGINT, TIMESTAMP - 6: TDoubleColumn doubleVal // FLOAT, DOUBLE - 7: TStringColumn stringVal // STRING, LIST, MAP, STRUCT, UNIONTYPE, DECIMAL, NULL - 8: TBinaryColumn binaryVal // BINARY -} - -// Represents a rowset -struct TRowSet { - // The starting row offset of this rowset. - 1: required i64 startRowOffset - 2: required list rows - 3: optional list columns -} - -// The return status code contained in each response. 
-enum TStatusCode { - SUCCESS_STATUS, - SUCCESS_WITH_INFO_STATUS, - STILL_EXECUTING_STATUS, - ERROR_STATUS, - INVALID_HANDLE_STATUS -} - -// The return status of a remote request -struct TStatus { - 1: required TStatusCode statusCode - - // If status is SUCCESS_WITH_INFO, info_msgs may be populated with - // additional diagnostic information. - 2: optional list infoMessages - - // If status is ERROR, then the following fields may be set - 3: optional string sqlState // as defined in the ISO/IEF CLI specification - 4: optional i32 errorCode // internal error code - 5: optional string errorMessage -} - -// The state of an operation (i.e. a query or other -// asynchronous operation that generates a result set) -// on the server. -enum TOperationState { - // The operation has been initialized - INITIALIZED_STATE, - - // The operation is running. In this state the result - // set is not available. - RUNNING_STATE, - - // The operation has completed. When an operation is in - // this state its result set may be fetched. - FINISHED_STATE, - - // The operation was canceled by a client - CANCELED_STATE, - - // The operation was closed by a client - CLOSED_STATE, - - // The operation failed due to an error - ERROR_STATE, - - // The operation is in an unrecognized state - UKNOWN_STATE, - - // The operation is in an pending state - PENDING_STATE, -} - -// A string identifier. This is interpreted literally. -typedef string TIdentifier - -// A search pattern. -// -// Valid search pattern characters: -// '_': Any single character. -// '%': Any sequence of zero or more characters. -// '\': Escape character used to include special characters, -// e.g. '_', '%', '\'. If a '\' precedes a non-special -// character it has no special meaning and is interpreted -// literally. -typedef string TPattern - - -// A search pattern or identifier. Used as input -// parameter for many of the catalog functions. -typedef string TPatternOrIdentifier - -struct THandleIdentifier { - // 16 byte globally unique identifier - // This is the public ID of the handle and - // can be used for reporting. - 1: required binary guid, - - // 16 byte secret generated by the server - // and used to verify that the handle is not - // being hijacked by another user. - 2: required binary secret, -} - -// Client-side handle to persistent -// session information on the server-side. -struct TSessionHandle { - 1: required THandleIdentifier sessionId -} - -// The subtype of an OperationHandle. -enum TOperationType { - EXECUTE_STATEMENT, - GET_TYPE_INFO, - GET_CATALOGS, - GET_SCHEMAS, - GET_TABLES, - GET_TABLE_TYPES, - GET_COLUMNS, - GET_FUNCTIONS, - UNKNOWN, -} - -// Client-side reference to a task running -// asynchronously on the server. -struct TOperationHandle { - 1: required THandleIdentifier operationId - 2: required TOperationType operationType - - // If hasResultSet = TRUE, then this operation - // generates a result set that can be fetched. - // Note that the result set may be empty. - // - // If hasResultSet = FALSE, then this operation - // does not generate a result set, and calling - // GetResultSetMetadata or FetchResults against - // this OperationHandle will generate an error. - 3: required bool hasResultSet - - // For operations that don't generate result sets, - // modifiedRowCount is either: - // - // 1) The number of rows that were modified by - // the DML operation (e.g. number of rows inserted, - // number of rows deleted, etc). - // - // 2) 0 for operations that don't modify or add rows. 
- // - // 3) < 0 if the operation is capable of modifiying rows, - // but Hive is unable to determine how many rows were - // modified. For example, Hive's LOAD DATA command - // doesn't generate row count information because - // Hive doesn't inspect the data as it is loaded. - // - // modifiedRowCount is unset if the operation generates - // a result set. - 4: optional double modifiedRowCount -} - - -// OpenSession() -// -// Open a session (connection) on the server against -// which operations may be executed. -struct TOpenSessionReq { - // The version of the HiveServer2 protocol that the client is using. - 1: required TProtocolVersion client_protocol = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8 - - // Username and password for authentication. - // Depending on the authentication scheme being used, - // this information may instead be provided by a lower - // protocol layer, in which case these fields may be - // left unset. - 2: optional string username - 3: optional string password - - // Configuration overlay which is applied when the session is - // first created. - 4: optional map configuration -} - -struct TOpenSessionResp { - 1: required TStatus status - - // The protocol version that the server is using. - 2: required TProtocolVersion serverProtocolVersion = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8 - - // Session Handle - 3: optional TSessionHandle sessionHandle - - // The configuration settings for this session. - 4: optional map configuration -} - - -// CloseSession() -// -// Closes the specified session and frees any resources -// currently allocated to that session. Any open -// operations in that session will be canceled. -struct TCloseSessionReq { - 1: required TSessionHandle sessionHandle -} - -struct TCloseSessionResp { - 1: required TStatus status -} - - - -enum TGetInfoType { - CLI_MAX_DRIVER_CONNECTIONS = 0, - CLI_MAX_CONCURRENT_ACTIVITIES = 1, - CLI_DATA_SOURCE_NAME = 2, - CLI_FETCH_DIRECTION = 8, - CLI_SERVER_NAME = 13, - CLI_SEARCH_PATTERN_ESCAPE = 14, - CLI_DBMS_NAME = 17, - CLI_DBMS_VER = 18, - CLI_ACCESSIBLE_TABLES = 19, - CLI_ACCESSIBLE_PROCEDURES = 20, - CLI_CURSOR_COMMIT_BEHAVIOR = 23, - CLI_DATA_SOURCE_READ_ONLY = 25, - CLI_DEFAULT_TXN_ISOLATION = 26, - CLI_IDENTIFIER_CASE = 28, - CLI_IDENTIFIER_QUOTE_CHAR = 29, - CLI_MAX_COLUMN_NAME_LEN = 30, - CLI_MAX_CURSOR_NAME_LEN = 31, - CLI_MAX_SCHEMA_NAME_LEN = 32, - CLI_MAX_CATALOG_NAME_LEN = 34, - CLI_MAX_TABLE_NAME_LEN = 35, - CLI_SCROLL_CONCURRENCY = 43, - CLI_TXN_CAPABLE = 46, - CLI_USER_NAME = 47, - CLI_TXN_ISOLATION_OPTION = 72, - CLI_INTEGRITY = 73, - CLI_GETDATA_EXTENSIONS = 81, - CLI_NULL_COLLATION = 85, - CLI_ALTER_TABLE = 86, - CLI_ORDER_BY_COLUMNS_IN_SELECT = 90, - CLI_SPECIAL_CHARACTERS = 94, - CLI_MAX_COLUMNS_IN_GROUP_BY = 97, - CLI_MAX_COLUMNS_IN_INDEX = 98, - CLI_MAX_COLUMNS_IN_ORDER_BY = 99, - CLI_MAX_COLUMNS_IN_SELECT = 100, - CLI_MAX_COLUMNS_IN_TABLE = 101, - CLI_MAX_INDEX_SIZE = 102, - CLI_MAX_ROW_SIZE = 104, - CLI_MAX_STATEMENT_LEN = 105, - CLI_MAX_TABLES_IN_SELECT = 106, - CLI_MAX_USER_NAME_LEN = 107, - CLI_OJ_CAPABILITIES = 115, - - CLI_XOPEN_CLI_YEAR = 10000, - CLI_CURSOR_SENSITIVITY = 10001, - CLI_DESCRIBE_PARAMETER = 10002, - CLI_CATALOG_NAME = 10003, - CLI_COLLATION_SEQ = 10004, - CLI_MAX_IDENTIFIER_LEN = 10005, -} - -union TGetInfoValue { - 1: string stringValue - 2: i16 smallIntValue - 3: i32 integerBitmask - 4: i32 integerFlag - 5: i32 binaryValue - 6: i64 lenValue -} - -// GetInfo() -// -// This function is based on ODBC's CLIGetInfo() function. 
-// The function returns general information about the data source -// using the same keys as ODBC. -struct TGetInfoReq { - // The session to run this request against - 1: required TSessionHandle sessionHandle - - 2: required TGetInfoType infoType -} - -struct TGetInfoResp { - 1: required TStatus status - - 2: required TGetInfoValue infoValue -} - - -// ExecuteStatement() -// -// Execute a statement. -// The returned OperationHandle can be used to check on the -// status of the statement, and to fetch results once the -// statement has finished executing. -struct TExecuteStatementReq { - // The session to execute the statement against - 1: required TSessionHandle sessionHandle - - // The statement to be executed (DML, DDL, SET, etc) - 2: required string statement - - // Configuration properties that are overlayed on top of the - // the existing session configuration before this statement - // is executed. These properties apply to this statement - // only and will not affect the subsequent state of the Session. - 3: optional map confOverlay - - // Execute asynchronously when runAsync is true - 4: optional bool runAsync = false -} - -struct TExecuteStatementResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - -// GetTypeInfo() -// -// Get information about types supported by the HiveServer instance. -// The information is returned as a result set which can be fetched -// using the OperationHandle provided in the response. -// -// Refer to the documentation for ODBC's CLIGetTypeInfo function for -// the format of the result set. -struct TGetTypeInfoReq { - // The session to run this request against. - 1: required TSessionHandle sessionHandle -} - -struct TGetTypeInfoResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetCatalogs() -// -// Returns the list of catalogs (databases) -// Results are ordered by TABLE_CATALOG -// -// Resultset columns : -// col1 -// name: TABLE_CAT -// type: STRING -// desc: Catalog name. NULL if not applicable. -// -struct TGetCatalogsReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle -} - -struct TGetCatalogsResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetSchemas() -// -// Retrieves the schema names available in this database. -// The results are ordered by TABLE_CATALOG and TABLE_SCHEM. -// col1 -// name: TABLE_SCHEM -// type: STRING -// desc: schema name -// col2 -// name: TABLE_CATALOG -// type: STRING -// desc: catalog name -struct TGetSchemasReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the catalog. Must not contain a search pattern. - 2: optional TIdentifier catalogName - - // schema name or pattern - 3: optional TPatternOrIdentifier schemaName -} - -struct TGetSchemasResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetTables() -// -// Returns a list of tables with catalog, schema, and table -// type information. The information is returned as a result -// set which can be fetched using the OperationHandle -// provided in the response. -// Results are ordered by TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, and TABLE_NAME -// -// Result Set Columns: -// -// col1 -// name: TABLE_CAT -// type: STRING -// desc: Catalog name. NULL if not applicable. -// -// col2 -// name: TABLE_SCHEM -// type: STRING -// desc: Schema name. -// -// col3 -// name: TABLE_NAME -// type: STRING -// desc: Table name. 
-// -// col4 -// name: TABLE_TYPE -// type: STRING -// desc: The table type, e.g. "TABLE", "VIEW", etc. -// -// col5 -// name: REMARKS -// type: STRING -// desc: Comments about the table -// -struct TGetTablesReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the catalog or a search pattern. - 2: optional TPatternOrIdentifier catalogName - - // Name of the schema or a search pattern. - 3: optional TPatternOrIdentifier schemaName - - // Name of the table or a search pattern. - 4: optional TPatternOrIdentifier tableName - - // List of table types to match - // e.g. "TABLE", "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY", - // "LOCAL TEMPORARY", "ALIAS", "SYNONYM", etc. - 5: optional list tableTypes -} - -struct TGetTablesResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetTableTypes() -// -// Returns the table types available in this database. -// The results are ordered by table type. -// -// col1 -// name: TABLE_TYPE -// type: STRING -// desc: Table type name. -struct TGetTableTypesReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle -} - -struct TGetTableTypesResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetColumns() -// -// Returns a list of columns in the specified tables. -// The information is returned as a result set which can be fetched -// using the OperationHandle provided in the response. -// Results are ordered by TABLE_CAT, TABLE_SCHEM, TABLE_NAME, -// and ORDINAL_POSITION. -// -// Result Set Columns are the same as those for the ODBC CLIColumns -// function. -// -struct TGetColumnsReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the catalog. Must not contain a search pattern. - 2: optional TIdentifier catalogName - - // Schema name or search pattern - 3: optional TPatternOrIdentifier schemaName - - // Table name or search pattern - 4: optional TPatternOrIdentifier tableName - - // Column name or search pattern - 5: optional TPatternOrIdentifier columnName -} - -struct TGetColumnsResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetFunctions() -// -// Returns a list of functions supported by the data source. The -// behavior of this function matches -// java.sql.DatabaseMetaData.getFunctions() both in terms of -// inputs and outputs. -// -// Result Set Columns: -// -// col1 -// name: FUNCTION_CAT -// type: STRING -// desc: Function catalog (may be null) -// -// col2 -// name: FUNCTION_SCHEM -// type: STRING -// desc: Function schema (may be null) -// -// col3 -// name: FUNCTION_NAME -// type: STRING -// desc: Function name. This is the name used to invoke the function. -// -// col4 -// name: REMARKS -// type: STRING -// desc: Explanatory comment on the function. -// -// col5 -// name: FUNCTION_TYPE -// type: SMALLINT -// desc: Kind of function. One of: -// * functionResultUnknown - Cannot determine if a return value or a table -// will be returned. -// * functionNoTable - Does not a return a table. -// * functionReturnsTable - Returns a table. -// -// col6 -// name: SPECIFIC_NAME -// type: STRING -// desc: The name which uniquely identifies this function within its schema. -// In this case this is the fully qualified class name of the class -// that implements this function. 
-// -struct TGetFunctionsReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // A catalog name; must match the catalog name as it is stored in the - // database; "" retrieves those without a catalog; null means - // that the catalog name should not be used to narrow the search. - 2: optional TIdentifier catalogName - - // A schema name pattern; must match the schema name as it is stored - // in the database; "" retrieves those without a schema; null means - // that the schema name should not be used to narrow the search. - 3: optional TPatternOrIdentifier schemaName - - // A function name pattern; must match the function name as it is stored - // in the database. - 4: required TPatternOrIdentifier functionName -} - -struct TGetFunctionsResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetOperationStatus() -// -// Get the status of an operation running on the server. -struct TGetOperationStatusReq { - // Session to run this request against - 1: required TOperationHandle operationHandle -} - -struct TGetOperationStatusResp { - 1: required TStatus status - 2: optional TOperationState operationState - - // If operationState is ERROR_STATE, then the following fields may be set - // sqlState as defined in the ISO/IEF CLI specification - 3: optional string sqlState - - // Internal error code - 4: optional i32 errorCode - - // Error message - 5: optional string errorMessage -} - - -// CancelOperation() -// -// Cancels processing on the specified operation handle and -// frees any resources which were allocated. -struct TCancelOperationReq { - // Operation to cancel - 1: required TOperationHandle operationHandle -} - -struct TCancelOperationResp { - 1: required TStatus status -} - - -// CloseOperation() -// -// Given an operation in the FINISHED, CANCELED, -// or ERROR states, CloseOperation() will free -// all of the resources which were allocated on -// the server to service the operation. -struct TCloseOperationReq { - 1: required TOperationHandle operationHandle -} - -struct TCloseOperationResp { - 1: required TStatus status -} - - -// GetResultSetMetadata() -// -// Retrieves schema information for the specified operation -struct TGetResultSetMetadataReq { - // Operation for which to fetch result set schema information - 1: required TOperationHandle operationHandle -} - -struct TGetResultSetMetadataResp { - 1: required TStatus status - 2: optional TTableSchema schema -} - - -enum TFetchOrientation { - // Get the next rowset. The fetch offset is ignored. - FETCH_NEXT, - - // Get the previous rowset. The fetch offset is ignored. - FETCH_PRIOR, - - // Return the rowset at the given fetch offset relative - // to the current rowset. - // NOT SUPPORTED - FETCH_RELATIVE, - - // Return the rowset at the specified fetch offset. - // NOT SUPPORTED - FETCH_ABSOLUTE, - - // Get the first rowset in the result set. - FETCH_FIRST, - - // Get the last rowset in the result set. - // NOT SUPPORTED - FETCH_LAST -} - -// FetchResults() -// -// Fetch rows from the server corresponding to -// a particular OperationHandle. -struct TFetchResultsReq { - // Operation from which to fetch results. - 1: required TOperationHandle operationHandle - - // The fetch orientation. This must be either - // FETCH_NEXT, FETCH_PRIOR or FETCH_FIRST. Defaults to FETCH_NEXT. - 2: required TFetchOrientation orientation = TFetchOrientation.FETCH_NEXT - - // Max number of rows that should be returned in - // the rowset. 
- 3: required i64 maxRows - - // The type of a fetch results request. 0 represents Query output. 1 represents Log - 4: optional i16 fetchType = 0 -} - -struct TFetchResultsResp { - 1: required TStatus status - - // TRUE if there are more rows left to fetch from the server. - 2: optional bool hasMoreRows - - // The rowset. This is optional so that we have the - // option in the future of adding alternate formats for - // representing result set data, e.g. delimited strings, - // binary encoded, etc. - 3: optional TRowSet results -} - -// GetDelegationToken() -// Retrieve delegation token for the current user -struct TGetDelegationTokenReq { - // session handle - 1: required TSessionHandle sessionHandle - - // userid for the proxy user - 2: required string owner - - // designated renewer userid - 3: required string renewer -} - -struct TGetDelegationTokenResp { - // status of the request - 1: required TStatus status - - // delegation token string - 2: optional string delegationToken -} - -// CancelDelegationToken() -// Cancel the given delegation token -struct TCancelDelegationTokenReq { - // session handle - 1: required TSessionHandle sessionHandle - - // delegation token to cancel - 2: required string delegationToken -} - -struct TCancelDelegationTokenResp { - // status of the request - 1: required TStatus status -} - -// RenewDelegationToken() -// Renew the given delegation token -struct TRenewDelegationTokenReq { - // session handle - 1: required TSessionHandle sessionHandle - - // delegation token to renew - 2: required string delegationToken -} - -struct TRenewDelegationTokenResp { - // status of the request - 1: required TStatus status -} - -service TCLIService { - - TOpenSessionResp OpenSession(1:TOpenSessionReq req); - - TCloseSessionResp CloseSession(1:TCloseSessionReq req); - - TGetInfoResp GetInfo(1:TGetInfoReq req); - - TExecuteStatementResp ExecuteStatement(1:TExecuteStatementReq req); - - TGetTypeInfoResp GetTypeInfo(1:TGetTypeInfoReq req); - - TGetCatalogsResp GetCatalogs(1:TGetCatalogsReq req); - - TGetSchemasResp GetSchemas(1:TGetSchemasReq req); - - TGetTablesResp GetTables(1:TGetTablesReq req); - - TGetTableTypesResp GetTableTypes(1:TGetTableTypesReq req); - - TGetColumnsResp GetColumns(1:TGetColumnsReq req); - - TGetFunctionsResp GetFunctions(1:TGetFunctionsReq req); - - TGetOperationStatusResp GetOperationStatus(1:TGetOperationStatusReq req); - - TCancelOperationResp CancelOperation(1:TCancelOperationReq req); - - TCloseOperationResp CloseOperation(1:TCloseOperationReq req); - - TGetResultSetMetadataResp GetResultSetMetadata(1:TGetResultSetMetadataReq req); - - TFetchResultsResp FetchResults(1:TFetchResultsReq req); - - TGetDelegationTokenResp GetDelegationToken(1:TGetDelegationTokenReq req); - - TCancelDelegationTokenResp CancelDelegationToken(1:TCancelDelegationTokenReq req); - - TRenewDelegationTokenResp RenewDelegationToken(1:TRenewDelegationTokenReq req); -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java deleted file mode 100644 index 6323d34eac734..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TArrayTypeEntry.java +++ /dev/null @@ -1,383 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import 
org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TArrayTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TArrayTypeEntry"); - - private static final org.apache.thrift.protocol.TField OBJECT_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("objectTypePtr", org.apache.thrift.protocol.TType.I32, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TArrayTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TArrayTypeEntryTupleSchemeFactory()); - } - - private int objectTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OBJECT_TYPE_PTR((short)1, "objectTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OBJECT_TYPE_PTR - return OBJECT_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __OBJECTTYPEPTR_ISSET_ID = 0; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OBJECT_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("objectTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TArrayTypeEntry.class, metaDataMap); - } - - public TArrayTypeEntry() { - } - - public TArrayTypeEntry( - int objectTypePtr) - { - this(); - this.objectTypePtr = objectTypePtr; - setObjectTypePtrIsSet(true); - } - - /** - * Performs a deep copy on other. - */ - public TArrayTypeEntry(TArrayTypeEntry other) { - __isset_bitfield = other.__isset_bitfield; - this.objectTypePtr = other.objectTypePtr; - } - - public TArrayTypeEntry deepCopy() { - return new TArrayTypeEntry(this); - } - - @Override - public void clear() { - setObjectTypePtrIsSet(false); - this.objectTypePtr = 0; - } - - public int getObjectTypePtr() { - return this.objectTypePtr; - } - - public void setObjectTypePtr(int objectTypePtr) { - this.objectTypePtr = objectTypePtr; - setObjectTypePtrIsSet(true); - } - - public void unsetObjectTypePtr() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __OBJECTTYPEPTR_ISSET_ID); - } - - /** Returns true if field objectTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetObjectTypePtr() { - return EncodingUtils.testBit(__isset_bitfield, __OBJECTTYPEPTR_ISSET_ID); - } - - public void setObjectTypePtrIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __OBJECTTYPEPTR_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OBJECT_TYPE_PTR: - if (value == null) { - unsetObjectTypePtr(); - } else { - setObjectTypePtr((Integer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OBJECT_TYPE_PTR: - return Integer.valueOf(getObjectTypePtr()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OBJECT_TYPE_PTR: - return isSetObjectTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TArrayTypeEntry) - return this.equals((TArrayTypeEntry)that); - return false; - } - - public boolean equals(TArrayTypeEntry that) { - if (that == null) - return false; - - boolean this_present_objectTypePtr = true; - boolean that_present_objectTypePtr = true; - if 
(this_present_objectTypePtr || that_present_objectTypePtr) { - if (!(this_present_objectTypePtr && that_present_objectTypePtr)) - return false; - if (this.objectTypePtr != that.objectTypePtr) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_objectTypePtr = true; - builder.append(present_objectTypePtr); - if (present_objectTypePtr) - builder.append(objectTypePtr); - - return builder.toHashCode(); - } - - public int compareTo(TArrayTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TArrayTypeEntry typedOther = (TArrayTypeEntry)other; - - lastComparison = Boolean.valueOf(isSetObjectTypePtr()).compareTo(typedOther.isSetObjectTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetObjectTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.objectTypePtr, typedOther.objectTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TArrayTypeEntry("); - boolean first = true; - - sb.append("objectTypePtr:"); - sb.append(this.objectTypePtr); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetObjectTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'objectTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
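[Editor's note, not part of the removed sources.] TArrayTypeEntry follows the standard Thrift 0.9 codegen pattern seen throughout these files: a _Fields enum, an isset bitfield for primitive fields, validate() for required fields, and StandardScheme/TupleScheme serializers selected from the protocol in use. A minimal round-trip sketch under that reading follows; the class name, buffer size and field value are arbitrary, and TMemoryBuffer/TBinaryProtocol come from libthrift, not from this file.

// Round-trips a generated struct through an in-memory transport, exercising the
// same write()/read()/validate() path the generated scheme classes implement.
import org.apache.hive.service.cli.thrift.TArrayTypeEntry;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class ThriftRoundTripSketch {
  public static void main(String[] args) throws Exception {
    TMemoryBuffer buffer = new TMemoryBuffer(64);
    TBinaryProtocol protocol = new TBinaryProtocol(buffer);

    TArrayTypeEntry out = new TArrayTypeEntry(7);  // objectTypePtr = 7 (arbitrary)
    out.write(protocol);                           // dispatches to the StandardScheme

    TArrayTypeEntry in = new TArrayTypeEntry();
    in.read(protocol);                             // read() finishes by calling validate()
    System.out.println(in.getObjectTypePtr());     // prints 7
  }
}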
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TArrayTypeEntryStandardSchemeFactory implements SchemeFactory { - public TArrayTypeEntryStandardScheme getScheme() { - return new TArrayTypeEntryStandardScheme(); - } - } - - private static class TArrayTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OBJECT_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.objectTypePtr = iprot.readI32(); - struct.setObjectTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldBegin(OBJECT_TYPE_PTR_FIELD_DESC); - oprot.writeI32(struct.objectTypePtr); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TArrayTypeEntryTupleSchemeFactory implements SchemeFactory { - public TArrayTypeEntryTupleScheme getScheme() { - return new TArrayTypeEntryTupleScheme(); - } - } - - private static class TArrayTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.objectTypePtr); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.objectTypePtr = iprot.readI32(); - struct.setObjectTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBinaryColumn.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBinaryColumn.java deleted file mode 100644 index 6b1b054d1acad..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBinaryColumn.java +++ /dev/null @@ -1,550 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import 
java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TBinaryColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBinaryColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TBinaryColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TBinaryColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TBinaryColumn.class, metaDataMap); - } - - public TBinaryColumn() { - } - - public TBinaryColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. - */ - public TBinaryColumn(TBinaryColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (ByteBuffer other_element : other.values) { - ByteBuffer temp_binary_element = org.apache.thrift.TBaseHelper.copyBinary(other_element); -; - __this__values.add(temp_binary_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TBinaryColumn deepCopy() { - return new TBinaryColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(ByteBuffer elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TBinaryColumn) - return this.equals((TBinaryColumn)that); - return false; - } - - public boolean equals(TBinaryColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TBinaryColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TBinaryColumn typedOther = (TBinaryColumn)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return 
_Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TBinaryColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TBinaryColumnStandardSchemeFactory implements SchemeFactory { - public TBinaryColumnStandardScheme getScheme() { - return new TBinaryColumnStandardScheme(); - } - } - - private static class TBinaryColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TBinaryColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list110 = iprot.readListBegin(); - struct.values = new ArrayList(_list110.size); - for (int _i111 = 0; _i111 < _list110.size; ++_i111) - { - ByteBuffer _elem112; // optional - _elem112 = iprot.readBinary(); - struct.values.add(_elem112); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void 
write(org.apache.thrift.protocol.TProtocol oprot, TBinaryColumn struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); - for (ByteBuffer _iter113 : struct.values) - { - oprot.writeBinary(_iter113); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TBinaryColumnTupleSchemeFactory implements SchemeFactory { - public TBinaryColumnTupleScheme getScheme() { - return new TBinaryColumnTupleScheme(); - } - } - - private static class TBinaryColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TBinaryColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (ByteBuffer _iter114 : struct.values) - { - oprot.writeBinary(_iter114); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TBinaryColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list115 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.values = new ArrayList(_list115.size); - for (int _i116 = 0; _i116 < _list115.size; ++_i116) - { - ByteBuffer _elem117; // optional - _elem117 = iprot.readBinary(); - struct.values.add(_elem117); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBoolColumn.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBoolColumn.java deleted file mode 100644 index efd571cfdfbbf..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBoolColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TBoolColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TBoolColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TBoolColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TBoolColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TBoolColumn.class, metaDataMap); - } - - public TBoolColumn() { - } - - public TBoolColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. 
- */ - public TBoolColumn(TBoolColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (Boolean other_element : other.values) { - __this__values.add(other_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TBoolColumn deepCopy() { - return new TBoolColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(boolean elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? (ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TBoolColumn) - return this.equals((TBoolColumn)that); - return false; - } - - public boolean equals(TBoolColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if 
(this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TBoolColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TBoolColumn typedOther = (TBoolColumn)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TBoolColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TBoolColumnStandardSchemeFactory implements SchemeFactory { - public TBoolColumnStandardScheme getScheme() { - return new TBoolColumnStandardScheme(); - } - } - - private static class TBoolColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TBoolColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list54 = iprot.readListBegin(); - struct.values = new ArrayList(_list54.size); - for (int _i55 = 0; _i55 < _list54.size; ++_i55) - { - boolean _elem56; // optional - _elem56 = iprot.readBool(); - struct.values.add(_elem56); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TBoolColumn struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BOOL, struct.values.size())); - for (boolean _iter57 : struct.values) - { - oprot.writeBool(_iter57); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TBoolColumnTupleSchemeFactory implements SchemeFactory { - public TBoolColumnTupleScheme getScheme() { - return new TBoolColumnTupleScheme(); - } - } - - private static class TBoolColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TBoolColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (boolean _iter58 : struct.values) - { - oprot.writeBool(_iter58); - } - } - 
oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TBoolColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list59 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BOOL, iprot.readI32()); - struct.values = new ArrayList(_list59.size); - for (int _i60 = 0; _i60 < _list59.size; ++_i60) - { - boolean _elem61; // optional - _elem61 = iprot.readBool(); - struct.values.add(_elem61); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBoolValue.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBoolValue.java deleted file mode 100644 index c7495ee79e4b5..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TBoolValue.java +++ /dev/null @@ -1,386 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TBoolValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBoolValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.BOOL, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TBoolValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TBoolValueTupleSchemeFactory()); - } - - private boolean value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TBoolValue.class, metaDataMap); - } - - public TBoolValue() { - } - - /** - * Performs a deep copy on other. - */ - public TBoolValue(TBoolValue other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TBoolValue deepCopy() { - return new TBoolValue(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = false; - } - - public boolean isValue() { - return this.value; - } - - public void setValue(boolean value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Boolean)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return Boolean.valueOf(isValue()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TBoolValue) - return this.equals((TBoolValue)that); - return false; - } - - public boolean equals(TBoolValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) 
{ - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_value = true && (isSetValue()); - builder.append(present_value); - if (present_value) - builder.append(value); - - return builder.toHashCode(); - } - - public int compareTo(TBoolValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TBoolValue typedOther = (TBoolValue)other; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TBoolValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
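[Editor's note, not part of the removed sources.] TBoolValue's only field is optional, which is why the generated code guards it with an isset bit: toString() and both serializers skip the field entirely until setValue() has been called. A tiny illustration (class name is hypothetical; the expected output in the comments follows from the generated code shown above):

import org.apache.hive.service.cli.thrift.TBoolValue;

public class OptionalFieldSketch {
  public static void main(String[] args) {
    TBoolValue v = new TBoolValue();
    System.out.println(v.isSetValue());  // false: isset bit clear, field not serialized
    System.out.println(v);               // TBoolValue()

    v.setValue(false);                   // even an explicit 'false' counts as set
    System.out.println(v.isSetValue());  // true
    System.out.println(v);               // TBoolValue(value:false)
  }
}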
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TBoolValueStandardSchemeFactory implements SchemeFactory { - public TBoolValueStandardScheme getScheme() { - return new TBoolValueStandardScheme(); - } - } - - private static class TBoolValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TBoolValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.value = iprot.readBool(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TBoolValue struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeBool(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TBoolValueTupleSchemeFactory implements SchemeFactory { - public TBoolValueTupleScheme getScheme() { - return new TBoolValueTupleScheme(); - } - } - - private static class TBoolValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TBoolValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeBool(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TBoolValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readBool(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TByteColumn.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TByteColumn.java deleted file mode 100644 index 169bfdeab3eea..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TByteColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import 
org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TByteColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TByteColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TByteColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TByteColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TByteColumn.class, metaDataMap); - } - - public TByteColumn() { - } - - public TByteColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. - */ - public TByteColumn(TByteColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (Byte other_element : other.values) { - __this__values.add(other_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TByteColumn deepCopy() { - return new TByteColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(byte elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TByteColumn) - return this.equals((TByteColumn)that); - return false; - } - - public boolean equals(TByteColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TByteColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TByteColumn typedOther = (TByteColumn)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - 
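For orientation while reading the deleted TByteColumn above: the struct pairs a required list of byte values with a required binary "nulls" field, one bit per row, where a set bit marks that row as NULL and the placeholder byte in "values" is ignored. Below is a minimal sketch using only the generated no-arg constructor, addToValues, setNulls, getValuesSize and getNulls shown in this diff; the bit convention and the sample values are illustrative assumptions, not taken from the diff itself.

import java.util.BitSet;
import org.apache.hive.service.cli.thrift.TByteColumn;

// Illustrative sketch: build a 4-row TByteColumn where row 2 is NULL.
public class TByteColumnSketch {
  public static void main(String[] args) {
    TByteColumn col = new TByteColumn();            // generated no-arg constructor

    byte[] rows = {1, 2, 0, 4};                     // the 0 at index 2 is a placeholder for NULL
    for (byte b : rows) {
      col.addToValues(b);                           // appends to the required 'values' list
    }

    BitSet nullBits = new BitSet(rows.length);
    nullBits.set(2);                                // assumed convention: set bit == NULL row
    col.setNulls(nullBits.toByteArray());           // stored in the required binary 'nulls' field

    System.out.println("rows=" + col.getValuesSize()
        + ", nulls bitmap bytes=" + col.getNulls().length);
  }
}

Note that validate(), shown further down in this deleted file, throws if either required field is left unset, which is why both "values" and "nulls" are populated here.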
public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TByteColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TByteColumnStandardSchemeFactory implements SchemeFactory { - public TByteColumnStandardScheme getScheme() { - return new TByteColumnStandardScheme(); - } - } - - private static class TByteColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TByteColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list62 = iprot.readListBegin(); - struct.values = new ArrayList(_list62.size); - for (int _i63 = 0; _i63 < _list62.size; ++_i63) - { - byte _elem64; // optional - _elem64 = iprot.readByte(); - struct.values.add(_elem64); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TByteColumn struct) throws 
org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BYTE, struct.values.size())); - for (byte _iter65 : struct.values) - { - oprot.writeByte(_iter65); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TByteColumnTupleSchemeFactory implements SchemeFactory { - public TByteColumnTupleScheme getScheme() { - return new TByteColumnTupleScheme(); - } - } - - private static class TByteColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TByteColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (byte _iter66 : struct.values) - { - oprot.writeByte(_iter66); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TByteColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list67 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BYTE, iprot.readI32()); - struct.values = new ArrayList(_list67.size); - for (int _i68 = 0; _i68 < _list67.size; ++_i68) - { - byte _elem69; // optional - _elem69 = iprot.readByte(); - struct.values.add(_elem69); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TByteValue.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TByteValue.java deleted file mode 100644 index 23d9693759968..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TByteValue.java +++ /dev/null @@ -1,386 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TByteValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TByteValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new 
org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.BYTE, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TByteValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TByteValueTupleSchemeFactory()); - } - - private byte value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TByteValue.class, metaDataMap); - } - - public TByteValue() { - } - - /** - * Performs a deep copy on other. 
- */ - public TByteValue(TByteValue other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TByteValue deepCopy() { - return new TByteValue(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public byte getValue() { - return this.value; - } - - public void setValue(byte value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Byte)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return Byte.valueOf(getValue()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TByteValue) - return this.equals((TByteValue)that); - return false; - } - - public boolean equals(TByteValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_value = true && (isSetValue()); - builder.append(present_value); - if (present_value) - builder.append(value); - - return builder.toHashCode(); - } - - public int compareTo(TByteValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TByteValue typedOther = (TByteValue)other; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TByteValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); 
- sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TByteValueStandardSchemeFactory implements SchemeFactory { - public TByteValueStandardScheme getScheme() { - return new TByteValueStandardScheme(); - } - } - - private static class TByteValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TByteValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) { - struct.value = iprot.readByte(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TByteValue struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeByte(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TByteValueTupleSchemeFactory implements SchemeFactory { - public TByteValueTupleScheme getScheme() { - return new TByteValueTupleScheme(); - } - } - - private static class TByteValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TByteValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeByte(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TByteValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readByte(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCLIService.java 
b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCLIService.java deleted file mode 100644 index 54851b8d51317..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCLIService.java +++ /dev/null @@ -1,15414 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCLIService { - - public interface Iface { - - public TOpenSessionResp OpenSession(TOpenSessionReq req) throws org.apache.thrift.TException; - - public TCloseSessionResp CloseSession(TCloseSessionReq req) throws org.apache.thrift.TException; - - public TGetInfoResp GetInfo(TGetInfoReq req) throws org.apache.thrift.TException; - - public TExecuteStatementResp ExecuteStatement(TExecuteStatementReq req) throws org.apache.thrift.TException; - - public TGetTypeInfoResp GetTypeInfo(TGetTypeInfoReq req) throws org.apache.thrift.TException; - - public TGetCatalogsResp GetCatalogs(TGetCatalogsReq req) throws org.apache.thrift.TException; - - public TGetSchemasResp GetSchemas(TGetSchemasReq req) throws org.apache.thrift.TException; - - public TGetTablesResp GetTables(TGetTablesReq req) throws org.apache.thrift.TException; - - public TGetTableTypesResp GetTableTypes(TGetTableTypesReq req) throws org.apache.thrift.TException; - - public TGetColumnsResp GetColumns(TGetColumnsReq req) throws org.apache.thrift.TException; - - public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException; - - public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException; - - public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException; - - public TCloseOperationResp CloseOperation(TCloseOperationReq req) throws org.apache.thrift.TException; - - public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req) throws org.apache.thrift.TException; - - public TFetchResultsResp FetchResults(TFetchResultsReq req) throws org.apache.thrift.TException; - - public TGetDelegationTokenResp GetDelegationToken(TGetDelegationTokenReq req) throws org.apache.thrift.TException; - - public TCancelDelegationTokenResp CancelDelegationToken(TCancelDelegationTokenReq req) throws org.apache.thrift.TException; - - public TRenewDelegationTokenResp RenewDelegationToken(TRenewDelegationTokenReq req) throws org.apache.thrift.TException; - - } - - public interface AsyncIface { - - public void OpenSession(TOpenSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public 
void CloseSession(TCloseSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetInfo(TGetInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void ExecuteStatement(TExecuteStatementReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetTypeInfo(TGetTypeInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetCatalogs(TGetCatalogsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetSchemas(TGetSchemasReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetTables(TGetTablesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetTableTypes(TGetTableTypesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetColumns(TGetColumnsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetFunctions(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetOperationStatus(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void CancelOperation(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void CloseOperation(TCloseOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetResultSetMetadata(TGetResultSetMetadataReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void FetchResults(TFetchResultsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetDelegationToken(TGetDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void CancelDelegationToken(TCancelDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void RenewDelegationToken(TRenewDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - } - - public static class Client extends org.apache.thrift.TServiceClient implements Iface { - public static class Factory implements org.apache.thrift.TServiceClientFactory { - public Factory() {} - public Client getClient(org.apache.thrift.protocol.TProtocol prot) { - return new Client(prot); - } - public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { - return new Client(iprot, oprot); - } - } - - public Client(org.apache.thrift.protocol.TProtocol prot) - { - super(prot, prot); - } - - public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { - super(iprot, oprot); - } - - public TOpenSessionResp OpenSession(TOpenSessionReq req) throws org.apache.thrift.TException - { - send_OpenSession(req); - return recv_OpenSession(); - } - - 
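As a reading aid for the blocking TCLIService.Client above: every RPC is a generated send_X/recv_X pair hidden behind a plain method call. The sketch below wires that client over a TSocket/TBinaryProtocol pair and walks the usual OpenSession, ExecuteStatement, FetchResults, CloseOperation, CloseSession flow. The host/port, the TOpenSessionReq setter, and the required-argument constructors on the request structs are assumptions drawn from the HiveServer2 Thrift IDL rather than from this diff, so treat it as illustrative only.

import org.apache.hive.service.cli.thrift.*;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

// Hypothetical usage sketch for the blocking TCLIService.Client deleted above.
public class TCLIServiceClientSketch {
  public static void main(String[] args) throws Exception {
    TSocket transport = new TSocket("localhost", 10000);   // assumed HiveServer2 endpoint
    transport.open();
    TCLIService.Client client = new TCLIService.Client(new TBinaryProtocol(transport));

    TOpenSessionReq openReq = new TOpenSessionReq();
    openReq.setUsername("anonymous");                      // setter assumed from the Thrift IDL
    TOpenSessionResp openResp = client.OpenSession(openReq);
    TSessionHandle session = openResp.getSessionHandle();

    TExecuteStatementResp execResp =
        client.ExecuteStatement(new TExecuteStatementReq(session, "SELECT 1"));
    TOperationHandle op = execResp.getOperationHandle();

    TFetchResultsResp fetchResp =
        client.FetchResults(new TFetchResultsReq(op, TFetchOrientation.FETCH_NEXT, 100));
    System.out.println("fetch status: " + fetchResp.getStatus());

    client.CloseOperation(new TCloseOperationReq(op));
    client.CloseSession(new TCloseSessionReq(session));
    transport.close();
  }
}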
public void send_OpenSession(TOpenSessionReq req) throws org.apache.thrift.TException - { - OpenSession_args args = new OpenSession_args(); - args.setReq(req); - sendBase("OpenSession", args); - } - - public TOpenSessionResp recv_OpenSession() throws org.apache.thrift.TException - { - OpenSession_result result = new OpenSession_result(); - receiveBase(result, "OpenSession"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "OpenSession failed: unknown result"); - } - - public TCloseSessionResp CloseSession(TCloseSessionReq req) throws org.apache.thrift.TException - { - send_CloseSession(req); - return recv_CloseSession(); - } - - public void send_CloseSession(TCloseSessionReq req) throws org.apache.thrift.TException - { - CloseSession_args args = new CloseSession_args(); - args.setReq(req); - sendBase("CloseSession", args); - } - - public TCloseSessionResp recv_CloseSession() throws org.apache.thrift.TException - { - CloseSession_result result = new CloseSession_result(); - receiveBase(result, "CloseSession"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CloseSession failed: unknown result"); - } - - public TGetInfoResp GetInfo(TGetInfoReq req) throws org.apache.thrift.TException - { - send_GetInfo(req); - return recv_GetInfo(); - } - - public void send_GetInfo(TGetInfoReq req) throws org.apache.thrift.TException - { - GetInfo_args args = new GetInfo_args(); - args.setReq(req); - sendBase("GetInfo", args); - } - - public TGetInfoResp recv_GetInfo() throws org.apache.thrift.TException - { - GetInfo_result result = new GetInfo_result(); - receiveBase(result, "GetInfo"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetInfo failed: unknown result"); - } - - public TExecuteStatementResp ExecuteStatement(TExecuteStatementReq req) throws org.apache.thrift.TException - { - send_ExecuteStatement(req); - return recv_ExecuteStatement(); - } - - public void send_ExecuteStatement(TExecuteStatementReq req) throws org.apache.thrift.TException - { - ExecuteStatement_args args = new ExecuteStatement_args(); - args.setReq(req); - sendBase("ExecuteStatement", args); - } - - public TExecuteStatementResp recv_ExecuteStatement() throws org.apache.thrift.TException - { - ExecuteStatement_result result = new ExecuteStatement_result(); - receiveBase(result, "ExecuteStatement"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "ExecuteStatement failed: unknown result"); - } - - public TGetTypeInfoResp GetTypeInfo(TGetTypeInfoReq req) throws org.apache.thrift.TException - { - send_GetTypeInfo(req); - return recv_GetTypeInfo(); - } - - public void send_GetTypeInfo(TGetTypeInfoReq req) throws org.apache.thrift.TException - { - GetTypeInfo_args args = new GetTypeInfo_args(); - args.setReq(req); - sendBase("GetTypeInfo", args); - } - - public TGetTypeInfoResp recv_GetTypeInfo() throws org.apache.thrift.TException - { - GetTypeInfo_result result = new GetTypeInfo_result(); - receiveBase(result, "GetTypeInfo"); - if (result.isSetSuccess()) { - return result.success; - } - throw new 
org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetTypeInfo failed: unknown result"); - } - - public TGetCatalogsResp GetCatalogs(TGetCatalogsReq req) throws org.apache.thrift.TException - { - send_GetCatalogs(req); - return recv_GetCatalogs(); - } - - public void send_GetCatalogs(TGetCatalogsReq req) throws org.apache.thrift.TException - { - GetCatalogs_args args = new GetCatalogs_args(); - args.setReq(req); - sendBase("GetCatalogs", args); - } - - public TGetCatalogsResp recv_GetCatalogs() throws org.apache.thrift.TException - { - GetCatalogs_result result = new GetCatalogs_result(); - receiveBase(result, "GetCatalogs"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetCatalogs failed: unknown result"); - } - - public TGetSchemasResp GetSchemas(TGetSchemasReq req) throws org.apache.thrift.TException - { - send_GetSchemas(req); - return recv_GetSchemas(); - } - - public void send_GetSchemas(TGetSchemasReq req) throws org.apache.thrift.TException - { - GetSchemas_args args = new GetSchemas_args(); - args.setReq(req); - sendBase("GetSchemas", args); - } - - public TGetSchemasResp recv_GetSchemas() throws org.apache.thrift.TException - { - GetSchemas_result result = new GetSchemas_result(); - receiveBase(result, "GetSchemas"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetSchemas failed: unknown result"); - } - - public TGetTablesResp GetTables(TGetTablesReq req) throws org.apache.thrift.TException - { - send_GetTables(req); - return recv_GetTables(); - } - - public void send_GetTables(TGetTablesReq req) throws org.apache.thrift.TException - { - GetTables_args args = new GetTables_args(); - args.setReq(req); - sendBase("GetTables", args); - } - - public TGetTablesResp recv_GetTables() throws org.apache.thrift.TException - { - GetTables_result result = new GetTables_result(); - receiveBase(result, "GetTables"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetTables failed: unknown result"); - } - - public TGetTableTypesResp GetTableTypes(TGetTableTypesReq req) throws org.apache.thrift.TException - { - send_GetTableTypes(req); - return recv_GetTableTypes(); - } - - public void send_GetTableTypes(TGetTableTypesReq req) throws org.apache.thrift.TException - { - GetTableTypes_args args = new GetTableTypes_args(); - args.setReq(req); - sendBase("GetTableTypes", args); - } - - public TGetTableTypesResp recv_GetTableTypes() throws org.apache.thrift.TException - { - GetTableTypes_result result = new GetTableTypes_result(); - receiveBase(result, "GetTableTypes"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetTableTypes failed: unknown result"); - } - - public TGetColumnsResp GetColumns(TGetColumnsReq req) throws org.apache.thrift.TException - { - send_GetColumns(req); - return recv_GetColumns(); - } - - public void send_GetColumns(TGetColumnsReq req) throws org.apache.thrift.TException - { - GetColumns_args args = new GetColumns_args(); - args.setReq(req); - sendBase("GetColumns", args); - } - - public TGetColumnsResp recv_GetColumns() throws 
org.apache.thrift.TException - { - GetColumns_result result = new GetColumns_result(); - receiveBase(result, "GetColumns"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetColumns failed: unknown result"); - } - - public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException - { - send_GetFunctions(req); - return recv_GetFunctions(); - } - - public void send_GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException - { - GetFunctions_args args = new GetFunctions_args(); - args.setReq(req); - sendBase("GetFunctions", args); - } - - public TGetFunctionsResp recv_GetFunctions() throws org.apache.thrift.TException - { - GetFunctions_result result = new GetFunctions_result(); - receiveBase(result, "GetFunctions"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetFunctions failed: unknown result"); - } - - public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException - { - send_GetOperationStatus(req); - return recv_GetOperationStatus(); - } - - public void send_GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException - { - GetOperationStatus_args args = new GetOperationStatus_args(); - args.setReq(req); - sendBase("GetOperationStatus", args); - } - - public TGetOperationStatusResp recv_GetOperationStatus() throws org.apache.thrift.TException - { - GetOperationStatus_result result = new GetOperationStatus_result(); - receiveBase(result, "GetOperationStatus"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetOperationStatus failed: unknown result"); - } - - public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException - { - send_CancelOperation(req); - return recv_CancelOperation(); - } - - public void send_CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException - { - CancelOperation_args args = new CancelOperation_args(); - args.setReq(req); - sendBase("CancelOperation", args); - } - - public TCancelOperationResp recv_CancelOperation() throws org.apache.thrift.TException - { - CancelOperation_result result = new CancelOperation_result(); - receiveBase(result, "CancelOperation"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CancelOperation failed: unknown result"); - } - - public TCloseOperationResp CloseOperation(TCloseOperationReq req) throws org.apache.thrift.TException - { - send_CloseOperation(req); - return recv_CloseOperation(); - } - - public void send_CloseOperation(TCloseOperationReq req) throws org.apache.thrift.TException - { - CloseOperation_args args = new CloseOperation_args(); - args.setReq(req); - sendBase("CloseOperation", args); - } - - public TCloseOperationResp recv_CloseOperation() throws org.apache.thrift.TException - { - CloseOperation_result result = new CloseOperation_result(); - receiveBase(result, "CloseOperation"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CloseOperation 
failed: unknown result"); - } - - public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req) throws org.apache.thrift.TException - { - send_GetResultSetMetadata(req); - return recv_GetResultSetMetadata(); - } - - public void send_GetResultSetMetadata(TGetResultSetMetadataReq req) throws org.apache.thrift.TException - { - GetResultSetMetadata_args args = new GetResultSetMetadata_args(); - args.setReq(req); - sendBase("GetResultSetMetadata", args); - } - - public TGetResultSetMetadataResp recv_GetResultSetMetadata() throws org.apache.thrift.TException - { - GetResultSetMetadata_result result = new GetResultSetMetadata_result(); - receiveBase(result, "GetResultSetMetadata"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetResultSetMetadata failed: unknown result"); - } - - public TFetchResultsResp FetchResults(TFetchResultsReq req) throws org.apache.thrift.TException - { - send_FetchResults(req); - return recv_FetchResults(); - } - - public void send_FetchResults(TFetchResultsReq req) throws org.apache.thrift.TException - { - FetchResults_args args = new FetchResults_args(); - args.setReq(req); - sendBase("FetchResults", args); - } - - public TFetchResultsResp recv_FetchResults() throws org.apache.thrift.TException - { - FetchResults_result result = new FetchResults_result(); - receiveBase(result, "FetchResults"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "FetchResults failed: unknown result"); - } - - public TGetDelegationTokenResp GetDelegationToken(TGetDelegationTokenReq req) throws org.apache.thrift.TException - { - send_GetDelegationToken(req); - return recv_GetDelegationToken(); - } - - public void send_GetDelegationToken(TGetDelegationTokenReq req) throws org.apache.thrift.TException - { - GetDelegationToken_args args = new GetDelegationToken_args(); - args.setReq(req); - sendBase("GetDelegationToken", args); - } - - public TGetDelegationTokenResp recv_GetDelegationToken() throws org.apache.thrift.TException - { - GetDelegationToken_result result = new GetDelegationToken_result(); - receiveBase(result, "GetDelegationToken"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetDelegationToken failed: unknown result"); - } - - public TCancelDelegationTokenResp CancelDelegationToken(TCancelDelegationTokenReq req) throws org.apache.thrift.TException - { - send_CancelDelegationToken(req); - return recv_CancelDelegationToken(); - } - - public void send_CancelDelegationToken(TCancelDelegationTokenReq req) throws org.apache.thrift.TException - { - CancelDelegationToken_args args = new CancelDelegationToken_args(); - args.setReq(req); - sendBase("CancelDelegationToken", args); - } - - public TCancelDelegationTokenResp recv_CancelDelegationToken() throws org.apache.thrift.TException - { - CancelDelegationToken_result result = new CancelDelegationToken_result(); - receiveBase(result, "CancelDelegationToken"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CancelDelegationToken failed: unknown result"); - } - - public TRenewDelegationTokenResp RenewDelegationToken(TRenewDelegationTokenReq 
req) throws org.apache.thrift.TException - { - send_RenewDelegationToken(req); - return recv_RenewDelegationToken(); - } - - public void send_RenewDelegationToken(TRenewDelegationTokenReq req) throws org.apache.thrift.TException - { - RenewDelegationToken_args args = new RenewDelegationToken_args(); - args.setReq(req); - sendBase("RenewDelegationToken", args); - } - - public TRenewDelegationTokenResp recv_RenewDelegationToken() throws org.apache.thrift.TException - { - RenewDelegationToken_result result = new RenewDelegationToken_result(); - receiveBase(result, "RenewDelegationToken"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "RenewDelegationToken failed: unknown result"); - } - - } - public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { - public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { - private org.apache.thrift.async.TAsyncClientManager clientManager; - private org.apache.thrift.protocol.TProtocolFactory protocolFactory; - public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) { - this.clientManager = clientManager; - this.protocolFactory = protocolFactory; - } - public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) { - return new AsyncClient(protocolFactory, clientManager, transport); - } - } - - public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) { - super(protocolFactory, clientManager, transport); - } - - public void OpenSession(TOpenSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - OpenSession_call method_call = new OpenSession_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class OpenSession_call extends org.apache.thrift.async.TAsyncMethodCall { - private TOpenSessionReq req; - public OpenSession_call(TOpenSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("OpenSession", org.apache.thrift.protocol.TMessageType.CALL, 0)); - OpenSession_args args = new OpenSession_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TOpenSessionResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new 
Client(prot)).recv_OpenSession(); - } - } - - public void CloseSession(TCloseSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CloseSession_call method_call = new CloseSession_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CloseSession_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCloseSessionReq req; - public CloseSession_call(TCloseSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CloseSession", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CloseSession_args args = new CloseSession_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCloseSessionResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CloseSession(); - } - } - - public void GetInfo(TGetInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetInfo_call method_call = new GetInfo_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetInfo_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetInfoReq req; - public GetInfo_call(TGetInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetInfo", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetInfo_args args = new GetInfo_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetInfoResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetInfo(); - } - } - 
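For the non-blocking path, the AsyncClient above queues one *_call object per RPC on a TAsyncClientManager and hands the completed call back to an AsyncMethodCallback, whose getResult() re-parses the framed response (as in OpenSession_call.getResult() shown earlier in this hunk). The sketch below is hypothetical: it assumes a HiveServer2 endpoint on localhost:10000, a server running in a framed/non-blocking mode, and the Thrift 0.9.0 callback shape in which onComplete receives the call object itself.

import org.apache.hive.service.cli.thrift.TCLIService;
import org.apache.hive.service.cli.thrift.TOpenSessionReq;
import org.apache.hive.service.cli.thrift.TOpenSessionResp;
import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.async.TAsyncClientManager;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TNonblockingSocket;

// Hypothetical sketch of the non-blocking AsyncClient deleted above.
public class TCLIServiceAsyncSketch {
  public static void main(String[] args) throws Exception {
    TCLIService.AsyncClient client = new TCLIService.AsyncClient.Factory(
        new TAsyncClientManager(),
        new TBinaryProtocol.Factory())
        .getAsyncClient(new TNonblockingSocket("localhost", 10000));  // assumed endpoint

    client.OpenSession(new TOpenSessionReq(),
        new AsyncMethodCallback<TCLIService.AsyncClient.OpenSession_call>() {
          @Override
          public void onComplete(TCLIService.AsyncClient.OpenSession_call call) {
            try {
              // getResult() is the generated helper shown earlier in this diff.
              TOpenSessionResp resp = call.getResult();
              System.out.println("session opened: " + resp.getStatus());
            } catch (org.apache.thrift.TException e) {
              e.printStackTrace();
            }
          }

          @Override
          public void onError(Exception e) {
            e.printStackTrace();
          }
        });
  }
}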
- public void ExecuteStatement(TExecuteStatementReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - ExecuteStatement_call method_call = new ExecuteStatement_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class ExecuteStatement_call extends org.apache.thrift.async.TAsyncMethodCall { - private TExecuteStatementReq req; - public ExecuteStatement_call(TExecuteStatementReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("ExecuteStatement", org.apache.thrift.protocol.TMessageType.CALL, 0)); - ExecuteStatement_args args = new ExecuteStatement_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TExecuteStatementResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_ExecuteStatement(); - } - } - - public void GetTypeInfo(TGetTypeInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetTypeInfo_call method_call = new GetTypeInfo_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetTypeInfo_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetTypeInfoReq req; - public GetTypeInfo_call(TGetTypeInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetTypeInfo", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetTypeInfo_args args = new GetTypeInfo_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetTypeInfoResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetTypeInfo(); - } - } - - public void GetCatalogs(TGetCatalogsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetCatalogs_call method_call = new GetCatalogs_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetCatalogs_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetCatalogsReq req; - public GetCatalogs_call(TGetCatalogsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetCatalogs", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetCatalogs_args args = new GetCatalogs_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetCatalogsResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetCatalogs(); - } - } - - public void GetSchemas(TGetSchemasReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetSchemas_call method_call = new GetSchemas_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetSchemas_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetSchemasReq req; - public GetSchemas_call(TGetSchemasReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetSchemas", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetSchemas_args args = new GetSchemas_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetSchemasResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetSchemas(); - } - } - - public void GetTables(TGetTablesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetTables_call method_call = new GetTables_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetTables_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetTablesReq req; - public GetTables_call(TGetTablesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetTables", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetTables_args args = new GetTables_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetTablesResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetTables(); - } - } - - public void GetTableTypes(TGetTableTypesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetTableTypes_call method_call = new GetTableTypes_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetTableTypes_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetTableTypesReq req; - public GetTableTypes_call(TGetTableTypesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetTableTypes", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetTableTypes_args args = new GetTableTypes_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetTableTypesResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetTableTypes(); - } - } - - public void GetColumns(TGetColumnsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetColumns_call method_call = new GetColumns_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetColumns_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetColumnsReq req; - public GetColumns_call(TGetColumnsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetColumns", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetColumns_args args = new GetColumns_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetColumnsResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetColumns(); - } - } - - public void GetFunctions(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetFunctions_call method_call = new GetFunctions_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetFunctions_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetFunctionsReq req; - public GetFunctions_call(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetFunctions", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetFunctions_args args = new GetFunctions_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetFunctionsResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetFunctions(); - } - } - - public void GetOperationStatus(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetOperationStatus_call method_call = new GetOperationStatus_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetOperationStatus_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetOperationStatusReq req; - public GetOperationStatus_call(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetOperationStatus", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetOperationStatus_args args = new GetOperationStatus_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetOperationStatusResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetOperationStatus(); - } - } - - public void CancelOperation(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CancelOperation_call method_call = new CancelOperation_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CancelOperation_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCancelOperationReq req; - public CancelOperation_call(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CancelOperation", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CancelOperation_args args = new CancelOperation_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCancelOperationResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport 
memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CancelOperation(); - } - } - - public void CloseOperation(TCloseOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CloseOperation_call method_call = new CloseOperation_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CloseOperation_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCloseOperationReq req; - public CloseOperation_call(TCloseOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CloseOperation", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CloseOperation_args args = new CloseOperation_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCloseOperationResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CloseOperation(); - } - } - - public void GetResultSetMetadata(TGetResultSetMetadataReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetResultSetMetadata_call method_call = new GetResultSetMetadata_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetResultSetMetadata_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetResultSetMetadataReq req; - public GetResultSetMetadata_call(TGetResultSetMetadataReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetResultSetMetadata", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetResultSetMetadata_args args = new GetResultSetMetadata_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetResultSetMetadataResp getResult() throws org.apache.thrift.TException { - if (getState() != 
org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetResultSetMetadata(); - } - } - - public void FetchResults(TFetchResultsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - FetchResults_call method_call = new FetchResults_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class FetchResults_call extends org.apache.thrift.async.TAsyncMethodCall { - private TFetchResultsReq req; - public FetchResults_call(TFetchResultsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("FetchResults", org.apache.thrift.protocol.TMessageType.CALL, 0)); - FetchResults_args args = new FetchResults_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TFetchResultsResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_FetchResults(); - } - } - - public void GetDelegationToken(TGetDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetDelegationToken_call method_call = new GetDelegationToken_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetDelegationToken_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetDelegationTokenReq req; - public GetDelegationToken_call(TGetDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetDelegationToken", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetDelegationToken_args args = new GetDelegationToken_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - 
public TGetDelegationTokenResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetDelegationToken(); - } - } - - public void CancelDelegationToken(TCancelDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CancelDelegationToken_call method_call = new CancelDelegationToken_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CancelDelegationToken_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCancelDelegationTokenReq req; - public CancelDelegationToken_call(TCancelDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CancelDelegationToken", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CancelDelegationToken_args args = new CancelDelegationToken_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCancelDelegationTokenResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CancelDelegationToken(); - } - } - - public void RenewDelegationToken(TRenewDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - RenewDelegationToken_call method_call = new RenewDelegationToken_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class RenewDelegationToken_call extends org.apache.thrift.async.TAsyncMethodCall { - private TRenewDelegationTokenReq req; - public RenewDelegationToken_call(TRenewDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new 
org.apache.thrift.protocol.TMessage("RenewDelegationToken", org.apache.thrift.protocol.TMessageType.CALL, 0)); - RenewDelegationToken_args args = new RenewDelegationToken_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TRenewDelegationTokenResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_RenewDelegationToken(); - } - } - - } - - public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { - private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName()); - public Processor(I iface) { - super(iface, getProcessMap(new HashMap>())); - } - - protected Processor(I iface, Map> processMap) { - super(iface, getProcessMap(processMap)); - } - - private static Map> getProcessMap(Map> processMap) { - processMap.put("OpenSession", new OpenSession()); - processMap.put("CloseSession", new CloseSession()); - processMap.put("GetInfo", new GetInfo()); - processMap.put("ExecuteStatement", new ExecuteStatement()); - processMap.put("GetTypeInfo", new GetTypeInfo()); - processMap.put("GetCatalogs", new GetCatalogs()); - processMap.put("GetSchemas", new GetSchemas()); - processMap.put("GetTables", new GetTables()); - processMap.put("GetTableTypes", new GetTableTypes()); - processMap.put("GetColumns", new GetColumns()); - processMap.put("GetFunctions", new GetFunctions()); - processMap.put("GetOperationStatus", new GetOperationStatus()); - processMap.put("CancelOperation", new CancelOperation()); - processMap.put("CloseOperation", new CloseOperation()); - processMap.put("GetResultSetMetadata", new GetResultSetMetadata()); - processMap.put("FetchResults", new FetchResults()); - processMap.put("GetDelegationToken", new GetDelegationToken()); - processMap.put("CancelDelegationToken", new CancelDelegationToken()); - processMap.put("RenewDelegationToken", new RenewDelegationToken()); - return processMap; - } - - public static class OpenSession extends org.apache.thrift.ProcessFunction { - public OpenSession() { - super("OpenSession"); - } - - public OpenSession_args getEmptyArgsInstance() { - return new OpenSession_args(); - } - - protected boolean isOneway() { - return false; - } - - public OpenSession_result getResult(I iface, OpenSession_args args) throws org.apache.thrift.TException { - OpenSession_result result = new OpenSession_result(); - result.success = iface.OpenSession(args.req); - return result; - } - } - - public static class CloseSession extends org.apache.thrift.ProcessFunction { - public CloseSession() { - super("CloseSession"); - } - - public CloseSession_args getEmptyArgsInstance() { - return new CloseSession_args(); - } - - protected boolean isOneway() { - return false; - } - - public CloseSession_result getResult(I iface, CloseSession_args args) throws org.apache.thrift.TException { - CloseSession_result result = new CloseSession_result(); - result.success = iface.CloseSession(args.req); - return result; - } - } - - public static class GetInfo extends org.apache.thrift.ProcessFunction { - public GetInfo() { - super("GetInfo"); - } - - public 
GetInfo_args getEmptyArgsInstance() { - return new GetInfo_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetInfo_result getResult(I iface, GetInfo_args args) throws org.apache.thrift.TException { - GetInfo_result result = new GetInfo_result(); - result.success = iface.GetInfo(args.req); - return result; - } - } - - public static class ExecuteStatement extends org.apache.thrift.ProcessFunction { - public ExecuteStatement() { - super("ExecuteStatement"); - } - - public ExecuteStatement_args getEmptyArgsInstance() { - return new ExecuteStatement_args(); - } - - protected boolean isOneway() { - return false; - } - - public ExecuteStatement_result getResult(I iface, ExecuteStatement_args args) throws org.apache.thrift.TException { - ExecuteStatement_result result = new ExecuteStatement_result(); - result.success = iface.ExecuteStatement(args.req); - return result; - } - } - - public static class GetTypeInfo extends org.apache.thrift.ProcessFunction { - public GetTypeInfo() { - super("GetTypeInfo"); - } - - public GetTypeInfo_args getEmptyArgsInstance() { - return new GetTypeInfo_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetTypeInfo_result getResult(I iface, GetTypeInfo_args args) throws org.apache.thrift.TException { - GetTypeInfo_result result = new GetTypeInfo_result(); - result.success = iface.GetTypeInfo(args.req); - return result; - } - } - - public static class GetCatalogs extends org.apache.thrift.ProcessFunction { - public GetCatalogs() { - super("GetCatalogs"); - } - - public GetCatalogs_args getEmptyArgsInstance() { - return new GetCatalogs_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetCatalogs_result getResult(I iface, GetCatalogs_args args) throws org.apache.thrift.TException { - GetCatalogs_result result = new GetCatalogs_result(); - result.success = iface.GetCatalogs(args.req); - return result; - } - } - - public static class GetSchemas extends org.apache.thrift.ProcessFunction { - public GetSchemas() { - super("GetSchemas"); - } - - public GetSchemas_args getEmptyArgsInstance() { - return new GetSchemas_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetSchemas_result getResult(I iface, GetSchemas_args args) throws org.apache.thrift.TException { - GetSchemas_result result = new GetSchemas_result(); - result.success = iface.GetSchemas(args.req); - return result; - } - } - - public static class GetTables extends org.apache.thrift.ProcessFunction { - public GetTables() { - super("GetTables"); - } - - public GetTables_args getEmptyArgsInstance() { - return new GetTables_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetTables_result getResult(I iface, GetTables_args args) throws org.apache.thrift.TException { - GetTables_result result = new GetTables_result(); - result.success = iface.GetTables(args.req); - return result; - } - } - - public static class GetTableTypes extends org.apache.thrift.ProcessFunction { - public GetTableTypes() { - super("GetTableTypes"); - } - - public GetTableTypes_args getEmptyArgsInstance() { - return new GetTableTypes_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetTableTypes_result getResult(I iface, GetTableTypes_args args) throws org.apache.thrift.TException { - GetTableTypes_result result = new GetTableTypes_result(); - result.success = iface.GetTableTypes(args.req); - return result; - } - } - - public static class GetColumns extends 
org.apache.thrift.ProcessFunction { - public GetColumns() { - super("GetColumns"); - } - - public GetColumns_args getEmptyArgsInstance() { - return new GetColumns_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetColumns_result getResult(I iface, GetColumns_args args) throws org.apache.thrift.TException { - GetColumns_result result = new GetColumns_result(); - result.success = iface.GetColumns(args.req); - return result; - } - } - - public static class GetFunctions extends org.apache.thrift.ProcessFunction { - public GetFunctions() { - super("GetFunctions"); - } - - public GetFunctions_args getEmptyArgsInstance() { - return new GetFunctions_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetFunctions_result getResult(I iface, GetFunctions_args args) throws org.apache.thrift.TException { - GetFunctions_result result = new GetFunctions_result(); - result.success = iface.GetFunctions(args.req); - return result; - } - } - - public static class GetOperationStatus extends org.apache.thrift.ProcessFunction { - public GetOperationStatus() { - super("GetOperationStatus"); - } - - public GetOperationStatus_args getEmptyArgsInstance() { - return new GetOperationStatus_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetOperationStatus_result getResult(I iface, GetOperationStatus_args args) throws org.apache.thrift.TException { - GetOperationStatus_result result = new GetOperationStatus_result(); - result.success = iface.GetOperationStatus(args.req); - return result; - } - } - - public static class CancelOperation extends org.apache.thrift.ProcessFunction { - public CancelOperation() { - super("CancelOperation"); - } - - public CancelOperation_args getEmptyArgsInstance() { - return new CancelOperation_args(); - } - - protected boolean isOneway() { - return false; - } - - public CancelOperation_result getResult(I iface, CancelOperation_args args) throws org.apache.thrift.TException { - CancelOperation_result result = new CancelOperation_result(); - result.success = iface.CancelOperation(args.req); - return result; - } - } - - public static class CloseOperation extends org.apache.thrift.ProcessFunction { - public CloseOperation() { - super("CloseOperation"); - } - - public CloseOperation_args getEmptyArgsInstance() { - return new CloseOperation_args(); - } - - protected boolean isOneway() { - return false; - } - - public CloseOperation_result getResult(I iface, CloseOperation_args args) throws org.apache.thrift.TException { - CloseOperation_result result = new CloseOperation_result(); - result.success = iface.CloseOperation(args.req); - return result; - } - } - - public static class GetResultSetMetadata extends org.apache.thrift.ProcessFunction { - public GetResultSetMetadata() { - super("GetResultSetMetadata"); - } - - public GetResultSetMetadata_args getEmptyArgsInstance() { - return new GetResultSetMetadata_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetResultSetMetadata_result getResult(I iface, GetResultSetMetadata_args args) throws org.apache.thrift.TException { - GetResultSetMetadata_result result = new GetResultSetMetadata_result(); - result.success = iface.GetResultSetMetadata(args.req); - return result; - } - } - - public static class FetchResults extends org.apache.thrift.ProcessFunction { - public FetchResults() { - super("FetchResults"); - } - - public FetchResults_args getEmptyArgsInstance() { - return new FetchResults_args(); - } - - protected boolean isOneway() { - 
return false; - } - - public FetchResults_result getResult(I iface, FetchResults_args args) throws org.apache.thrift.TException { - FetchResults_result result = new FetchResults_result(); - result.success = iface.FetchResults(args.req); - return result; - } - } - - public static class GetDelegationToken extends org.apache.thrift.ProcessFunction { - public GetDelegationToken() { - super("GetDelegationToken"); - } - - public GetDelegationToken_args getEmptyArgsInstance() { - return new GetDelegationToken_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetDelegationToken_result getResult(I iface, GetDelegationToken_args args) throws org.apache.thrift.TException { - GetDelegationToken_result result = new GetDelegationToken_result(); - result.success = iface.GetDelegationToken(args.req); - return result; - } - } - - public static class CancelDelegationToken extends org.apache.thrift.ProcessFunction { - public CancelDelegationToken() { - super("CancelDelegationToken"); - } - - public CancelDelegationToken_args getEmptyArgsInstance() { - return new CancelDelegationToken_args(); - } - - protected boolean isOneway() { - return false; - } - - public CancelDelegationToken_result getResult(I iface, CancelDelegationToken_args args) throws org.apache.thrift.TException { - CancelDelegationToken_result result = new CancelDelegationToken_result(); - result.success = iface.CancelDelegationToken(args.req); - return result; - } - } - - public static class RenewDelegationToken extends org.apache.thrift.ProcessFunction { - public RenewDelegationToken() { - super("RenewDelegationToken"); - } - - public RenewDelegationToken_args getEmptyArgsInstance() { - return new RenewDelegationToken_args(); - } - - protected boolean isOneway() { - return false; - } - - public RenewDelegationToken_result getResult(I iface, RenewDelegationToken_args args) throws org.apache.thrift.TException { - RenewDelegationToken_result result = new RenewDelegationToken_result(); - result.success = iface.RenewDelegationToken(args.req); - return result; - } - } - - } - - public static class OpenSession_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenSession_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new OpenSession_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new OpenSession_argsTupleSchemeFactory()); - } - - private TOpenSessionReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOpenSessionReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenSession_args.class, metaDataMap); - } - - public OpenSession_args() { - } - - public OpenSession_args( - TOpenSessionReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public OpenSession_args(OpenSession_args other) { - if (other.isSetReq()) { - this.req = new TOpenSessionReq(other.req); - } - } - - public OpenSession_args deepCopy() { - return new OpenSession_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TOpenSessionReq getReq() { - return this.req; - } - - public void setReq(TOpenSessionReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TOpenSessionReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof OpenSession_args) - return this.equals((OpenSession_args)that); - return false; - } - - public boolean equals(OpenSession_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new 
HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(OpenSession_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - OpenSession_args typedOther = (OpenSession_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("OpenSession_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class OpenSession_argsStandardSchemeFactory implements SchemeFactory { - public OpenSession_argsStandardScheme getScheme() { - return new OpenSession_argsStandardScheme(); - } - } - - private static class OpenSession_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, OpenSession_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TOpenSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, OpenSession_args struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class OpenSession_argsTupleSchemeFactory implements SchemeFactory { - public OpenSession_argsTupleScheme getScheme() { - return new OpenSession_argsTupleScheme(); - } - } - - private static class OpenSession_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, OpenSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, OpenSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TOpenSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class OpenSession_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenSession_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new OpenSession_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new OpenSession_resultTupleSchemeFactory()); - } - - private TOpenSessionResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOpenSessionResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenSession_result.class, metaDataMap); - } - - public OpenSession_result() { - } - - public OpenSession_result( - TOpenSessionResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public OpenSession_result(OpenSession_result other) { - if (other.isSetSuccess()) { - this.success = new TOpenSessionResp(other.success); - } - } - - public OpenSession_result deepCopy() { - return new OpenSession_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TOpenSessionResp getSuccess() { - return this.success; - } - - public void setSuccess(TOpenSessionResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TOpenSessionResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof OpenSession_result) - return this.equals((OpenSession_result)that); - return false; - } - - public boolean equals(OpenSession_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if 
(present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(OpenSession_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - OpenSession_result typedOther = (OpenSession_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("OpenSession_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class OpenSession_resultStandardSchemeFactory implements SchemeFactory { - public OpenSession_resultStandardScheme getScheme() { - return new OpenSession_resultStandardScheme(); - } - } - - private static class OpenSession_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, OpenSession_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TOpenSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, OpenSession_result struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class OpenSession_resultTupleSchemeFactory implements SchemeFactory { - public OpenSession_resultTupleScheme getScheme() { - return new OpenSession_resultTupleScheme(); - } - } - - private static class OpenSession_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, OpenSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, OpenSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TOpenSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class CloseSession_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseSession_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseSession_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseSession_argsTupleSchemeFactory()); - } - - private TCloseSessionReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseSessionReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseSession_args.class, metaDataMap); - } - - public CloseSession_args() { - } - - public CloseSession_args( - TCloseSessionReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public CloseSession_args(CloseSession_args other) { - if (other.isSetReq()) { - this.req = new TCloseSessionReq(other.req); - } - } - - public CloseSession_args deepCopy() { - return new CloseSession_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCloseSessionReq getReq() { - return this.req; - } - - public void setReq(TCloseSessionReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCloseSessionReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseSession_args) - return this.equals((CloseSession_args)that); - return false; - } - - public boolean equals(CloseSession_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(CloseSession_args other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CloseSession_args typedOther = (CloseSession_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseSession_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseSession_argsStandardSchemeFactory implements SchemeFactory { - public CloseSession_argsStandardScheme getScheme() { - return new CloseSession_argsStandardScheme(); - } - } - - private static class CloseSession_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseSession_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCloseSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseSession_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class 
CloseSession_argsTupleSchemeFactory implements SchemeFactory { - public CloseSession_argsTupleScheme getScheme() { - return new CloseSession_argsTupleScheme(); - } - } - - private static class CloseSession_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCloseSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CloseSession_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseSession_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseSession_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseSession_resultTupleSchemeFactory()); - } - - private TCloseSessionResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseSessionResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseSession_result.class, metaDataMap); - } - - public CloseSession_result() { - } - - public CloseSession_result( - TCloseSessionResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public CloseSession_result(CloseSession_result other) { - if (other.isSetSuccess()) { - this.success = new TCloseSessionResp(other.success); - } - } - - public CloseSession_result deepCopy() { - return new CloseSession_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCloseSessionResp getSuccess() { - return this.success; - } - - public void setSuccess(TCloseSessionResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCloseSessionResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseSession_result) - return this.equals((CloseSession_result)that); - return false; - } - - public boolean equals(CloseSession_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - 
if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(CloseSession_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CloseSession_result typedOther = (CloseSession_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseSession_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseSession_resultStandardSchemeFactory implements SchemeFactory { - public CloseSession_resultStandardScheme getScheme() { - return new CloseSession_resultStandardScheme(); - } - } - - private static class CloseSession_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseSession_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCloseSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseSession_result struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CloseSession_resultTupleSchemeFactory implements SchemeFactory { - public CloseSession_resultTupleScheme getScheme() { - return new CloseSession_resultTupleScheme(); - } - } - - private static class CloseSession_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TCloseSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetInfo_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetInfo_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetInfo_argsTupleSchemeFactory()); - } - - private TGetInfoReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetInfoReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetInfo_args.class, metaDataMap); - } - - public GetInfo_args() { - } - - public GetInfo_args( - TGetInfoReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetInfo_args(GetInfo_args other) { - if (other.isSetReq()) { - this.req = new TGetInfoReq(other.req); - } - } - - public GetInfo_args deepCopy() { - return new GetInfo_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetInfoReq getReq() { - return this.req; - } - - public void setReq(TGetInfoReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetInfoReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetInfo_args) - return this.equals((GetInfo_args)that); - return false; - } - - public boolean equals(GetInfo_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetInfo_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - 
GetInfo_args typedOther = (GetInfo_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetInfo_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetInfo_argsStandardSchemeFactory implements SchemeFactory { - public GetInfo_argsStandardScheme getScheme() { - return new GetInfo_argsStandardScheme(); - } - } - - private static class GetInfo_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetInfo_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetInfo_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetInfo_argsTupleSchemeFactory implements SchemeFactory { - public GetInfo_argsTupleScheme getScheme() { - return new GetInfo_argsTupleScheme(); - } - } - - private static class 
GetInfo_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetInfo_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetInfo_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetInfo_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetInfo_resultTupleSchemeFactory()); - } - - private TGetInfoResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetInfoResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetInfo_result.class, metaDataMap); - } - - public GetInfo_result() { - } - - public GetInfo_result( - TGetInfoResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetInfo_result(GetInfo_result other) { - if (other.isSetSuccess()) { - this.success = new TGetInfoResp(other.success); - } - } - - public GetInfo_result deepCopy() { - return new GetInfo_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetInfoResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetInfoResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetInfoResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetInfo_result) - return this.equals((GetInfo_result)that); - return false; - } - - public boolean equals(GetInfo_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); 
- } - - public int compareTo(GetInfo_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetInfo_result typedOther = (GetInfo_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetInfo_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetInfo_resultStandardSchemeFactory implements SchemeFactory { - public GetInfo_resultStandardScheme getScheme() { - return new GetInfo_resultStandardScheme(); - } - } - - private static class GetInfo_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetInfo_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetInfo_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - 
oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetInfo_resultTupleSchemeFactory implements SchemeFactory { - public GetInfo_resultTupleScheme getScheme() { - return new GetInfo_resultTupleScheme(); - } - } - - private static class GetInfo_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class ExecuteStatement_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ExecuteStatement_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new ExecuteStatement_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new ExecuteStatement_argsTupleSchemeFactory()); - } - - private TExecuteStatementReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TExecuteStatementReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ExecuteStatement_args.class, metaDataMap); - } - - public ExecuteStatement_args() { - } - - public ExecuteStatement_args( - TExecuteStatementReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public ExecuteStatement_args(ExecuteStatement_args other) { - if (other.isSetReq()) { - this.req = new TExecuteStatementReq(other.req); - } - } - - public ExecuteStatement_args deepCopy() { - return new ExecuteStatement_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TExecuteStatementReq getReq() { - return this.req; - } - - public void setReq(TExecuteStatementReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TExecuteStatementReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof ExecuteStatement_args) - return this.equals((ExecuteStatement_args)that); - return false; - } - - public boolean equals(ExecuteStatement_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(ExecuteStatement_args other) 
{ - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - ExecuteStatement_args typedOther = (ExecuteStatement_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("ExecuteStatement_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class ExecuteStatement_argsStandardSchemeFactory implements SchemeFactory { - public ExecuteStatement_argsStandardScheme getScheme() { - return new ExecuteStatement_argsStandardScheme(); - } - } - - private static class ExecuteStatement_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TExecuteStatementReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - 
oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class ExecuteStatement_argsTupleSchemeFactory implements SchemeFactory { - public ExecuteStatement_argsTupleScheme getScheme() { - return new ExecuteStatement_argsTupleScheme(); - } - } - - private static class ExecuteStatement_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TExecuteStatementReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class ExecuteStatement_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ExecuteStatement_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new ExecuteStatement_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new ExecuteStatement_resultTupleSchemeFactory()); - } - - private TExecuteStatementResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TExecuteStatementResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ExecuteStatement_result.class, metaDataMap); - } - - public ExecuteStatement_result() { - } - - public ExecuteStatement_result( - TExecuteStatementResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public ExecuteStatement_result(ExecuteStatement_result other) { - if (other.isSetSuccess()) { - this.success = new TExecuteStatementResp(other.success); - } - } - - public ExecuteStatement_result deepCopy() { - return new ExecuteStatement_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TExecuteStatementResp getSuccess() { - return this.success; - } - - public void setSuccess(TExecuteStatementResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TExecuteStatementResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof ExecuteStatement_result) - return this.equals((ExecuteStatement_result)that); - return false; - } - - public boolean equals(ExecuteStatement_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = 
true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(ExecuteStatement_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - ExecuteStatement_result typedOther = (ExecuteStatement_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("ExecuteStatement_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class ExecuteStatement_resultStandardSchemeFactory implements SchemeFactory { - public ExecuteStatement_resultStandardScheme getScheme() { - return new ExecuteStatement_resultStandardScheme(); - } - } - - private static class ExecuteStatement_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, ExecuteStatement_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TExecuteStatementResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, 
ExecuteStatement_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class ExecuteStatement_resultTupleSchemeFactory implements SchemeFactory { - public ExecuteStatement_resultTupleScheme getScheme() { - return new ExecuteStatement_resultTupleScheme(); - } - } - - private static class ExecuteStatement_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TExecuteStatementResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetTypeInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTypeInfo_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTypeInfo_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTypeInfo_argsTupleSchemeFactory()); - } - - private TGetTypeInfoReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTypeInfoReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTypeInfo_args.class, metaDataMap); - } - - public GetTypeInfo_args() { - } - - public GetTypeInfo_args( - TGetTypeInfoReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetTypeInfo_args(GetTypeInfo_args other) { - if (other.isSetReq()) { - this.req = new TGetTypeInfoReq(other.req); - } - } - - public GetTypeInfo_args deepCopy() { - return new GetTypeInfo_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetTypeInfoReq getReq() { - return this.req; - } - - public void setReq(TGetTypeInfoReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetTypeInfoReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTypeInfo_args) - return this.equals((GetTypeInfo_args)that); - return false; - } - - public boolean equals(GetTypeInfo_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetTypeInfo_args other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetTypeInfo_args typedOther = (GetTypeInfo_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTypeInfo_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTypeInfo_argsStandardSchemeFactory implements SchemeFactory { - public GetTypeInfo_argsStandardScheme getScheme() { - return new GetTypeInfo_argsStandardScheme(); - } - } - - private static class GetTypeInfo_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetTypeInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTypeInfo_argsTupleSchemeFactory 
implements SchemeFactory { - public GetTypeInfo_argsTupleScheme getScheme() { - return new GetTypeInfo_argsTupleScheme(); - } - } - - private static class GetTypeInfo_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetTypeInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetTypeInfo_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTypeInfo_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTypeInfo_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTypeInfo_resultTupleSchemeFactory()); - } - - private TGetTypeInfoResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTypeInfoResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTypeInfo_result.class, metaDataMap); - } - - public GetTypeInfo_result() { - } - - public GetTypeInfo_result( - TGetTypeInfoResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetTypeInfo_result(GetTypeInfo_result other) { - if (other.isSetSuccess()) { - this.success = new TGetTypeInfoResp(other.success); - } - } - - public GetTypeInfo_result deepCopy() { - return new GetTypeInfo_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetTypeInfoResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetTypeInfoResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetTypeInfoResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTypeInfo_result) - return this.equals((GetTypeInfo_result)that); - return false; - } - - public boolean equals(GetTypeInfo_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if 
(present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetTypeInfo_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetTypeInfo_result typedOther = (GetTypeInfo_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTypeInfo_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTypeInfo_resultStandardSchemeFactory implements SchemeFactory { - public GetTypeInfo_resultStandardScheme getScheme() { - return new GetTypeInfo_resultStandardScheme(); - } - } - - private static class GetTypeInfo_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetTypeInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTypeInfo_resultTupleSchemeFactory implements SchemeFactory { - public GetTypeInfo_resultTupleScheme getScheme() { - return new GetTypeInfo_resultTupleScheme(); - } - } - - private static class GetTypeInfo_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetTypeInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetCatalogs_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogs_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetCatalogs_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetCatalogs_argsTupleSchemeFactory()); - } - - private TGetCatalogsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCatalogsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogs_args.class, metaDataMap); - } - - public GetCatalogs_args() { - } - - public GetCatalogs_args( - TGetCatalogsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetCatalogs_args(GetCatalogs_args other) { - if (other.isSetReq()) { - this.req = new TGetCatalogsReq(other.req); - } - } - - public GetCatalogs_args deepCopy() { - return new GetCatalogs_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetCatalogsReq getReq() { - return this.req; - } - - public void setReq(TGetCatalogsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetCatalogsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetCatalogs_args) - return this.equals((GetCatalogs_args)that); - return false; - } - - public boolean equals(GetCatalogs_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetCatalogs_args other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetCatalogs_args typedOther = (GetCatalogs_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetCatalogs_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetCatalogs_argsStandardSchemeFactory implements SchemeFactory { - public GetCatalogs_argsStandardScheme getScheme() { - return new GetCatalogs_argsStandardScheme(); - } - } - - private static class GetCatalogs_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogs_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetCatalogsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogs_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetCatalogs_argsTupleSchemeFactory 
implements SchemeFactory { - public GetCatalogs_argsTupleScheme getScheme() { - return new GetCatalogs_argsTupleScheme(); - } - } - - private static class GetCatalogs_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetCatalogsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetCatalogs_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogs_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetCatalogs_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetCatalogs_resultTupleSchemeFactory()); - } - - private TGetCatalogsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCatalogsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogs_result.class, metaDataMap); - } - - public GetCatalogs_result() { - } - - public GetCatalogs_result( - TGetCatalogsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetCatalogs_result(GetCatalogs_result other) { - if (other.isSetSuccess()) { - this.success = new TGetCatalogsResp(other.success); - } - } - - public GetCatalogs_result deepCopy() { - return new GetCatalogs_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetCatalogsResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetCatalogsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetCatalogsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetCatalogs_result) - return this.equals((GetCatalogs_result)that); - return false; - } - - public boolean equals(GetCatalogs_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if 
(present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetCatalogs_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetCatalogs_result typedOther = (GetCatalogs_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetCatalogs_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetCatalogs_resultStandardSchemeFactory implements SchemeFactory { - public GetCatalogs_resultStandardScheme getScheme() { - return new GetCatalogs_resultStandardScheme(); - } - } - - private static class GetCatalogs_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogs_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetCatalogsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogs_result struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetCatalogs_resultTupleSchemeFactory implements SchemeFactory { - public GetCatalogs_resultTupleScheme getScheme() { - return new GetCatalogs_resultTupleScheme(); - } - } - - private static class GetCatalogs_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetCatalogsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetSchemas_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetSchemas_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetSchemas_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetSchemas_argsTupleSchemeFactory()); - } - - private TGetSchemasReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetSchemasReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetSchemas_args.class, metaDataMap); - } - - public GetSchemas_args() { - } - - public GetSchemas_args( - TGetSchemasReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetSchemas_args(GetSchemas_args other) { - if (other.isSetReq()) { - this.req = new TGetSchemasReq(other.req); - } - } - - public GetSchemas_args deepCopy() { - return new GetSchemas_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetSchemasReq getReq() { - return this.req; - } - - public void setReq(TGetSchemasReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetSchemasReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetSchemas_args) - return this.equals((GetSchemas_args)that); - return false; - } - - public boolean equals(GetSchemas_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetSchemas_args other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetSchemas_args typedOther = (GetSchemas_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetSchemas_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetSchemas_argsStandardSchemeFactory implements SchemeFactory { - public GetSchemas_argsStandardScheme getScheme() { - return new GetSchemas_argsStandardScheme(); - } - } - - private static class GetSchemas_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetSchemas_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetSchemasReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetSchemas_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetSchemas_argsTupleSchemeFactory implements 
SchemeFactory { - public GetSchemas_argsTupleScheme getScheme() { - return new GetSchemas_argsTupleScheme(); - } - } - - private static class GetSchemas_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetSchemas_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetSchemas_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetSchemasReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetSchemas_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetSchemas_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetSchemas_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetSchemas_resultTupleSchemeFactory()); - } - - private TGetSchemasResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetSchemasResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetSchemas_result.class, metaDataMap); - } - - public GetSchemas_result() { - } - - public GetSchemas_result( - TGetSchemasResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetSchemas_result(GetSchemas_result other) { - if (other.isSetSuccess()) { - this.success = new TGetSchemasResp(other.success); - } - } - - public GetSchemas_result deepCopy() { - return new GetSchemas_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetSchemasResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetSchemasResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetSchemasResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetSchemas_result) - return this.equals((GetSchemas_result)that); - return false; - } - - public boolean equals(GetSchemas_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - 
builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetSchemas_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetSchemas_result typedOther = (GetSchemas_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetSchemas_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetSchemas_resultStandardSchemeFactory implements SchemeFactory { - public GetSchemas_resultStandardScheme getScheme() { - return new GetSchemas_resultStandardScheme(); - } - } - - private static class GetSchemas_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetSchemas_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetSchemasResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetSchemas_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success 
!= null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetSchemas_resultTupleSchemeFactory implements SchemeFactory { - public GetSchemas_resultTupleScheme getScheme() { - return new GetSchemas_resultTupleScheme(); - } - } - - private static class GetSchemas_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetSchemas_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetSchemas_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetSchemasResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetTables_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTables_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTables_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTables_argsTupleSchemeFactory()); - } - - private TGetTablesReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTablesReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTables_args.class, metaDataMap); - } - - public GetTables_args() { - } - - public GetTables_args( - TGetTablesReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetTables_args(GetTables_args other) { - if (other.isSetReq()) { - this.req = new TGetTablesReq(other.req); - } - } - - public GetTables_args deepCopy() { - return new GetTables_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetTablesReq getReq() { - return this.req; - } - - public void setReq(TGetTablesReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetTablesReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTables_args) - return this.equals((GetTables_args)that); - return false; - } - - public boolean equals(GetTables_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetTables_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - 
} - - int lastComparison = 0; - GetTables_args typedOther = (GetTables_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTables_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTables_argsStandardSchemeFactory implements SchemeFactory { - public GetTables_argsStandardScheme getScheme() { - return new GetTables_argsStandardScheme(); - } - } - - private static class GetTables_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTables_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetTablesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTables_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTables_argsTupleSchemeFactory implements SchemeFactory { - public GetTables_argsTupleScheme getScheme() { - return new 
GetTables_argsTupleScheme(); - } - } - - private static class GetTables_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTables_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTables_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetTablesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetTables_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTables_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTables_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTables_resultTupleSchemeFactory()); - } - - private TGetTablesResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTablesResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTables_result.class, metaDataMap); - } - - public GetTables_result() { - } - - public GetTables_result( - TGetTablesResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetTables_result(GetTables_result other) { - if (other.isSetSuccess()) { - this.success = new TGetTablesResp(other.success); - } - } - - public GetTables_result deepCopy() { - return new GetTables_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetTablesResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetTablesResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetTablesResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTables_result) - return this.equals((GetTables_result)that); - return false; - } - - public boolean equals(GetTables_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - 
- return builder.toHashCode(); - } - - public int compareTo(GetTables_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetTables_result typedOther = (GetTables_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTables_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTables_resultStandardSchemeFactory implements SchemeFactory { - public GetTables_resultStandardScheme getScheme() { - return new GetTables_resultStandardScheme(); - } - } - - private static class GetTables_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTables_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetTablesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTables_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTables_resultTupleSchemeFactory implements SchemeFactory { - public GetTables_resultTupleScheme getScheme() { - return new GetTables_resultTupleScheme(); - } - } - - private static class GetTables_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTables_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTables_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetTablesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetTableTypes_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableTypes_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTableTypes_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTableTypes_argsTupleSchemeFactory()); - } - - private TGetTableTypesReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTableTypesReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableTypes_args.class, metaDataMap); - } - - public GetTableTypes_args() { - } - - public GetTableTypes_args( - TGetTableTypesReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetTableTypes_args(GetTableTypes_args other) { - if (other.isSetReq()) { - this.req = new TGetTableTypesReq(other.req); - } - } - - public GetTableTypes_args deepCopy() { - return new GetTableTypes_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetTableTypesReq getReq() { - return this.req; - } - - public void setReq(TGetTableTypesReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetTableTypesReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTableTypes_args) - return this.equals((GetTableTypes_args)that); - return false; - } - - public boolean equals(GetTableTypes_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetTableTypes_args other) { - if (!getClass().equals(other.getClass())) { - 
return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetTableTypes_args typedOther = (GetTableTypes_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTableTypes_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTableTypes_argsStandardSchemeFactory implements SchemeFactory { - public GetTableTypes_argsStandardScheme getScheme() { - return new GetTableTypes_argsStandardScheme(); - } - } - - private static class GetTableTypes_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableTypes_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetTableTypesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableTypes_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class 
GetTableTypes_argsTupleSchemeFactory implements SchemeFactory { - public GetTableTypes_argsTupleScheme getScheme() { - return new GetTableTypes_argsTupleScheme(); - } - } - - private static class GetTableTypes_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetTableTypesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetTableTypes_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableTypes_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTableTypes_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTableTypes_resultTupleSchemeFactory()); - } - - private TGetTableTypesResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTableTypesResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableTypes_result.class, metaDataMap); - } - - public GetTableTypes_result() { - } - - public GetTableTypes_result( - TGetTableTypesResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetTableTypes_result(GetTableTypes_result other) { - if (other.isSetSuccess()) { - this.success = new TGetTableTypesResp(other.success); - } - } - - public GetTableTypes_result deepCopy() { - return new GetTableTypes_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetTableTypesResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetTableTypesResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetTableTypesResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTableTypes_result) - return this.equals((GetTableTypes_result)that); - return false; - } - - public boolean equals(GetTableTypes_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - 
builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetTableTypes_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetTableTypes_result typedOther = (GetTableTypes_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTableTypes_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTableTypes_resultStandardSchemeFactory implements SchemeFactory { - public GetTableTypes_resultStandardScheme getScheme() { - return new GetTableTypes_resultStandardScheme(); - } - } - - private static class GetTableTypes_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableTypes_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetTableTypesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableTypes_result struct) throws 
org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTableTypes_resultTupleSchemeFactory implements SchemeFactory { - public GetTableTypes_resultTupleScheme getScheme() { - return new GetTableTypes_resultTupleScheme(); - } - } - - private static class GetTableTypes_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetTableTypesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetColumns_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetColumns_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetColumns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetColumns_argsTupleSchemeFactory()); - } - - private TGetColumnsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetColumnsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetColumns_args.class, metaDataMap); - } - - public GetColumns_args() { - } - - public GetColumns_args( - TGetColumnsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetColumns_args(GetColumns_args other) { - if (other.isSetReq()) { - this.req = new TGetColumnsReq(other.req); - } - } - - public GetColumns_args deepCopy() { - return new GetColumns_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetColumnsReq getReq() { - return this.req; - } - - public void setReq(TGetColumnsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetColumnsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetColumns_args) - return this.equals((GetColumns_args)that); - return false; - } - - public boolean equals(GetColumns_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetColumns_args other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetColumns_args typedOther = (GetColumns_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetColumns_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetColumns_argsStandardSchemeFactory implements SchemeFactory { - public GetColumns_argsStandardScheme getScheme() { - return new GetColumns_argsStandardScheme(); - } - } - - private static class GetColumns_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetColumns_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetColumnsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetColumns_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetColumns_argsTupleSchemeFactory implements 
SchemeFactory { - public GetColumns_argsTupleScheme getScheme() { - return new GetColumns_argsTupleScheme(); - } - } - - private static class GetColumns_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetColumns_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetColumns_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetColumnsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetColumns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetColumns_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetColumns_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetColumns_resultTupleSchemeFactory()); - } - - private TGetColumnsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetColumnsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetColumns_result.class, metaDataMap); - } - - public GetColumns_result() { - } - - public GetColumns_result( - TGetColumnsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetColumns_result(GetColumns_result other) { - if (other.isSetSuccess()) { - this.success = new TGetColumnsResp(other.success); - } - } - - public GetColumns_result deepCopy() { - return new GetColumns_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetColumnsResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetColumnsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetColumnsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetColumns_result) - return this.equals((GetColumns_result)that); - return false; - } - - public boolean equals(GetColumns_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - 
builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetColumns_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetColumns_result typedOther = (GetColumns_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetColumns_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetColumns_resultStandardSchemeFactory implements SchemeFactory { - public GetColumns_resultStandardScheme getScheme() { - return new GetColumns_resultStandardScheme(); - } - } - - private static class GetColumns_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetColumns_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetColumnsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetColumns_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success 
!= null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetColumns_resultTupleSchemeFactory implements SchemeFactory { - public GetColumns_resultTupleScheme getScheme() { - return new GetColumns_resultTupleScheme(); - } - } - - private static class GetColumns_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetColumns_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetColumns_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetColumnsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetFunctions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFunctions_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetFunctions_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetFunctions_argsTupleSchemeFactory()); - } - - private TGetFunctionsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetFunctionsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFunctions_args.class, metaDataMap); - } - - public GetFunctions_args() { - } - - public GetFunctions_args( - TGetFunctionsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetFunctions_args(GetFunctions_args other) { - if (other.isSetReq()) { - this.req = new TGetFunctionsReq(other.req); - } - } - - public GetFunctions_args deepCopy() { - return new GetFunctions_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetFunctionsReq getReq() { - return this.req; - } - - public void setReq(TGetFunctionsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetFunctionsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetFunctions_args) - return this.equals((GetFunctions_args)that); - return false; - } - - public boolean equals(GetFunctions_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(GetFunctions_args other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetFunctions_args typedOther = (GetFunctions_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetFunctions_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetFunctions_argsStandardSchemeFactory implements SchemeFactory { - public GetFunctions_argsStandardScheme getScheme() { - return new GetFunctions_argsStandardScheme(); - } - } - - private static class GetFunctions_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetFunctions_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetFunctionsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetFunctions_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class 
GetFunctions_argsTupleSchemeFactory implements SchemeFactory { - public GetFunctions_argsTupleScheme getScheme() { - return new GetFunctions_argsTupleScheme(); - } - } - - private static class GetFunctions_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetFunctions_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetFunctions_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetFunctionsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetFunctions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFunctions_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetFunctions_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetFunctions_resultTupleSchemeFactory()); - } - - private TGetFunctionsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetFunctionsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFunctions_result.class, metaDataMap); - } - - public GetFunctions_result() { - } - - public GetFunctions_result( - TGetFunctionsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetFunctions_result(GetFunctions_result other) { - if (other.isSetSuccess()) { - this.success = new TGetFunctionsResp(other.success); - } - } - - public GetFunctions_result deepCopy() { - return new GetFunctions_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetFunctionsResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetFunctionsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetFunctionsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetFunctions_result) - return this.equals((GetFunctions_result)that); - return false; - } - - public boolean equals(GetFunctions_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - 
if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetFunctions_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetFunctions_result typedOther = (GetFunctions_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetFunctions_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetFunctions_resultStandardSchemeFactory implements SchemeFactory { - public GetFunctions_resultStandardScheme getScheme() { - return new GetFunctions_resultStandardScheme(); - } - } - - private static class GetFunctions_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetFunctions_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetFunctionsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetFunctions_result struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetFunctions_resultTupleSchemeFactory implements SchemeFactory { - public GetFunctions_resultTupleScheme getScheme() { - return new GetFunctions_resultTupleScheme(); - } - } - - private static class GetFunctions_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetFunctions_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetFunctions_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetFunctionsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetOperationStatus_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOperationStatus_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetOperationStatus_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetOperationStatus_argsTupleSchemeFactory()); - } - - private TGetOperationStatusReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetOperationStatusReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOperationStatus_args.class, metaDataMap); - } - - public GetOperationStatus_args() { - } - - public GetOperationStatus_args( - TGetOperationStatusReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetOperationStatus_args(GetOperationStatus_args other) { - if (other.isSetReq()) { - this.req = new TGetOperationStatusReq(other.req); - } - } - - public GetOperationStatus_args deepCopy() { - return new GetOperationStatus_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetOperationStatusReq getReq() { - return this.req; - } - - public void setReq(TGetOperationStatusReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetOperationStatusReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetOperationStatus_args) - return this.equals((GetOperationStatus_args)that); - return false; - } - - public boolean equals(GetOperationStatus_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int 
compareTo(GetOperationStatus_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetOperationStatus_args typedOther = (GetOperationStatus_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetOperationStatus_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetOperationStatus_argsStandardSchemeFactory implements SchemeFactory { - public GetOperationStatus_argsStandardScheme getScheme() { - return new GetOperationStatus_argsStandardScheme(); - } - } - - private static class GetOperationStatus_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetOperationStatusReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - 
struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetOperationStatus_argsTupleSchemeFactory implements SchemeFactory { - public GetOperationStatus_argsTupleScheme getScheme() { - return new GetOperationStatus_argsTupleScheme(); - } - } - - private static class GetOperationStatus_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetOperationStatusReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetOperationStatus_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOperationStatus_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetOperationStatus_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetOperationStatus_resultTupleSchemeFactory()); - } - - private TGetOperationStatusResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetOperationStatusResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOperationStatus_result.class, metaDataMap); - } - - public GetOperationStatus_result() { - } - - public GetOperationStatus_result( - TGetOperationStatusResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetOperationStatus_result(GetOperationStatus_result other) { - if (other.isSetSuccess()) { - this.success = new TGetOperationStatusResp(other.success); - } - } - - public GetOperationStatus_result deepCopy() { - return new GetOperationStatus_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetOperationStatusResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetOperationStatusResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetOperationStatusResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetOperationStatus_result) - return this.equals((GetOperationStatus_result)that); - return false; - } - - public boolean equals(GetOperationStatus_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new 
HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetOperationStatus_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetOperationStatus_result typedOther = (GetOperationStatus_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetOperationStatus_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetOperationStatus_resultStandardSchemeFactory implements SchemeFactory { - public GetOperationStatus_resultStandardScheme getScheme() { - return new GetOperationStatus_resultStandardScheme(); - } - } - - private static class GetOperationStatus_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetOperationStatusResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } 
- - public void write(org.apache.thrift.protocol.TProtocol oprot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetOperationStatus_resultTupleSchemeFactory implements SchemeFactory { - public GetOperationStatus_resultTupleScheme getScheme() { - return new GetOperationStatus_resultTupleScheme(); - } - } - - private static class GetOperationStatus_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetOperationStatusResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class CancelOperation_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelOperation_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelOperation_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelOperation_argsTupleSchemeFactory()); - } - - private TCancelOperationReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelOperationReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelOperation_args.class, metaDataMap); - } - - public CancelOperation_args() { - } - - public CancelOperation_args( - TCancelOperationReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public CancelOperation_args(CancelOperation_args other) { - if (other.isSetReq()) { - this.req = new TCancelOperationReq(other.req); - } - } - - public CancelOperation_args deepCopy() { - return new CancelOperation_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCancelOperationReq getReq() { - return this.req; - } - - public void setReq(TCancelOperationReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCancelOperationReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelOperation_args) - return this.equals((CancelOperation_args)that); - return false; - } - - public boolean equals(CancelOperation_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(CancelOperation_args other) { - if 
(!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CancelOperation_args typedOther = (CancelOperation_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelOperation_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelOperation_argsStandardSchemeFactory implements SchemeFactory { - public CancelOperation_argsStandardScheme getScheme() { - return new CancelOperation_argsStandardScheme(); - } - } - - private static class CancelOperation_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelOperation_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCancelOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CancelOperation_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - 
oprot.writeStructEnd(); - } - - } - - private static class CancelOperation_argsTupleSchemeFactory implements SchemeFactory { - public CancelOperation_argsTupleScheme getScheme() { - return new CancelOperation_argsTupleScheme(); - } - } - - private static class CancelOperation_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCancelOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CancelOperation_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelOperation_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelOperation_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelOperation_resultTupleSchemeFactory()); - } - - private TCancelOperationResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelOperationResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelOperation_result.class, metaDataMap); - } - - public CancelOperation_result() { - } - - public CancelOperation_result( - TCancelOperationResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public CancelOperation_result(CancelOperation_result other) { - if (other.isSetSuccess()) { - this.success = new TCancelOperationResp(other.success); - } - } - - public CancelOperation_result deepCopy() { - return new CancelOperation_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCancelOperationResp getSuccess() { - return this.success; - } - - public void setSuccess(TCancelOperationResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCancelOperationResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelOperation_result) - return this.equals((CancelOperation_result)that); - return false; - } - - public boolean equals(CancelOperation_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && 
(isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(CancelOperation_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CancelOperation_result typedOther = (CancelOperation_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelOperation_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelOperation_resultStandardSchemeFactory implements SchemeFactory { - public CancelOperation_resultStandardScheme getScheme() { - return new CancelOperation_resultStandardScheme(); - } - } - - private static class CancelOperation_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelOperation_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCancelOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, 
CancelOperation_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CancelOperation_resultTupleSchemeFactory implements SchemeFactory { - public CancelOperation_resultTupleScheme getScheme() { - return new CancelOperation_resultTupleScheme(); - } - } - - private static class CancelOperation_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TCancelOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class CloseOperation_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseOperation_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseOperation_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseOperation_argsTupleSchemeFactory()); - } - - private TCloseOperationReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseOperationReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseOperation_args.class, metaDataMap); - } - - public CloseOperation_args() { - } - - public CloseOperation_args( - TCloseOperationReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public CloseOperation_args(CloseOperation_args other) { - if (other.isSetReq()) { - this.req = new TCloseOperationReq(other.req); - } - } - - public CloseOperation_args deepCopy() { - return new CloseOperation_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCloseOperationReq getReq() { - return this.req; - } - - public void setReq(TCloseOperationReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCloseOperationReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseOperation_args) - return this.equals((CloseOperation_args)that); - return false; - } - - public boolean equals(CloseOperation_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(CloseOperation_args other) { - if 
(!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CloseOperation_args typedOther = (CloseOperation_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseOperation_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseOperation_argsStandardSchemeFactory implements SchemeFactory { - public CloseOperation_argsStandardScheme getScheme() { - return new CloseOperation_argsStandardScheme(); - } - } - - private static class CloseOperation_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseOperation_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCloseOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseOperation_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); 
- } - - } - - private static class CloseOperation_argsTupleSchemeFactory implements SchemeFactory { - public CloseOperation_argsTupleScheme getScheme() { - return new CloseOperation_argsTupleScheme(); - } - } - - private static class CloseOperation_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCloseOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CloseOperation_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseOperation_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseOperation_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseOperation_resultTupleSchemeFactory()); - } - - private TCloseOperationResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseOperationResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseOperation_result.class, metaDataMap); - } - - public CloseOperation_result() { - } - - public CloseOperation_result( - TCloseOperationResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public CloseOperation_result(CloseOperation_result other) { - if (other.isSetSuccess()) { - this.success = new TCloseOperationResp(other.success); - } - } - - public CloseOperation_result deepCopy() { - return new CloseOperation_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCloseOperationResp getSuccess() { - return this.success; - } - - public void setSuccess(TCloseOperationResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCloseOperationResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseOperation_result) - return this.equals((CloseOperation_result)that); - return false; - } - - public boolean equals(CloseOperation_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - 
builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(CloseOperation_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CloseOperation_result typedOther = (CloseOperation_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseOperation_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseOperation_resultStandardSchemeFactory implements SchemeFactory { - public CloseOperation_resultStandardScheme getScheme() { - return new CloseOperation_resultStandardScheme(); - } - } - - private static class CloseOperation_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseOperation_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCloseOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseOperation_result struct) throws 
org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CloseOperation_resultTupleSchemeFactory implements SchemeFactory { - public CloseOperation_resultTupleScheme getScheme() { - return new CloseOperation_resultTupleScheme(); - } - } - - private static class CloseOperation_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TCloseOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetResultSetMetadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetResultSetMetadata_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetResultSetMetadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetResultSetMetadata_argsTupleSchemeFactory()); - } - - private TGetResultSetMetadataReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetResultSetMetadataReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetResultSetMetadata_args.class, metaDataMap); - } - - public GetResultSetMetadata_args() { - } - - public GetResultSetMetadata_args( - TGetResultSetMetadataReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetResultSetMetadata_args(GetResultSetMetadata_args other) { - if (other.isSetReq()) { - this.req = new TGetResultSetMetadataReq(other.req); - } - } - - public GetResultSetMetadata_args deepCopy() { - return new GetResultSetMetadata_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetResultSetMetadataReq getReq() { - return this.req; - } - - public void setReq(TGetResultSetMetadataReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetResultSetMetadataReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetResultSetMetadata_args) - return this.equals((GetResultSetMetadata_args)that); - return false; - } - - public boolean equals(GetResultSetMetadata_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return 
builder.toHashCode(); - } - - public int compareTo(GetResultSetMetadata_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetResultSetMetadata_args typedOther = (GetResultSetMetadata_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetResultSetMetadata_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetResultSetMetadata_argsStandardSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_argsStandardScheme getScheme() { - return new GetResultSetMetadata_argsStandardScheme(); - } - } - - private static class GetResultSetMetadata_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetResultSetMetadataReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) 
{ - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetResultSetMetadata_argsTupleSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_argsTupleScheme getScheme() { - return new GetResultSetMetadata_argsTupleScheme(); - } - } - - private static class GetResultSetMetadata_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetResultSetMetadataReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetResultSetMetadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetResultSetMetadata_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetResultSetMetadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetResultSetMetadata_resultTupleSchemeFactory()); - } - - private TGetResultSetMetadataResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetResultSetMetadataResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetResultSetMetadata_result.class, metaDataMap); - } - - public GetResultSetMetadata_result() { - } - - public GetResultSetMetadata_result( - TGetResultSetMetadataResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetResultSetMetadata_result(GetResultSetMetadata_result other) { - if (other.isSetSuccess()) { - this.success = new TGetResultSetMetadataResp(other.success); - } - } - - public GetResultSetMetadata_result deepCopy() { - return new GetResultSetMetadata_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetResultSetMetadataResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetResultSetMetadataResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetResultSetMetadataResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetResultSetMetadata_result) - return this.equals((GetResultSetMetadata_result)that); - return false; - } - - public boolean equals(GetResultSetMetadata_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder 
builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetResultSetMetadata_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetResultSetMetadata_result typedOther = (GetResultSetMetadata_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetResultSetMetadata_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetResultSetMetadata_resultStandardSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_resultStandardScheme getScheme() { - return new GetResultSetMetadata_resultStandardScheme(); - } - } - - private static class GetResultSetMetadata_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetResultSetMetadataResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - 
iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetResultSetMetadata_resultTupleSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_resultTupleScheme getScheme() { - return new GetResultSetMetadata_resultTupleScheme(); - } - } - - private static class GetResultSetMetadata_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetResultSetMetadataResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class FetchResults_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FetchResults_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new FetchResults_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new FetchResults_argsTupleSchemeFactory()); - } - - private TFetchResultsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFetchResultsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FetchResults_args.class, metaDataMap); - } - - public FetchResults_args() { - } - - public FetchResults_args( - TFetchResultsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public FetchResults_args(FetchResults_args other) { - if (other.isSetReq()) { - this.req = new TFetchResultsReq(other.req); - } - } - - public FetchResults_args deepCopy() { - return new FetchResults_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TFetchResultsReq getReq() { - return this.req; - } - - public void setReq(TFetchResultsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TFetchResultsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof FetchResults_args) - return this.equals((FetchResults_args)that); - return false; - } - - public boolean equals(FetchResults_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int compareTo(FetchResults_args other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - FetchResults_args typedOther = (FetchResults_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("FetchResults_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class FetchResults_argsStandardSchemeFactory implements SchemeFactory { - public FetchResults_argsStandardScheme getScheme() { - return new FetchResults_argsStandardScheme(); - } - } - - private static class FetchResults_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, FetchResults_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TFetchResultsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, FetchResults_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class 
FetchResults_argsTupleSchemeFactory implements SchemeFactory { - public FetchResults_argsTupleScheme getScheme() { - return new FetchResults_argsTupleScheme(); - } - } - - private static class FetchResults_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, FetchResults_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, FetchResults_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TFetchResultsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class FetchResults_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FetchResults_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new FetchResults_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new FetchResults_resultTupleSchemeFactory()); - } - - private TFetchResultsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFetchResultsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FetchResults_result.class, metaDataMap); - } - - public FetchResults_result() { - } - - public FetchResults_result( - TFetchResultsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public FetchResults_result(FetchResults_result other) { - if (other.isSetSuccess()) { - this.success = new TFetchResultsResp(other.success); - } - } - - public FetchResults_result deepCopy() { - return new FetchResults_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TFetchResultsResp getSuccess() { - return this.success; - } - - public void setSuccess(TFetchResultsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TFetchResultsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof FetchResults_result) - return this.equals((FetchResults_result)that); - return false; - } - - public boolean equals(FetchResults_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - 
if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(FetchResults_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - FetchResults_result typedOther = (FetchResults_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("FetchResults_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class FetchResults_resultStandardSchemeFactory implements SchemeFactory { - public FetchResults_resultStandardScheme getScheme() { - return new FetchResults_resultStandardScheme(); - } - } - - private static class FetchResults_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, FetchResults_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TFetchResultsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, FetchResults_result struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class FetchResults_resultTupleSchemeFactory implements SchemeFactory { - public FetchResults_resultTupleScheme getScheme() { - return new FetchResults_resultTupleScheme(); - } - } - - private static class FetchResults_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, FetchResults_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, FetchResults_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TFetchResultsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetDelegationToken_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetDelegationToken_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetDelegationToken_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetDelegationToken_argsTupleSchemeFactory()); - } - - private TGetDelegationTokenReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetDelegationTokenReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetDelegationToken_args.class, metaDataMap); - } - - public GetDelegationToken_args() { - } - - public GetDelegationToken_args( - TGetDelegationTokenReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public GetDelegationToken_args(GetDelegationToken_args other) { - if (other.isSetReq()) { - this.req = new TGetDelegationTokenReq(other.req); - } - } - - public GetDelegationToken_args deepCopy() { - return new GetDelegationToken_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetDelegationTokenReq getReq() { - return this.req; - } - - public void setReq(TGetDelegationTokenReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetDelegationTokenReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetDelegationToken_args) - return this.equals((GetDelegationToken_args)that); - return false; - } - - public boolean equals(GetDelegationToken_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return builder.toHashCode(); - } - - public int 
compareTo(GetDelegationToken_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetDelegationToken_args typedOther = (GetDelegationToken_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetDelegationToken_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetDelegationToken_argsStandardSchemeFactory implements SchemeFactory { - public GetDelegationToken_argsStandardScheme getScheme() { - return new GetDelegationToken_argsStandardScheme(); - } - } - - private static class GetDelegationToken_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - 
struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetDelegationToken_argsTupleSchemeFactory implements SchemeFactory { - public GetDelegationToken_argsTupleScheme getScheme() { - return new GetDelegationToken_argsTupleScheme(); - } - } - - private static class GetDelegationToken_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetDelegationToken_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetDelegationToken_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetDelegationToken_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetDelegationToken_resultTupleSchemeFactory()); - } - - private TGetDelegationTokenResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetDelegationTokenResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetDelegationToken_result.class, metaDataMap); - } - - public GetDelegationToken_result() { - } - - public GetDelegationToken_result( - TGetDelegationTokenResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public GetDelegationToken_result(GetDelegationToken_result other) { - if (other.isSetSuccess()) { - this.success = new TGetDelegationTokenResp(other.success); - } - } - - public GetDelegationToken_result deepCopy() { - return new GetDelegationToken_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetDelegationTokenResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetDelegationTokenResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetDelegationTokenResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetDelegationToken_result) - return this.equals((GetDelegationToken_result)that); - return false; - } - - public boolean equals(GetDelegationToken_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new 
HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(GetDelegationToken_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - GetDelegationToken_result typedOther = (GetDelegationToken_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetDelegationToken_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetDelegationToken_resultStandardSchemeFactory implements SchemeFactory { - public GetDelegationToken_resultStandardScheme getScheme() { - return new GetDelegationToken_resultStandardScheme(); - } - } - - private static class GetDelegationToken_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } 
- - public void write(org.apache.thrift.protocol.TProtocol oprot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetDelegationToken_resultTupleSchemeFactory implements SchemeFactory { - public GetDelegationToken_resultTupleScheme getScheme() { - return new GetDelegationToken_resultTupleScheme(); - } - } - - private static class GetDelegationToken_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class CancelDelegationToken_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelDelegationToken_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelDelegationToken_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelDelegationToken_argsTupleSchemeFactory()); - } - - private TCancelDelegationTokenReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelDelegationTokenReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelDelegationToken_args.class, metaDataMap); - } - - public CancelDelegationToken_args() { - } - - public CancelDelegationToken_args( - TCancelDelegationTokenReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public CancelDelegationToken_args(CancelDelegationToken_args other) { - if (other.isSetReq()) { - this.req = new TCancelDelegationTokenReq(other.req); - } - } - - public CancelDelegationToken_args deepCopy() { - return new CancelDelegationToken_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCancelDelegationTokenReq getReq() { - return this.req; - } - - public void setReq(TCancelDelegationTokenReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCancelDelegationTokenReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelDelegationToken_args) - return this.equals((CancelDelegationToken_args)that); - return false; - } - - public boolean equals(CancelDelegationToken_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return 
builder.toHashCode(); - } - - public int compareTo(CancelDelegationToken_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CancelDelegationToken_args typedOther = (CancelDelegationToken_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelDelegationToken_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelDelegationToken_argsStandardSchemeFactory implements SchemeFactory { - public CancelDelegationToken_argsStandardScheme getScheme() { - return new CancelDelegationToken_argsStandardScheme(); - } - } - - private static class CancelDelegationToken_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCancelDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if 
(struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CancelDelegationToken_argsTupleSchemeFactory implements SchemeFactory { - public CancelDelegationToken_argsTupleScheme getScheme() { - return new CancelDelegationToken_argsTupleScheme(); - } - } - - private static class CancelDelegationToken_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCancelDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CancelDelegationToken_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelDelegationToken_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelDelegationToken_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelDelegationToken_resultTupleSchemeFactory()); - } - - private TCancelDelegationTokenResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelDelegationTokenResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelDelegationToken_result.class, metaDataMap); - } - - public CancelDelegationToken_result() { - } - - public CancelDelegationToken_result( - TCancelDelegationTokenResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public CancelDelegationToken_result(CancelDelegationToken_result other) { - if (other.isSetSuccess()) { - this.success = new TCancelDelegationTokenResp(other.success); - } - } - - public CancelDelegationToken_result deepCopy() { - return new CancelDelegationToken_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCancelDelegationTokenResp getSuccess() { - return this.success; - } - - public void setSuccess(TCancelDelegationTokenResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCancelDelegationTokenResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelDelegationToken_result) - return this.equals((CancelDelegationToken_result)that); - return false; - } - - public boolean equals(CancelDelegationToken_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - 
HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(CancelDelegationToken_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - CancelDelegationToken_result typedOther = (CancelDelegationToken_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelDelegationToken_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelDelegationToken_resultStandardSchemeFactory implements SchemeFactory { - public CancelDelegationToken_resultStandardScheme getScheme() { - return new CancelDelegationToken_resultStandardScheme(); - } - } - - private static class CancelDelegationToken_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCancelDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - 
iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CancelDelegationToken_resultTupleSchemeFactory implements SchemeFactory { - public CancelDelegationToken_resultTupleScheme getScheme() { - return new CancelDelegationToken_resultTupleScheme(); - } - } - - private static class CancelDelegationToken_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TCancelDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class RenewDelegationToken_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RenewDelegationToken_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new RenewDelegationToken_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new RenewDelegationToken_argsTupleSchemeFactory()); - } - - private TRenewDelegationTokenReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRenewDelegationTokenReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RenewDelegationToken_args.class, metaDataMap); - } - - public RenewDelegationToken_args() { - } - - public RenewDelegationToken_args( - TRenewDelegationTokenReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. - */ - public RenewDelegationToken_args(RenewDelegationToken_args other) { - if (other.isSetReq()) { - this.req = new TRenewDelegationTokenReq(other.req); - } - } - - public RenewDelegationToken_args deepCopy() { - return new RenewDelegationToken_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TRenewDelegationTokenReq getReq() { - return this.req; - } - - public void setReq(TRenewDelegationTokenReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TRenewDelegationTokenReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof RenewDelegationToken_args) - return this.equals((RenewDelegationToken_args)that); - return false; - } - - public boolean equals(RenewDelegationToken_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_req = true && (isSetReq()); - builder.append(present_req); - if (present_req) - builder.append(req); - - return 
builder.toHashCode(); - } - - public int compareTo(RenewDelegationToken_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - RenewDelegationToken_args typedOther = (RenewDelegationToken_args)other; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(typedOther.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, typedOther.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("RenewDelegationToken_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class RenewDelegationToken_argsStandardSchemeFactory implements SchemeFactory { - public RenewDelegationToken_argsStandardScheme getScheme() { - return new RenewDelegationToken_argsStandardScheme(); - } - } - - private static class RenewDelegationToken_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TRenewDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) 
{ - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class RenewDelegationToken_argsTupleSchemeFactory implements SchemeFactory { - public RenewDelegationToken_argsTupleScheme getScheme() { - return new RenewDelegationToken_argsTupleScheme(); - } - } - - private static class RenewDelegationToken_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TRenewDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class RenewDelegationToken_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RenewDelegationToken_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new RenewDelegationToken_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new RenewDelegationToken_resultTupleSchemeFactory()); - } - - private TRenewDelegationTokenResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRenewDelegationTokenResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RenewDelegationToken_result.class, metaDataMap); - } - - public RenewDelegationToken_result() { - } - - public RenewDelegationToken_result( - TRenewDelegationTokenResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. - */ - public RenewDelegationToken_result(RenewDelegationToken_result other) { - if (other.isSetSuccess()) { - this.success = new TRenewDelegationTokenResp(other.success); - } - } - - public RenewDelegationToken_result deepCopy() { - return new RenewDelegationToken_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TRenewDelegationTokenResp getSuccess() { - return this.success; - } - - public void setSuccess(TRenewDelegationTokenResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TRenewDelegationTokenResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof RenewDelegationToken_result) - return this.equals((RenewDelegationToken_result)that); - return false; - } - - public boolean equals(RenewDelegationToken_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder 
builder = new HashCodeBuilder(); - - boolean present_success = true && (isSetSuccess()); - builder.append(present_success); - if (present_success) - builder.append(success); - - return builder.toHashCode(); - } - - public int compareTo(RenewDelegationToken_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - RenewDelegationToken_result typedOther = (RenewDelegationToken_result)other; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("RenewDelegationToken_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class RenewDelegationToken_resultStandardSchemeFactory implements SchemeFactory { - public RenewDelegationToken_resultStandardScheme getScheme() { - return new RenewDelegationToken_resultStandardScheme(); - } - } - - private static class RenewDelegationToken_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TRenewDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - 
iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class RenewDelegationToken_resultTupleSchemeFactory implements SchemeFactory { - public RenewDelegationToken_resultTupleScheme getScheme() { - return new RenewDelegationToken_resultTupleScheme(); - } - } - - private static class RenewDelegationToken_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TRenewDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCLIServiceConstants.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCLIServiceConstants.java deleted file mode 100644 index 25a38b178428a..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCLIServiceConstants.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCLIServiceConstants { - - public static final Set PRIMITIVE_TYPES = new HashSet(); - static { - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.BOOLEAN_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.TINYINT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.SMALLINT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.INT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.BIGINT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.FLOAT_TYPE); - 
PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.DOUBLE_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.STRING_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.TIMESTAMP_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.BINARY_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.DECIMAL_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.NULL_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.DATE_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.VARCHAR_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.CHAR_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_YEAR_MONTH_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_DAY_TIME_TYPE); - } - - public static final Set COMPLEX_TYPES = new HashSet(); - static { - COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.ARRAY_TYPE); - COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.MAP_TYPE); - COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.STRUCT_TYPE); - COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.UNION_TYPE); - COMPLEX_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.USER_DEFINED_TYPE); - } - - public static final Set COLLECTION_TYPES = new HashSet(); - static { - COLLECTION_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.ARRAY_TYPE); - COLLECTION_TYPES.add(org.apache.hive.service.cli.thrift.TTypeId.MAP_TYPE); - } - - public static final Map TYPE_NAMES = new HashMap(); - static { - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.BOOLEAN_TYPE, "BOOLEAN"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.TINYINT_TYPE, "TINYINT"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.SMALLINT_TYPE, "SMALLINT"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.INT_TYPE, "INT"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.BIGINT_TYPE, "BIGINT"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.FLOAT_TYPE, "FLOAT"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.DOUBLE_TYPE, "DOUBLE"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.STRING_TYPE, "STRING"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.TIMESTAMP_TYPE, "TIMESTAMP"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.BINARY_TYPE, "BINARY"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.ARRAY_TYPE, "ARRAY"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.MAP_TYPE, "MAP"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.STRUCT_TYPE, "STRUCT"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.UNION_TYPE, "UNIONTYPE"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.DECIMAL_TYPE, "DECIMAL"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.NULL_TYPE, "NULL"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.DATE_TYPE, "DATE"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.VARCHAR_TYPE, "VARCHAR"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.CHAR_TYPE, "CHAR"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_YEAR_MONTH_TYPE, "INTERVAL_YEAR_MONTH"); - TYPE_NAMES.put(org.apache.hive.service.cli.thrift.TTypeId.INTERVAL_DAY_TIME_TYPE, "INTERVAL_DAY_TIME"); - } - - public static final String CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength"; - - 
public static final String PRECISION = "precision"; - - public static final String SCALE = "scale"; - -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java deleted file mode 100644 index e23fcdd77a1a4..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelDelegationTokenReq.java +++ /dev/null @@ -1,491 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCancelDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelDelegationTokenReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField DELEGATION_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("delegationToken", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelDelegationTokenReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCancelDelegationTokenReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String delegationToken; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - DELEGATION_TOKEN((short)2, "delegationToken"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // DELEGATION_TOKEN - return DELEGATION_TOKEN; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.DELEGATION_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("delegationToken", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelDelegationTokenReq.class, metaDataMap); - } - - public TCancelDelegationTokenReq() { - } - - public TCancelDelegationTokenReq( - TSessionHandle sessionHandle, - String delegationToken) - { - this(); - this.sessionHandle = sessionHandle; - this.delegationToken = delegationToken; - } - - /** - * Performs a deep copy on other. 
- */ - public TCancelDelegationTokenReq(TCancelDelegationTokenReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetDelegationToken()) { - this.delegationToken = other.delegationToken; - } - } - - public TCancelDelegationTokenReq deepCopy() { - return new TCancelDelegationTokenReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.delegationToken = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getDelegationToken() { - return this.delegationToken; - } - - public void setDelegationToken(String delegationToken) { - this.delegationToken = delegationToken; - } - - public void unsetDelegationToken() { - this.delegationToken = null; - } - - /** Returns true if field delegationToken is set (has been assigned a value) and false otherwise */ - public boolean isSetDelegationToken() { - return this.delegationToken != null; - } - - public void setDelegationTokenIsSet(boolean value) { - if (!value) { - this.delegationToken = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case DELEGATION_TOKEN: - if (value == null) { - unsetDelegationToken(); - } else { - setDelegationToken((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case DELEGATION_TOKEN: - return getDelegationToken(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case DELEGATION_TOKEN: - return isSetDelegationToken(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelDelegationTokenReq) - return this.equals((TCancelDelegationTokenReq)that); - return false; - } - - public boolean equals(TCancelDelegationTokenReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_delegationToken = true && this.isSetDelegationToken(); - boolean that_present_delegationToken = true && that.isSetDelegationToken(); - if (this_present_delegationToken || that_present_delegationToken) { - if (!(this_present_delegationToken && that_present_delegationToken)) - 
return false; - if (!this.delegationToken.equals(that.delegationToken)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_delegationToken = true && (isSetDelegationToken()); - builder.append(present_delegationToken); - if (present_delegationToken) - builder.append(delegationToken); - - return builder.toHashCode(); - } - - public int compareTo(TCancelDelegationTokenReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCancelDelegationTokenReq typedOther = (TCancelDelegationTokenReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDelegationToken()).compareTo(typedOther.isSetDelegationToken()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDelegationToken()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delegationToken, typedOther.delegationToken); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelDelegationTokenReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("delegationToken:"); - if (this.delegationToken == null) { - sb.append("null"); - } else { - sb.append(this.delegationToken); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetDelegationToken()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'delegationToken' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelDelegationTokenReqStandardSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenReqStandardScheme getScheme() { - return new TCancelDelegationTokenReqStandardScheme(); - } - } - - private static class TCancelDelegationTokenReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // DELEGATION_TOKEN - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.delegationToken != null) { - oprot.writeFieldBegin(DELEGATION_TOKEN_FIELD_DESC); - oprot.writeString(struct.delegationToken); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelDelegationTokenReqTupleSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenReqTupleScheme getScheme() { - return new TCancelDelegationTokenReqTupleScheme(); - } - } - - private static class TCancelDelegationTokenReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.delegationToken); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - 
TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java deleted file mode 100644 index 77c9ee77ec59b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelDelegationTokenResp.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCancelDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelDelegationTokenResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelDelegationTokenRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCancelDelegationTokenRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelDelegationTokenResp.class, metaDataMap); - } - - public TCancelDelegationTokenResp() { - } - - public TCancelDelegationTokenResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TCancelDelegationTokenResp(TCancelDelegationTokenResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCancelDelegationTokenResp deepCopy() { - return new TCancelDelegationTokenResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelDelegationTokenResp) - return this.equals((TCancelDelegationTokenResp)that); - return false; - } - - public boolean equals(TCancelDelegationTokenResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - 
builder.append(status); - - return builder.toHashCode(); - } - - public int compareTo(TCancelDelegationTokenResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCancelDelegationTokenResp typedOther = (TCancelDelegationTokenResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelDelegationTokenResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelDelegationTokenRespStandardSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenRespStandardScheme getScheme() { - return new TCancelDelegationTokenRespStandardScheme(); - } - } - - private static class TCancelDelegationTokenRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - 
public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelDelegationTokenRespTupleSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenRespTupleScheme getScheme() { - return new TCancelDelegationTokenRespTupleScheme(); - } - } - - private static class TCancelDelegationTokenRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelOperationReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelOperationReq.java deleted file mode 100644 index 45eac48ab12d3..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelOperationReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCancelOperationReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelOperationReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelOperationReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCancelOperationReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelOperationReq.class, metaDataMap); - } - - public TCancelOperationReq() { - } - - public TCancelOperationReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TCancelOperationReq(TCancelOperationReq other) { - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TCancelOperationReq deepCopy() { - return new TCancelOperationReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelOperationReq) - return this.equals((TCancelOperationReq)that); - return false; - } - - public boolean equals(TCancelOperationReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TCancelOperationReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCancelOperationReq typedOther = (TCancelOperationReq)other; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelOperationReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelOperationReqStandardSchemeFactory implements SchemeFactory { - public TCancelOperationReqStandardScheme getScheme() { - return new TCancelOperationReqStandardScheme(); - } - } - - private static class TCancelOperationReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelOperationReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelOperationReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelOperationReqTupleSchemeFactory implements SchemeFactory { - public TCancelOperationReqTupleScheme getScheme() { - return new TCancelOperationReqTupleScheme(); - } - } - - private static class TCancelOperationReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelOperationReq struct) throws 
org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelOperationReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelOperationResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelOperationResp.java deleted file mode 100644 index 2a39414d601aa..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCancelOperationResp.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCancelOperationResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelOperationResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelOperationRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCancelOperationRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelOperationResp.class, metaDataMap); - } - - public TCancelOperationResp() { - } - - public TCancelOperationResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TCancelOperationResp(TCancelOperationResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCancelOperationResp deepCopy() { - return new TCancelOperationResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelOperationResp) - return this.equals((TCancelOperationResp)that); - return false; - } - - public boolean equals(TCancelOperationResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - 
return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - return builder.toHashCode(); - } - - public int compareTo(TCancelOperationResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCancelOperationResp typedOther = (TCancelOperationResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelOperationResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelOperationRespStandardSchemeFactory implements SchemeFactory { - public TCancelOperationRespStandardScheme getScheme() { - return new TCancelOperationRespStandardScheme(); - } - } - - private static class TCancelOperationRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelOperationResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelOperationResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelOperationRespTupleSchemeFactory implements SchemeFactory { - public TCancelOperationRespTupleScheme getScheme() { - return new TCancelOperationRespTupleScheme(); - } - } - - private static class TCancelOperationRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseOperationReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseOperationReq.java deleted file mode 100644 index 0cbb7ccced073..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseOperationReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE 
SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCloseOperationReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseOperationReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseOperationReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCloseOperationReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseOperationReq.class, metaDataMap); - } - - public TCloseOperationReq() { - } - - public TCloseOperationReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. - */ - public TCloseOperationReq(TCloseOperationReq other) { - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TCloseOperationReq deepCopy() { - return new TCloseOperationReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseOperationReq) - return this.equals((TCloseOperationReq)that); - return false; - } - - public boolean equals(TCloseOperationReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) 
- return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TCloseOperationReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCloseOperationReq typedOther = (TCloseOperationReq)other; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseOperationReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseOperationReqStandardSchemeFactory implements SchemeFactory { - public TCloseOperationReqStandardScheme getScheme() { - return new TCloseOperationReqStandardScheme(); - } - } - - private static class TCloseOperationReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseOperationReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseOperationReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseOperationReqTupleSchemeFactory implements SchemeFactory { - public TCloseOperationReqTupleScheme getScheme() { - return new TCloseOperationReqTupleScheme(); - } - } - - private static class TCloseOperationReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseOperationReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCloseOperationReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseOperationResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseOperationResp.java deleted file mode 100644 index 7334d67173d7b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseOperationResp.java 
+++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCloseOperationResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseOperationResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseOperationRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCloseOperationRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseOperationResp.class, metaDataMap); - } - - public TCloseOperationResp() { - } - - public TCloseOperationResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TCloseOperationResp(TCloseOperationResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCloseOperationResp deepCopy() { - return new TCloseOperationResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseOperationResp) - return this.equals((TCloseOperationResp)that); - return false; - } - - public boolean equals(TCloseOperationResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - return builder.toHashCode(); - } - - public int 
compareTo(TCloseOperationResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCloseOperationResp typedOther = (TCloseOperationResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseOperationResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseOperationRespStandardSchemeFactory implements SchemeFactory { - public TCloseOperationRespStandardScheme getScheme() { - return new TCloseOperationRespStandardScheme(); - } - } - - private static class TCloseOperationRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseOperationResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseOperationResp struct) throws org.apache.thrift.TException { - 
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseOperationRespTupleSchemeFactory implements SchemeFactory { - public TCloseOperationRespTupleScheme getScheme() { - return new TCloseOperationRespTupleScheme(); - } - } - - private static class TCloseOperationRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCloseOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseSessionReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseSessionReq.java deleted file mode 100644 index 027e8295436b0..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseSessionReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCloseSessionReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseSessionReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseSessionReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCloseSessionReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseSessionReq.class, metaDataMap); - } - - public TCloseSessionReq() { - } - - public TCloseSessionReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TCloseSessionReq(TCloseSessionReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TCloseSessionReq deepCopy() { - return new TCloseSessionReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseSessionReq) - return this.equals((TCloseSessionReq)that); - return false; - } - - public boolean equals(TCloseSessionReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - return builder.toHashCode(); - } - - public int compareTo(TCloseSessionReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCloseSessionReq typedOther = (TCloseSessionReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseSessionReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseSessionReqStandardSchemeFactory implements SchemeFactory { - public TCloseSessionReqStandardScheme getScheme() { - return new TCloseSessionReqStandardScheme(); - } - } - - private static class TCloseSessionReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseSessionReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseSessionReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseSessionReqTupleSchemeFactory implements SchemeFactory { - public TCloseSessionReqTupleScheme getScheme() { - return new TCloseSessionReqTupleScheme(); - } - } - - private static class TCloseSessionReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseSessionReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCloseSessionReq struct) 
throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseSessionResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseSessionResp.java deleted file mode 100644 index 168c8fc775e33..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TCloseSessionResp.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TCloseSessionResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseSessionResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseSessionRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCloseSessionRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseSessionResp.class, metaDataMap); - } - - public TCloseSessionResp() { - } - - public TCloseSessionResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TCloseSessionResp(TCloseSessionResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCloseSessionResp deepCopy() { - return new TCloseSessionResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseSessionResp) - return this.equals((TCloseSessionResp)that); - return false; - } - - public boolean equals(TCloseSessionResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - return builder.toHashCode(); - } - - public int 
compareTo(TCloseSessionResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TCloseSessionResp typedOther = (TCloseSessionResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseSessionResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseSessionRespStandardSchemeFactory implements SchemeFactory { - public TCloseSessionRespStandardScheme getScheme() { - return new TCloseSessionRespStandardScheme(); - } - } - - private static class TCloseSessionRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseSessionResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseSessionResp struct) throws org.apache.thrift.TException { - struct.validate(); - - 
oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseSessionRespTupleSchemeFactory implements SchemeFactory { - public TCloseSessionRespTupleScheme getScheme() { - return new TCloseSessionRespTupleScheme(); - } - } - - private static class TCloseSessionRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCloseSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumn.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumn.java deleted file mode 100644 index fc2171dc99e4c..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumn.java +++ /dev/null @@ -1,732 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TColumn extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); - private static final org.apache.thrift.protocol.TField BOOL_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("boolVal", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField BYTE_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("byteVal", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField I16_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i16Val", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField I32_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i32Val", org.apache.thrift.protocol.TType.STRUCT, (short)4); - private static final org.apache.thrift.protocol.TField I64_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i64Val", org.apache.thrift.protocol.TType.STRUCT, (short)5); - private static final org.apache.thrift.protocol.TField DOUBLE_VAL_FIELD_DESC = 
new org.apache.thrift.protocol.TField("doubleVal", org.apache.thrift.protocol.TType.STRUCT, (short)6); - private static final org.apache.thrift.protocol.TField STRING_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("stringVal", org.apache.thrift.protocol.TType.STRUCT, (short)7); - private static final org.apache.thrift.protocol.TField BINARY_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("binaryVal", org.apache.thrift.protocol.TType.STRUCT, (short)8); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - BOOL_VAL((short)1, "boolVal"), - BYTE_VAL((short)2, "byteVal"), - I16_VAL((short)3, "i16Val"), - I32_VAL((short)4, "i32Val"), - I64_VAL((short)5, "i64Val"), - DOUBLE_VAL((short)6, "doubleVal"), - STRING_VAL((short)7, "stringVal"), - BINARY_VAL((short)8, "binaryVal"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // BOOL_VAL - return BOOL_VAL; - case 2: // BYTE_VAL - return BYTE_VAL; - case 3: // I16_VAL - return I16_VAL; - case 4: // I32_VAL - return I32_VAL; - case 5: // I64_VAL - return I64_VAL; - case 6: // DOUBLE_VAL - return DOUBLE_VAL; - case 7: // STRING_VAL - return STRING_VAL; - case 8: // BINARY_VAL - return BINARY_VAL; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.BOOL_VAL, new org.apache.thrift.meta_data.FieldMetaData("boolVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TBoolColumn.class))); - tmpMap.put(_Fields.BYTE_VAL, new org.apache.thrift.meta_data.FieldMetaData("byteVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TByteColumn.class))); - tmpMap.put(_Fields.I16_VAL, new org.apache.thrift.meta_data.FieldMetaData("i16Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI16Column.class))); - tmpMap.put(_Fields.I32_VAL, new org.apache.thrift.meta_data.FieldMetaData("i32Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI32Column.class))); - tmpMap.put(_Fields.I64_VAL, new org.apache.thrift.meta_data.FieldMetaData("i64Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI64Column.class))); - tmpMap.put(_Fields.DOUBLE_VAL, new org.apache.thrift.meta_data.FieldMetaData("doubleVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDoubleColumn.class))); - tmpMap.put(_Fields.STRING_VAL, new org.apache.thrift.meta_data.FieldMetaData("stringVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStringColumn.class))); - tmpMap.put(_Fields.BINARY_VAL, new org.apache.thrift.meta_data.FieldMetaData("binaryVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TBinaryColumn.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumn.class, metaDataMap); - } - - public TColumn() { - super(); - } - - public TColumn(TColumn._Fields setField, Object value) { - super(setField, value); - } - - public TColumn(TColumn other) { - super(other); - } - public TColumn deepCopy() { - return new TColumn(this); - } - - public static TColumn boolVal(TBoolColumn value) { - TColumn x = new TColumn(); - x.setBoolVal(value); - return x; - } - - public static TColumn byteVal(TByteColumn value) { - TColumn x = new TColumn(); - x.setByteVal(value); - return x; - } - - public static TColumn i16Val(TI16Column value) { - TColumn x = new TColumn(); - x.setI16Val(value); - return x; - } - - public static TColumn i32Val(TI32Column value) { - TColumn x = new TColumn(); - x.setI32Val(value); - return x; - } - - public static TColumn 
i64Val(TI64Column value) { - TColumn x = new TColumn(); - x.setI64Val(value); - return x; - } - - public static TColumn doubleVal(TDoubleColumn value) { - TColumn x = new TColumn(); - x.setDoubleVal(value); - return x; - } - - public static TColumn stringVal(TStringColumn value) { - TColumn x = new TColumn(); - x.setStringVal(value); - return x; - } - - public static TColumn binaryVal(TBinaryColumn value) { - TColumn x = new TColumn(); - x.setBinaryVal(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case BOOL_VAL: - if (value instanceof TBoolColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TBoolColumn for field 'boolVal', but got " + value.getClass().getSimpleName()); - case BYTE_VAL: - if (value instanceof TByteColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TByteColumn for field 'byteVal', but got " + value.getClass().getSimpleName()); - case I16_VAL: - if (value instanceof TI16Column) { - break; - } - throw new ClassCastException("Was expecting value of type TI16Column for field 'i16Val', but got " + value.getClass().getSimpleName()); - case I32_VAL: - if (value instanceof TI32Column) { - break; - } - throw new ClassCastException("Was expecting value of type TI32Column for field 'i32Val', but got " + value.getClass().getSimpleName()); - case I64_VAL: - if (value instanceof TI64Column) { - break; - } - throw new ClassCastException("Was expecting value of type TI64Column for field 'i64Val', but got " + value.getClass().getSimpleName()); - case DOUBLE_VAL: - if (value instanceof TDoubleColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TDoubleColumn for field 'doubleVal', but got " + value.getClass().getSimpleName()); - case STRING_VAL: - if (value instanceof TStringColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TStringColumn for field 'stringVal', but got " + value.getClass().getSimpleName()); - case BINARY_VAL: - if (value instanceof TBinaryColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TBinaryColumn for field 'binaryVal', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - if (field.type == BOOL_VAL_FIELD_DESC.type) { - TBoolColumn boolVal; - boolVal = new TBoolColumn(); - boolVal.read(iprot); - return boolVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BYTE_VAL: - if (field.type == BYTE_VAL_FIELD_DESC.type) { - TByteColumn byteVal; - byteVal = new TByteColumn(); - byteVal.read(iprot); - return byteVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I16_VAL: - if (field.type == I16_VAL_FIELD_DESC.type) { - TI16Column i16Val; - i16Val = new TI16Column(); - i16Val.read(iprot); - return i16Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I32_VAL: - if (field.type == I32_VAL_FIELD_DESC.type) { - TI32Column i32Val; - i32Val = new TI32Column(); - i32Val.read(iprot); 
- return i32Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I64_VAL: - if (field.type == I64_VAL_FIELD_DESC.type) { - TI64Column i64Val; - i64Val = new TI64Column(); - i64Val.read(iprot); - return i64Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case DOUBLE_VAL: - if (field.type == DOUBLE_VAL_FIELD_DESC.type) { - TDoubleColumn doubleVal; - doubleVal = new TDoubleColumn(); - doubleVal.read(iprot); - return doubleVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRING_VAL: - if (field.type == STRING_VAL_FIELD_DESC.type) { - TStringColumn stringVal; - stringVal = new TStringColumn(); - stringVal.read(iprot); - return stringVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BINARY_VAL: - if (field.type == BINARY_VAL_FIELD_DESC.type) { - TBinaryColumn binaryVal; - binaryVal = new TBinaryColumn(); - binaryVal.read(iprot); - return binaryVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolColumn boolVal = (TBoolColumn)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteColumn byteVal = (TByteColumn)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Column i16Val = (TI16Column)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Column i32Val = (TI32Column)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Column i64Val = (TI64Column)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleColumn doubleVal = (TDoubleColumn)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringColumn stringVal = (TStringColumn)value_; - stringVal.write(oprot); - return; - case BINARY_VAL: - TBinaryColumn binaryVal = (TBinaryColumn)value_; - binaryVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - TBoolColumn boolVal; - boolVal = new TBoolColumn(); - boolVal.read(iprot); - return boolVal; - case BYTE_VAL: - TByteColumn byteVal; - byteVal = new TByteColumn(); - byteVal.read(iprot); - return byteVal; - case I16_VAL: - TI16Column i16Val; - i16Val = new TI16Column(); - i16Val.read(iprot); - return i16Val; - case I32_VAL: - TI32Column i32Val; - i32Val = new TI32Column(); - i32Val.read(iprot); - return i32Val; - case I64_VAL: - TI64Column i64Val; - i64Val = new TI64Column(); - i64Val.read(iprot); - return i64Val; - case DOUBLE_VAL: - TDoubleColumn doubleVal; - doubleVal = new TDoubleColumn(); - doubleVal.read(iprot); - return doubleVal; - case STRING_VAL: - TStringColumn stringVal; - stringVal = new TStringColumn(); - stringVal.read(iprot); - return stringVal; - case BINARY_VAL: - TBinaryColumn binaryVal; - binaryVal = new TBinaryColumn(); - binaryVal.read(iprot); - return 
binaryVal; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolColumn boolVal = (TBoolColumn)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteColumn byteVal = (TByteColumn)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Column i16Val = (TI16Column)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Column i32Val = (TI32Column)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Column i64Val = (TI64Column)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleColumn doubleVal = (TDoubleColumn)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringColumn stringVal = (TStringColumn)value_; - stringVal.write(oprot); - return; - case BINARY_VAL: - TBinaryColumn binaryVal = (TBinaryColumn)value_; - binaryVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case BOOL_VAL: - return BOOL_VAL_FIELD_DESC; - case BYTE_VAL: - return BYTE_VAL_FIELD_DESC; - case I16_VAL: - return I16_VAL_FIELD_DESC; - case I32_VAL: - return I32_VAL_FIELD_DESC; - case I64_VAL: - return I64_VAL_FIELD_DESC; - case DOUBLE_VAL: - return DOUBLE_VAL_FIELD_DESC; - case STRING_VAL: - return STRING_VAL_FIELD_DESC; - case BINARY_VAL: - return BINARY_VAL_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public TBoolColumn getBoolVal() { - if (getSetField() == _Fields.BOOL_VAL) { - return (TBoolColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'boolVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBoolVal(TBoolColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BOOL_VAL; - value_ = value; - } - - public TByteColumn getByteVal() { - if (getSetField() == _Fields.BYTE_VAL) { - return (TByteColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'byteVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setByteVal(TByteColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BYTE_VAL; - value_ = value; - } - - public TI16Column getI16Val() { - if (getSetField() == _Fields.I16_VAL) { - return (TI16Column)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i16Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI16Val(TI16Column value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I16_VAL; - value_ = value; - } - - public TI32Column getI32Val() { - if (getSetField() == _Fields.I32_VAL) { - return (TI32Column)getFieldValue(); 
- } else { - throw new RuntimeException("Cannot get field 'i32Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI32Val(TI32Column value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I32_VAL; - value_ = value; - } - - public TI64Column getI64Val() { - if (getSetField() == _Fields.I64_VAL) { - return (TI64Column)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i64Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI64Val(TI64Column value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I64_VAL; - value_ = value; - } - - public TDoubleColumn getDoubleVal() { - if (getSetField() == _Fields.DOUBLE_VAL) { - return (TDoubleColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'doubleVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setDoubleVal(TDoubleColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.DOUBLE_VAL; - value_ = value; - } - - public TStringColumn getStringVal() { - if (getSetField() == _Fields.STRING_VAL) { - return (TStringColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'stringVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringVal(TStringColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRING_VAL; - value_ = value; - } - - public TBinaryColumn getBinaryVal() { - if (getSetField() == _Fields.BINARY_VAL) { - return (TBinaryColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'binaryVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBinaryVal(TBinaryColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BINARY_VAL; - value_ = value; - } - - public boolean isSetBoolVal() { - return setField_ == _Fields.BOOL_VAL; - } - - - public boolean isSetByteVal() { - return setField_ == _Fields.BYTE_VAL; - } - - - public boolean isSetI16Val() { - return setField_ == _Fields.I16_VAL; - } - - - public boolean isSetI32Val() { - return setField_ == _Fields.I32_VAL; - } - - - public boolean isSetI64Val() { - return setField_ == _Fields.I64_VAL; - } - - - public boolean isSetDoubleVal() { - return setField_ == _Fields.DOUBLE_VAL; - } - - - public boolean isSetStringVal() { - return setField_ == _Fields.STRING_VAL; - } - - - public boolean isSetBinaryVal() { - return setField_ == _Fields.BINARY_VAL; - } - - - public boolean equals(Object other) { - if (other instanceof TColumn) { - return equals((TColumn)other); - } else { - return false; - } - } - - public boolean equals(TColumn other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TColumn other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - HashCodeBuilder hcb = new HashCodeBuilder(); - hcb.append(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - 
hcb.append(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - hcb.append(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - hcb.append(value); - } - } - return hcb.toHashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumnDesc.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumnDesc.java deleted file mode 100644 index 247db6489457f..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumnDesc.java +++ /dev/null @@ -1,700 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TColumnDesc implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnDesc"); - - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TYPE_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("typeDesc", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField POSITION_FIELD_DESC = new org.apache.thrift.protocol.TField("position", org.apache.thrift.protocol.TType.I32, (short)3); - private static final org.apache.thrift.protocol.TField COMMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("comment", org.apache.thrift.protocol.TType.STRING, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TColumnDescStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TColumnDescTupleSchemeFactory()); - } - - private String columnName; // required - private TTypeDesc 
typeDesc; // required - private int position; // required - private String comment; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - COLUMN_NAME((short)1, "columnName"), - TYPE_DESC((short)2, "typeDesc"), - POSITION((short)3, "position"), - COMMENT((short)4, "comment"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // COLUMN_NAME - return COLUMN_NAME; - case 2: // TYPE_DESC - return TYPE_DESC; - case 3: // POSITION - return POSITION; - case 4: // COMMENT - return COMMENT; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __POSITION_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.COMMENT}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TYPE_DESC, new org.apache.thrift.meta_data.FieldMetaData("typeDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeDesc.class))); - tmpMap.put(_Fields.POSITION, new org.apache.thrift.meta_data.FieldMetaData("position", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.COMMENT, new org.apache.thrift.meta_data.FieldMetaData("comment", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnDesc.class, metaDataMap); - } - - public TColumnDesc() { - } - - public TColumnDesc( - String columnName, - TTypeDesc typeDesc, - int position) - { - this(); - this.columnName = columnName; - this.typeDesc = typeDesc; - this.position = position; - setPositionIsSet(true); - } - - /** - * 
Performs a deep copy on other. - */ - public TColumnDesc(TColumnDesc other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetColumnName()) { - this.columnName = other.columnName; - } - if (other.isSetTypeDesc()) { - this.typeDesc = new TTypeDesc(other.typeDesc); - } - this.position = other.position; - if (other.isSetComment()) { - this.comment = other.comment; - } - } - - public TColumnDesc deepCopy() { - return new TColumnDesc(this); - } - - @Override - public void clear() { - this.columnName = null; - this.typeDesc = null; - setPositionIsSet(false); - this.position = 0; - this.comment = null; - } - - public String getColumnName() { - return this.columnName; - } - - public void setColumnName(String columnName) { - this.columnName = columnName; - } - - public void unsetColumnName() { - this.columnName = null; - } - - /** Returns true if field columnName is set (has been assigned a value) and false otherwise */ - public boolean isSetColumnName() { - return this.columnName != null; - } - - public void setColumnNameIsSet(boolean value) { - if (!value) { - this.columnName = null; - } - } - - public TTypeDesc getTypeDesc() { - return this.typeDesc; - } - - public void setTypeDesc(TTypeDesc typeDesc) { - this.typeDesc = typeDesc; - } - - public void unsetTypeDesc() { - this.typeDesc = null; - } - - /** Returns true if field typeDesc is set (has been assigned a value) and false otherwise */ - public boolean isSetTypeDesc() { - return this.typeDesc != null; - } - - public void setTypeDescIsSet(boolean value) { - if (!value) { - this.typeDesc = null; - } - } - - public int getPosition() { - return this.position; - } - - public void setPosition(int position) { - this.position = position; - setPositionIsSet(true); - } - - public void unsetPosition() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __POSITION_ISSET_ID); - } - - /** Returns true if field position is set (has been assigned a value) and false otherwise */ - public boolean isSetPosition() { - return EncodingUtils.testBit(__isset_bitfield, __POSITION_ISSET_ID); - } - - public void setPositionIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __POSITION_ISSET_ID, value); - } - - public String getComment() { - return this.comment; - } - - public void setComment(String comment) { - this.comment = comment; - } - - public void unsetComment() { - this.comment = null; - } - - /** Returns true if field comment is set (has been assigned a value) and false otherwise */ - public boolean isSetComment() { - return this.comment != null; - } - - public void setCommentIsSet(boolean value) { - if (!value) { - this.comment = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case COLUMN_NAME: - if (value == null) { - unsetColumnName(); - } else { - setColumnName((String)value); - } - break; - - case TYPE_DESC: - if (value == null) { - unsetTypeDesc(); - } else { - setTypeDesc((TTypeDesc)value); - } - break; - - case POSITION: - if (value == null) { - unsetPosition(); - } else { - setPosition((Integer)value); - } - break; - - case COMMENT: - if (value == null) { - unsetComment(); - } else { - setComment((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case COLUMN_NAME: - return getColumnName(); - - case TYPE_DESC: - return getTypeDesc(); - - case POSITION: - return Integer.valueOf(getPosition()); - - case COMMENT: - return getComment(); - - } - throw new IllegalStateException(); - } - - /** 
Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case COLUMN_NAME: - return isSetColumnName(); - case TYPE_DESC: - return isSetTypeDesc(); - case POSITION: - return isSetPosition(); - case COMMENT: - return isSetComment(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TColumnDesc) - return this.equals((TColumnDesc)that); - return false; - } - - public boolean equals(TColumnDesc that) { - if (that == null) - return false; - - boolean this_present_columnName = true && this.isSetColumnName(); - boolean that_present_columnName = true && that.isSetColumnName(); - if (this_present_columnName || that_present_columnName) { - if (!(this_present_columnName && that_present_columnName)) - return false; - if (!this.columnName.equals(that.columnName)) - return false; - } - - boolean this_present_typeDesc = true && this.isSetTypeDesc(); - boolean that_present_typeDesc = true && that.isSetTypeDesc(); - if (this_present_typeDesc || that_present_typeDesc) { - if (!(this_present_typeDesc && that_present_typeDesc)) - return false; - if (!this.typeDesc.equals(that.typeDesc)) - return false; - } - - boolean this_present_position = true; - boolean that_present_position = true; - if (this_present_position || that_present_position) { - if (!(this_present_position && that_present_position)) - return false; - if (this.position != that.position) - return false; - } - - boolean this_present_comment = true && this.isSetComment(); - boolean that_present_comment = true && that.isSetComment(); - if (this_present_comment || that_present_comment) { - if (!(this_present_comment && that_present_comment)) - return false; - if (!this.comment.equals(that.comment)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_columnName = true && (isSetColumnName()); - builder.append(present_columnName); - if (present_columnName) - builder.append(columnName); - - boolean present_typeDesc = true && (isSetTypeDesc()); - builder.append(present_typeDesc); - if (present_typeDesc) - builder.append(typeDesc); - - boolean present_position = true; - builder.append(present_position); - if (present_position) - builder.append(position); - - boolean present_comment = true && (isSetComment()); - builder.append(present_comment); - if (present_comment) - builder.append(comment); - - return builder.toHashCode(); - } - - public int compareTo(TColumnDesc other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TColumnDesc typedOther = (TColumnDesc)other; - - lastComparison = Boolean.valueOf(isSetColumnName()).compareTo(typedOther.isSetColumnName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumnName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnName, typedOther.columnName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTypeDesc()).compareTo(typedOther.isSetTypeDesc()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTypeDesc()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.typeDesc, typedOther.typeDesc); - if 
(lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetPosition()).compareTo(typedOther.isSetPosition()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetPosition()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.position, typedOther.position); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetComment()).compareTo(typedOther.isSetComment()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetComment()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.comment, typedOther.comment); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TColumnDesc("); - boolean first = true; - - sb.append("columnName:"); - if (this.columnName == null) { - sb.append("null"); - } else { - sb.append(this.columnName); - } - first = false; - if (!first) sb.append(", "); - sb.append("typeDesc:"); - if (this.typeDesc == null) { - sb.append("null"); - } else { - sb.append(this.typeDesc); - } - first = false; - if (!first) sb.append(", "); - sb.append("position:"); - sb.append(this.position); - first = false; - if (isSetComment()) { - if (!first) sb.append(", "); - sb.append("comment:"); - if (this.comment == null) { - sb.append("null"); - } else { - sb.append(this.comment); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetColumnName()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'columnName' is unset! Struct:" + toString()); - } - - if (!isSetTypeDesc()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'typeDesc' is unset! Struct:" + toString()); - } - - if (!isSetPosition()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'position' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (typeDesc != null) { - typeDesc.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TColumnDescStandardSchemeFactory implements SchemeFactory { - public TColumnDescStandardScheme getScheme() { - return new TColumnDescStandardScheme(); - } - } - - private static class TColumnDescStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnDesc struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // COLUMN_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TYPE_DESC - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.typeDesc = new TTypeDesc(); - struct.typeDesc.read(iprot); - struct.setTypeDescIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // POSITION - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.position = iprot.readI32(); - struct.setPositionIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // COMMENT - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.comment = iprot.readString(); - struct.setCommentIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnDesc struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.columnName != null) { - oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC); - oprot.writeString(struct.columnName); - oprot.writeFieldEnd(); - } - if (struct.typeDesc != null) { - oprot.writeFieldBegin(TYPE_DESC_FIELD_DESC); - struct.typeDesc.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(POSITION_FIELD_DESC); - oprot.writeI32(struct.position); - oprot.writeFieldEnd(); - if (struct.comment != null) { - if (struct.isSetComment()) { - oprot.writeFieldBegin(COMMENT_FIELD_DESC); - oprot.writeString(struct.comment); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TColumnDescTupleSchemeFactory implements SchemeFactory { - public TColumnDescTupleScheme getScheme() { - return new TColumnDescTupleScheme(); - } - } - - private static class TColumnDescTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TColumnDesc struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeString(struct.columnName); - struct.typeDesc.write(oprot); - oprot.writeI32(struct.position); - BitSet optionals = new BitSet(); - if (struct.isSetComment()) { - optionals.set(0); - } - 
oprot.writeBitSet(optionals, 1); - if (struct.isSetComment()) { - oprot.writeString(struct.comment); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TColumnDesc struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - struct.typeDesc = new TTypeDesc(); - struct.typeDesc.read(iprot); - struct.setTypeDescIsSet(true); - struct.position = iprot.readI32(); - struct.setPositionIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.comment = iprot.readString(); - struct.setCommentIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumnValue.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumnValue.java deleted file mode 100644 index 8504c6d608d42..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TColumnValue.java +++ /dev/null @@ -1,671 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TColumnValue extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnValue"); - private static final org.apache.thrift.protocol.TField BOOL_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("boolVal", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField BYTE_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("byteVal", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField I16_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i16Val", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField I32_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i32Val", org.apache.thrift.protocol.TType.STRUCT, (short)4); - private static final org.apache.thrift.protocol.TField I64_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i64Val", org.apache.thrift.protocol.TType.STRUCT, (short)5); - private static final org.apache.thrift.protocol.TField DOUBLE_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("doubleVal", org.apache.thrift.protocol.TType.STRUCT, (short)6); - private static final org.apache.thrift.protocol.TField STRING_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("stringVal", org.apache.thrift.protocol.TType.STRUCT, (short)7); - - /** The set 
of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - BOOL_VAL((short)1, "boolVal"), - BYTE_VAL((short)2, "byteVal"), - I16_VAL((short)3, "i16Val"), - I32_VAL((short)4, "i32Val"), - I64_VAL((short)5, "i64Val"), - DOUBLE_VAL((short)6, "doubleVal"), - STRING_VAL((short)7, "stringVal"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // BOOL_VAL - return BOOL_VAL; - case 2: // BYTE_VAL - return BYTE_VAL; - case 3: // I16_VAL - return I16_VAL; - case 4: // I32_VAL - return I32_VAL; - case 5: // I64_VAL - return I64_VAL; - case 6: // DOUBLE_VAL - return DOUBLE_VAL; - case 7: // STRING_VAL - return STRING_VAL; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.BOOL_VAL, new org.apache.thrift.meta_data.FieldMetaData("boolVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TBoolValue.class))); - tmpMap.put(_Fields.BYTE_VAL, new org.apache.thrift.meta_data.FieldMetaData("byteVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TByteValue.class))); - tmpMap.put(_Fields.I16_VAL, new org.apache.thrift.meta_data.FieldMetaData("i16Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI16Value.class))); - tmpMap.put(_Fields.I32_VAL, new org.apache.thrift.meta_data.FieldMetaData("i32Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI32Value.class))); - tmpMap.put(_Fields.I64_VAL, new org.apache.thrift.meta_data.FieldMetaData("i64Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI64Value.class))); - tmpMap.put(_Fields.DOUBLE_VAL, new org.apache.thrift.meta_data.FieldMetaData("doubleVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDoubleValue.class))); - tmpMap.put(_Fields.STRING_VAL, new org.apache.thrift.meta_data.FieldMetaData("stringVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStringValue.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnValue.class, metaDataMap); - } - - public TColumnValue() { - super(); - } - - public TColumnValue(TColumnValue._Fields setField, Object value) { - super(setField, value); - } - - public TColumnValue(TColumnValue other) { - super(other); - } - public TColumnValue deepCopy() { - return new TColumnValue(this); - } - - public static TColumnValue boolVal(TBoolValue value) { - TColumnValue x = new TColumnValue(); - x.setBoolVal(value); - return x; - } - - public static TColumnValue byteVal(TByteValue value) { - TColumnValue x = new TColumnValue(); - x.setByteVal(value); - return x; - } - - public static TColumnValue i16Val(TI16Value value) { - TColumnValue x = new TColumnValue(); - x.setI16Val(value); - return x; - } - - public static TColumnValue i32Val(TI32Value value) { - TColumnValue x = new TColumnValue(); - x.setI32Val(value); - return x; - } - - public static TColumnValue i64Val(TI64Value value) { - TColumnValue x = new TColumnValue(); - x.setI64Val(value); - return x; - } - - public static TColumnValue doubleVal(TDoubleValue value) { - TColumnValue x = new TColumnValue(); - x.setDoubleVal(value); - return x; - } - - public static TColumnValue stringVal(TStringValue value) { - TColumnValue x = new TColumnValue(); - x.setStringVal(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case BOOL_VAL: - if (value instanceof TBoolValue) { - break; - } - throw new ClassCastException("Was expecting value of type TBoolValue for field 'boolVal', but got " + value.getClass().getSimpleName()); - case BYTE_VAL: - if (value instanceof TByteValue) { - break; - } - throw new ClassCastException("Was expecting value of type TByteValue for field 'byteVal', but got " + value.getClass().getSimpleName()); - case I16_VAL: - if (value instanceof TI16Value) { - break; - } - throw new ClassCastException("Was expecting value of type TI16Value for field 'i16Val', but got " + value.getClass().getSimpleName()); - case I32_VAL: - if (value instanceof TI32Value) { - break; - } - throw new ClassCastException("Was expecting value of type TI32Value for field 'i32Val', but got " + value.getClass().getSimpleName()); - case I64_VAL: - if (value instanceof TI64Value) { - break; - } - throw new ClassCastException("Was expecting value of type TI64Value for field 'i64Val', but got " + value.getClass().getSimpleName()); - case DOUBLE_VAL: - if (value instanceof TDoubleValue) { - break; - } - throw new ClassCastException("Was expecting value of type TDoubleValue for field 'doubleVal', but got " + value.getClass().getSimpleName()); - case STRING_VAL: - if (value instanceof TStringValue) { - break; - } - throw new ClassCastException("Was expecting value of type TStringValue for field 'stringVal', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, 
org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - if (field.type == BOOL_VAL_FIELD_DESC.type) { - TBoolValue boolVal; - boolVal = new TBoolValue(); - boolVal.read(iprot); - return boolVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BYTE_VAL: - if (field.type == BYTE_VAL_FIELD_DESC.type) { - TByteValue byteVal; - byteVal = new TByteValue(); - byteVal.read(iprot); - return byteVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I16_VAL: - if (field.type == I16_VAL_FIELD_DESC.type) { - TI16Value i16Val; - i16Val = new TI16Value(); - i16Val.read(iprot); - return i16Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I32_VAL: - if (field.type == I32_VAL_FIELD_DESC.type) { - TI32Value i32Val; - i32Val = new TI32Value(); - i32Val.read(iprot); - return i32Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I64_VAL: - if (field.type == I64_VAL_FIELD_DESC.type) { - TI64Value i64Val; - i64Val = new TI64Value(); - i64Val.read(iprot); - return i64Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case DOUBLE_VAL: - if (field.type == DOUBLE_VAL_FIELD_DESC.type) { - TDoubleValue doubleVal; - doubleVal = new TDoubleValue(); - doubleVal.read(iprot); - return doubleVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRING_VAL: - if (field.type == STRING_VAL_FIELD_DESC.type) { - TStringValue stringVal; - stringVal = new TStringValue(); - stringVal.read(iprot); - return stringVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolValue boolVal = (TBoolValue)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteValue byteVal = (TByteValue)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Value i16Val = (TI16Value)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Value i32Val = (TI32Value)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Value i64Val = (TI64Value)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleValue doubleVal = (TDoubleValue)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringValue stringVal = (TStringValue)value_; - stringVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - TBoolValue boolVal; - boolVal = new TBoolValue(); - boolVal.read(iprot); - return boolVal; - case BYTE_VAL: - TByteValue byteVal; - byteVal = new TByteValue(); - byteVal.read(iprot); - return byteVal; - case 
I16_VAL: - TI16Value i16Val; - i16Val = new TI16Value(); - i16Val.read(iprot); - return i16Val; - case I32_VAL: - TI32Value i32Val; - i32Val = new TI32Value(); - i32Val.read(iprot); - return i32Val; - case I64_VAL: - TI64Value i64Val; - i64Val = new TI64Value(); - i64Val.read(iprot); - return i64Val; - case DOUBLE_VAL: - TDoubleValue doubleVal; - doubleVal = new TDoubleValue(); - doubleVal.read(iprot); - return doubleVal; - case STRING_VAL: - TStringValue stringVal; - stringVal = new TStringValue(); - stringVal.read(iprot); - return stringVal; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolValue boolVal = (TBoolValue)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteValue byteVal = (TByteValue)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Value i16Val = (TI16Value)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Value i32Val = (TI32Value)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Value i64Val = (TI64Value)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleValue doubleVal = (TDoubleValue)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringValue stringVal = (TStringValue)value_; - stringVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case BOOL_VAL: - return BOOL_VAL_FIELD_DESC; - case BYTE_VAL: - return BYTE_VAL_FIELD_DESC; - case I16_VAL: - return I16_VAL_FIELD_DESC; - case I32_VAL: - return I32_VAL_FIELD_DESC; - case I64_VAL: - return I64_VAL_FIELD_DESC; - case DOUBLE_VAL: - return DOUBLE_VAL_FIELD_DESC; - case STRING_VAL: - return STRING_VAL_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public TBoolValue getBoolVal() { - if (getSetField() == _Fields.BOOL_VAL) { - return (TBoolValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'boolVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBoolVal(TBoolValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BOOL_VAL; - value_ = value; - } - - public TByteValue getByteVal() { - if (getSetField() == _Fields.BYTE_VAL) { - return (TByteValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'byteVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setByteVal(TByteValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BYTE_VAL; - value_ = value; - } - - public TI16Value getI16Val() { - if (getSetField() == _Fields.I16_VAL) { - return (TI16Value)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 
'i16Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI16Val(TI16Value value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I16_VAL; - value_ = value; - } - - public TI32Value getI32Val() { - if (getSetField() == _Fields.I32_VAL) { - return (TI32Value)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i32Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI32Val(TI32Value value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I32_VAL; - value_ = value; - } - - public TI64Value getI64Val() { - if (getSetField() == _Fields.I64_VAL) { - return (TI64Value)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i64Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI64Val(TI64Value value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I64_VAL; - value_ = value; - } - - public TDoubleValue getDoubleVal() { - if (getSetField() == _Fields.DOUBLE_VAL) { - return (TDoubleValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'doubleVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setDoubleVal(TDoubleValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.DOUBLE_VAL; - value_ = value; - } - - public TStringValue getStringVal() { - if (getSetField() == _Fields.STRING_VAL) { - return (TStringValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'stringVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringVal(TStringValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRING_VAL; - value_ = value; - } - - public boolean isSetBoolVal() { - return setField_ == _Fields.BOOL_VAL; - } - - - public boolean isSetByteVal() { - return setField_ == _Fields.BYTE_VAL; - } - - - public boolean isSetI16Val() { - return setField_ == _Fields.I16_VAL; - } - - - public boolean isSetI32Val() { - return setField_ == _Fields.I32_VAL; - } - - - public boolean isSetI64Val() { - return setField_ == _Fields.I64_VAL; - } - - - public boolean isSetDoubleVal() { - return setField_ == _Fields.DOUBLE_VAL; - } - - - public boolean isSetStringVal() { - return setField_ == _Fields.STRING_VAL; - } - - - public boolean equals(Object other) { - if (other instanceof TColumnValue) { - return equals((TColumnValue)other); - } else { - return false; - } - } - - public boolean equals(TColumnValue other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TColumnValue other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - HashCodeBuilder hcb = new HashCodeBuilder(); - hcb.append(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - hcb.append(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - 
hcb.append(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - hcb.append(value); - } - } - return hcb.toHashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TDoubleColumn.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TDoubleColumn.java deleted file mode 100644 index 4fc54544c1bea..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TDoubleColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TDoubleColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDoubleColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TDoubleColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TDoubleColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TDoubleColumn.class, metaDataMap); - } - - public TDoubleColumn() { - } - - public TDoubleColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. - */ - public TDoubleColumn(TDoubleColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (Double other_element : other.values) { - __this__values.add(other_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TDoubleColumn deepCopy() { - return new TDoubleColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? 
null : this.values.iterator(); - } - - public void addToValues(double elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? (ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TDoubleColumn) - return this.equals((TDoubleColumn)that); - return false; - } - - public boolean equals(TDoubleColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TDoubleColumn other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TDoubleColumn typedOther = (TDoubleColumn)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TDoubleColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TDoubleColumnStandardSchemeFactory implements SchemeFactory { - public TDoubleColumnStandardScheme getScheme() { - return new TDoubleColumnStandardScheme(); - } - } - - private static class TDoubleColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TDoubleColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list94 = iprot.readListBegin(); - struct.values = new ArrayList(_list94.size); - for (int _i95 = 0; _i95 < _list94.size; ++_i95) - { - double _elem96; // optional - _elem96 = iprot.readDouble(); - struct.values.add(_elem96); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TDoubleColumn struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.DOUBLE, struct.values.size())); - for (double _iter97 : struct.values) - { - oprot.writeDouble(_iter97); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TDoubleColumnTupleSchemeFactory implements SchemeFactory { - public TDoubleColumnTupleScheme getScheme() { - return new TDoubleColumnTupleScheme(); - } - } - - private static class TDoubleColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TDoubleColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (double _iter98 : struct.values) - { - oprot.writeDouble(_iter98); - } - } - 
oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TDoubleColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list99 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); - struct.values = new ArrayList(_list99.size); - for (int _i100 = 0; _i100 < _list99.size; ++_i100) - { - double _elem101; // optional - _elem101 = iprot.readDouble(); - struct.values.add(_elem101); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TDoubleValue.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TDoubleValue.java deleted file mode 100644 index d21573633ef51..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TDoubleValue.java +++ /dev/null @@ -1,386 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TDoubleValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDoubleValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.DOUBLE, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TDoubleValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TDoubleValueTupleSchemeFactory()); - } - - private double value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TDoubleValue.class, metaDataMap); - } - - public TDoubleValue() { - } - - /** - * Performs a deep copy on other. - */ - public TDoubleValue(TDoubleValue other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TDoubleValue deepCopy() { - return new TDoubleValue(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0.0; - } - - public double getValue() { - return this.value; - } - - public void setValue(double value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Double)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return Double.valueOf(getValue()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TDoubleValue) - return this.equals((TDoubleValue)that); - return false; - } - - public boolean equals(TDoubleValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || 
that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_value = true && (isSetValue()); - builder.append(present_value); - if (present_value) - builder.append(value); - - return builder.toHashCode(); - } - - public int compareTo(TDoubleValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TDoubleValue typedOther = (TDoubleValue)other; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TDoubleValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TDoubleValueStandardSchemeFactory implements SchemeFactory { - public TDoubleValueStandardScheme getScheme() { - return new TDoubleValueStandardScheme(); - } - } - - private static class TDoubleValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TDoubleValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.value = iprot.readDouble(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TDoubleValue struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeDouble(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TDoubleValueTupleSchemeFactory implements SchemeFactory { - public TDoubleValueTupleScheme getScheme() { - return new TDoubleValueTupleScheme(); - } - } - - private static class TDoubleValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TDoubleValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeDouble(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TDoubleValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readDouble(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TExecuteStatementReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TExecuteStatementReq.java deleted file mode 100644 index 4f157ad5a6450..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TExecuteStatementReq.java +++ /dev/null @@ -1,769 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import 
org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TExecuteStatementReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TExecuteStatementReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField STATEMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("statement", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField CONF_OVERLAY_FIELD_DESC = new org.apache.thrift.protocol.TField("confOverlay", org.apache.thrift.protocol.TType.MAP, (short)3); - private static final org.apache.thrift.protocol.TField RUN_ASYNC_FIELD_DESC = new org.apache.thrift.protocol.TField("runAsync", org.apache.thrift.protocol.TType.BOOL, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TExecuteStatementReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TExecuteStatementReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String statement; // required - private Map confOverlay; // optional - private boolean runAsync; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - STATEMENT((short)2, "statement"), - CONF_OVERLAY((short)3, "confOverlay"), - RUN_ASYNC((short)4, "runAsync"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // STATEMENT - return STATEMENT; - case 3: // CONF_OVERLAY - return CONF_OVERLAY; - case 4: // RUN_ASYNC - return RUN_ASYNC; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __RUNASYNC_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.CONF_OVERLAY,_Fields.RUN_ASYNC}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.STATEMENT, new org.apache.thrift.meta_data.FieldMetaData("statement", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.CONF_OVERLAY, new org.apache.thrift.meta_data.FieldMetaData("confOverlay", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.RUN_ASYNC, new org.apache.thrift.meta_data.FieldMetaData("runAsync", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TExecuteStatementReq.class, metaDataMap); - } - - public TExecuteStatementReq() { - this.runAsync = false; - - } - - public TExecuteStatementReq( - TSessionHandle sessionHandle, - String statement) - { - this(); - this.sessionHandle = sessionHandle; - this.statement = statement; - } - - /** - * Performs a deep copy on other. 
- */ - public TExecuteStatementReq(TExecuteStatementReq other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetStatement()) { - this.statement = other.statement; - } - if (other.isSetConfOverlay()) { - Map __this__confOverlay = new HashMap(); - for (Map.Entry other_element : other.confOverlay.entrySet()) { - - String other_element_key = other_element.getKey(); - String other_element_value = other_element.getValue(); - - String __this__confOverlay_copy_key = other_element_key; - - String __this__confOverlay_copy_value = other_element_value; - - __this__confOverlay.put(__this__confOverlay_copy_key, __this__confOverlay_copy_value); - } - this.confOverlay = __this__confOverlay; - } - this.runAsync = other.runAsync; - } - - public TExecuteStatementReq deepCopy() { - return new TExecuteStatementReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.statement = null; - this.confOverlay = null; - this.runAsync = false; - - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getStatement() { - return this.statement; - } - - public void setStatement(String statement) { - this.statement = statement; - } - - public void unsetStatement() { - this.statement = null; - } - - /** Returns true if field statement is set (has been assigned a value) and false otherwise */ - public boolean isSetStatement() { - return this.statement != null; - } - - public void setStatementIsSet(boolean value) { - if (!value) { - this.statement = null; - } - } - - public int getConfOverlaySize() { - return (this.confOverlay == null) ? 
0 : this.confOverlay.size(); - } - - public void putToConfOverlay(String key, String val) { - if (this.confOverlay == null) { - this.confOverlay = new HashMap(); - } - this.confOverlay.put(key, val); - } - - public Map getConfOverlay() { - return this.confOverlay; - } - - public void setConfOverlay(Map confOverlay) { - this.confOverlay = confOverlay; - } - - public void unsetConfOverlay() { - this.confOverlay = null; - } - - /** Returns true if field confOverlay is set (has been assigned a value) and false otherwise */ - public boolean isSetConfOverlay() { - return this.confOverlay != null; - } - - public void setConfOverlayIsSet(boolean value) { - if (!value) { - this.confOverlay = null; - } - } - - public boolean isRunAsync() { - return this.runAsync; - } - - public void setRunAsync(boolean runAsync) { - this.runAsync = runAsync; - setRunAsyncIsSet(true); - } - - public void unsetRunAsync() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RUNASYNC_ISSET_ID); - } - - /** Returns true if field runAsync is set (has been assigned a value) and false otherwise */ - public boolean isSetRunAsync() { - return EncodingUtils.testBit(__isset_bitfield, __RUNASYNC_ISSET_ID); - } - - public void setRunAsyncIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RUNASYNC_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case STATEMENT: - if (value == null) { - unsetStatement(); - } else { - setStatement((String)value); - } - break; - - case CONF_OVERLAY: - if (value == null) { - unsetConfOverlay(); - } else { - setConfOverlay((Map)value); - } - break; - - case RUN_ASYNC: - if (value == null) { - unsetRunAsync(); - } else { - setRunAsync((Boolean)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case STATEMENT: - return getStatement(); - - case CONF_OVERLAY: - return getConfOverlay(); - - case RUN_ASYNC: - return Boolean.valueOf(isRunAsync()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case STATEMENT: - return isSetStatement(); - case CONF_OVERLAY: - return isSetConfOverlay(); - case RUN_ASYNC: - return isSetRunAsync(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TExecuteStatementReq) - return this.equals((TExecuteStatementReq)that); - return false; - } - - public boolean equals(TExecuteStatementReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_statement = true && this.isSetStatement(); - boolean that_present_statement = true && that.isSetStatement(); - if 
(this_present_statement || that_present_statement) { - if (!(this_present_statement && that_present_statement)) - return false; - if (!this.statement.equals(that.statement)) - return false; - } - - boolean this_present_confOverlay = true && this.isSetConfOverlay(); - boolean that_present_confOverlay = true && that.isSetConfOverlay(); - if (this_present_confOverlay || that_present_confOverlay) { - if (!(this_present_confOverlay && that_present_confOverlay)) - return false; - if (!this.confOverlay.equals(that.confOverlay)) - return false; - } - - boolean this_present_runAsync = true && this.isSetRunAsync(); - boolean that_present_runAsync = true && that.isSetRunAsync(); - if (this_present_runAsync || that_present_runAsync) { - if (!(this_present_runAsync && that_present_runAsync)) - return false; - if (this.runAsync != that.runAsync) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_statement = true && (isSetStatement()); - builder.append(present_statement); - if (present_statement) - builder.append(statement); - - boolean present_confOverlay = true && (isSetConfOverlay()); - builder.append(present_confOverlay); - if (present_confOverlay) - builder.append(confOverlay); - - boolean present_runAsync = true && (isSetRunAsync()); - builder.append(present_runAsync); - if (present_runAsync) - builder.append(runAsync); - - return builder.toHashCode(); - } - - public int compareTo(TExecuteStatementReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TExecuteStatementReq typedOther = (TExecuteStatementReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetStatement()).compareTo(typedOther.isSetStatement()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatement()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statement, typedOther.statement); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetConfOverlay()).compareTo(typedOther.isSetConfOverlay()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetConfOverlay()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.confOverlay, typedOther.confOverlay); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRunAsync()).compareTo(typedOther.isSetRunAsync()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRunAsync()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.runAsync, typedOther.runAsync); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, 
this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TExecuteStatementReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("statement:"); - if (this.statement == null) { - sb.append("null"); - } else { - sb.append(this.statement); - } - first = false; - if (isSetConfOverlay()) { - if (!first) sb.append(", "); - sb.append("confOverlay:"); - if (this.confOverlay == null) { - sb.append("null"); - } else { - sb.append(this.confOverlay); - } - first = false; - } - if (isSetRunAsync()) { - if (!first) sb.append(", "); - sb.append("runAsync:"); - sb.append(this.runAsync); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetStatement()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'statement' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TExecuteStatementReqStandardSchemeFactory implements SchemeFactory { - public TExecuteStatementReqStandardScheme getScheme() { - return new TExecuteStatementReqStandardScheme(); - } - } - - private static class TExecuteStatementReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TExecuteStatementReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // STATEMENT - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.statement = iprot.readString(); - struct.setStatementIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // CONF_OVERLAY - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map162 = iprot.readMapBegin(); - struct.confOverlay = new HashMap(2*_map162.size); - for (int _i163 = 0; _i163 < _map162.size; ++_i163) - { - String _key164; // required - String _val165; // required - _key164 = iprot.readString(); - _val165 = iprot.readString(); - struct.confOverlay.put(_key164, _val165); - } - iprot.readMapEnd(); - } - struct.setConfOverlayIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // RUN_ASYNC - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.runAsync = iprot.readBool(); - struct.setRunAsyncIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TExecuteStatementReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.statement != null) { - oprot.writeFieldBegin(STATEMENT_FIELD_DESC); - oprot.writeString(struct.statement); - oprot.writeFieldEnd(); - } - if (struct.confOverlay != null) { - if (struct.isSetConfOverlay()) { - oprot.writeFieldBegin(CONF_OVERLAY_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.confOverlay.size())); - for (Map.Entry _iter166 : struct.confOverlay.entrySet()) - { - oprot.writeString(_iter166.getKey()); - oprot.writeString(_iter166.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - } - if (struct.isSetRunAsync()) { - 
oprot.writeFieldBegin(RUN_ASYNC_FIELD_DESC); - oprot.writeBool(struct.runAsync); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TExecuteStatementReqTupleSchemeFactory implements SchemeFactory { - public TExecuteStatementReqTupleScheme getScheme() { - return new TExecuteStatementReqTupleScheme(); - } - } - - private static class TExecuteStatementReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.statement); - BitSet optionals = new BitSet(); - if (struct.isSetConfOverlay()) { - optionals.set(0); - } - if (struct.isSetRunAsync()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetConfOverlay()) { - { - oprot.writeI32(struct.confOverlay.size()); - for (Map.Entry _iter167 : struct.confOverlay.entrySet()) - { - oprot.writeString(_iter167.getKey()); - oprot.writeString(_iter167.getValue()); - } - } - } - if (struct.isSetRunAsync()) { - oprot.writeBool(struct.runAsync); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.statement = iprot.readString(); - struct.setStatementIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TMap _map168 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.confOverlay = new HashMap(2*_map168.size); - for (int _i169 = 0; _i169 < _map168.size; ++_i169) - { - String _key170; // required - String _val171; // required - _key170 = iprot.readString(); - _val171 = iprot.readString(); - struct.confOverlay.put(_key170, _val171); - } - } - struct.setConfOverlayIsSet(true); - } - if (incoming.get(1)) { - struct.runAsync = iprot.readBool(); - struct.setRunAsyncIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TExecuteStatementResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TExecuteStatementResp.java deleted file mode 100644 index fdde51e70f783..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TExecuteStatementResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import 
java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TExecuteStatementResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TExecuteStatementResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TExecuteStatementRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TExecuteStatementRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TExecuteStatementResp.class, metaDataMap); - } - - public TExecuteStatementResp() { - } - - public TExecuteStatementResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TExecuteStatementResp(TExecuteStatementResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TExecuteStatementResp deepCopy() { - return new TExecuteStatementResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return 
getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TExecuteStatementResp) - return this.equals((TExecuteStatementResp)that); - return false; - } - - public boolean equals(TExecuteStatementResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TExecuteStatementResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TExecuteStatementResp typedOther = (TExecuteStatementResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TExecuteStatementResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - 
sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TExecuteStatementRespStandardSchemeFactory implements SchemeFactory { - public TExecuteStatementRespStandardScheme getScheme() { - return new TExecuteStatementRespStandardScheme(); - } - } - - private static class TExecuteStatementRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TExecuteStatementRespTupleSchemeFactory implements SchemeFactory { - public TExecuteStatementRespTupleScheme getScheme() { - return new 
TExecuteStatementRespTupleScheme(); - } - } - - private static class TExecuteStatementRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchOrientation.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchOrientation.java deleted file mode 100644 index b2a22effd91af..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchOrientation.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TFetchOrientation implements org.apache.thrift.TEnum { - FETCH_NEXT(0), - FETCH_PRIOR(1), - FETCH_RELATIVE(2), - FETCH_ABSOLUTE(3), - FETCH_FIRST(4), - FETCH_LAST(5); - - private final int value; - - private TFetchOrientation(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - public static TFetchOrientation findByValue(int value) { - switch (value) { - case 0: - return FETCH_NEXT; - case 1: - return FETCH_PRIOR; - case 2: - return FETCH_RELATIVE; - case 3: - return FETCH_ABSOLUTE; - case 4: - return FETCH_FIRST; - case 5: - return FETCH_LAST; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchResultsReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchResultsReq.java deleted file mode 100644 index 068711fc44440..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchResultsReq.java +++ /dev/null @@ -1,710 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TFetchResultsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFetchResultsReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField ORIENTATION_FIELD_DESC = new org.apache.thrift.protocol.TField("orientation", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField MAX_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxRows", org.apache.thrift.protocol.TType.I64, (short)3); - private static final org.apache.thrift.protocol.TField FETCH_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("fetchType", org.apache.thrift.protocol.TType.I16, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TFetchResultsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TFetchResultsReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - private TFetchOrientation orientation; // required - private long maxRows; // required - private short fetchType; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"), - /** - * - * @see TFetchOrientation - */ - ORIENTATION((short)2, "orientation"), - MAX_ROWS((short)3, "maxRows"), - FETCH_TYPE((short)4, "fetchType"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - case 2: // ORIENTATION - return ORIENTATION; - case 3: // MAX_ROWS - return MAX_ROWS; - case 4: // FETCH_TYPE - return FETCH_TYPE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __MAXROWS_ISSET_ID = 0; - private static final int __FETCHTYPE_ISSET_ID = 1; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.FETCH_TYPE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - tmpMap.put(_Fields.ORIENTATION, new org.apache.thrift.meta_data.FieldMetaData("orientation", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TFetchOrientation.class))); - tmpMap.put(_Fields.MAX_ROWS, new org.apache.thrift.meta_data.FieldMetaData("maxRows", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.FETCH_TYPE, new org.apache.thrift.meta_data.FieldMetaData("fetchType", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFetchResultsReq.class, metaDataMap); - } - - public TFetchResultsReq() { - this.orientation = org.apache.hive.service.cli.thrift.TFetchOrientation.FETCH_NEXT; - - this.fetchType = (short)0; - - } - - public TFetchResultsReq( - TOperationHandle operationHandle, - TFetchOrientation orientation, - long maxRows) - { - this(); - 
this.operationHandle = operationHandle; - this.orientation = orientation; - this.maxRows = maxRows; - setMaxRowsIsSet(true); - } - - /** - * Performs a deep copy on other. - */ - public TFetchResultsReq(TFetchResultsReq other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - if (other.isSetOrientation()) { - this.orientation = other.orientation; - } - this.maxRows = other.maxRows; - this.fetchType = other.fetchType; - } - - public TFetchResultsReq deepCopy() { - return new TFetchResultsReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - this.orientation = org.apache.hive.service.cli.thrift.TFetchOrientation.FETCH_NEXT; - - setMaxRowsIsSet(false); - this.maxRows = 0; - this.fetchType = (short)0; - - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - /** - * - * @see TFetchOrientation - */ - public TFetchOrientation getOrientation() { - return this.orientation; - } - - /** - * - * @see TFetchOrientation - */ - public void setOrientation(TFetchOrientation orientation) { - this.orientation = orientation; - } - - public void unsetOrientation() { - this.orientation = null; - } - - /** Returns true if field orientation is set (has been assigned a value) and false otherwise */ - public boolean isSetOrientation() { - return this.orientation != null; - } - - public void setOrientationIsSet(boolean value) { - if (!value) { - this.orientation = null; - } - } - - public long getMaxRows() { - return this.maxRows; - } - - public void setMaxRows(long maxRows) { - this.maxRows = maxRows; - setMaxRowsIsSet(true); - } - - public void unsetMaxRows() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXROWS_ISSET_ID); - } - - /** Returns true if field maxRows is set (has been assigned a value) and false otherwise */ - public boolean isSetMaxRows() { - return EncodingUtils.testBit(__isset_bitfield, __MAXROWS_ISSET_ID); - } - - public void setMaxRowsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXROWS_ISSET_ID, value); - } - - public short getFetchType() { - return this.fetchType; - } - - public void setFetchType(short fetchType) { - this.fetchType = fetchType; - setFetchTypeIsSet(true); - } - - public void unsetFetchType() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __FETCHTYPE_ISSET_ID); - } - - /** Returns true if field fetchType is set (has been assigned a value) and false otherwise */ - public boolean isSetFetchType() { - return EncodingUtils.testBit(__isset_bitfield, __FETCHTYPE_ISSET_ID); - } - - public void setFetchTypeIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __FETCHTYPE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - 
case ORIENTATION: - if (value == null) { - unsetOrientation(); - } else { - setOrientation((TFetchOrientation)value); - } - break; - - case MAX_ROWS: - if (value == null) { - unsetMaxRows(); - } else { - setMaxRows((Long)value); - } - break; - - case FETCH_TYPE: - if (value == null) { - unsetFetchType(); - } else { - setFetchType((Short)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - case ORIENTATION: - return getOrientation(); - - case MAX_ROWS: - return Long.valueOf(getMaxRows()); - - case FETCH_TYPE: - return Short.valueOf(getFetchType()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - case ORIENTATION: - return isSetOrientation(); - case MAX_ROWS: - return isSetMaxRows(); - case FETCH_TYPE: - return isSetFetchType(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TFetchResultsReq) - return this.equals((TFetchResultsReq)that); - return false; - } - - public boolean equals(TFetchResultsReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - boolean this_present_orientation = true && this.isSetOrientation(); - boolean that_present_orientation = true && that.isSetOrientation(); - if (this_present_orientation || that_present_orientation) { - if (!(this_present_orientation && that_present_orientation)) - return false; - if (!this.orientation.equals(that.orientation)) - return false; - } - - boolean this_present_maxRows = true; - boolean that_present_maxRows = true; - if (this_present_maxRows || that_present_maxRows) { - if (!(this_present_maxRows && that_present_maxRows)) - return false; - if (this.maxRows != that.maxRows) - return false; - } - - boolean this_present_fetchType = true && this.isSetFetchType(); - boolean that_present_fetchType = true && that.isSetFetchType(); - if (this_present_fetchType || that_present_fetchType) { - if (!(this_present_fetchType && that_present_fetchType)) - return false; - if (this.fetchType != that.fetchType) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - boolean present_orientation = true && (isSetOrientation()); - builder.append(present_orientation); - if (present_orientation) - builder.append(orientation.getValue()); - - boolean present_maxRows = true; - builder.append(present_maxRows); - if (present_maxRows) - builder.append(maxRows); - - boolean present_fetchType = true && (isSetFetchType()); - builder.append(present_fetchType); - if (present_fetchType) - builder.append(fetchType); - - return 
builder.toHashCode(); - } - - public int compareTo(TFetchResultsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TFetchResultsReq typedOther = (TFetchResultsReq)other; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOrientation()).compareTo(typedOther.isSetOrientation()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOrientation()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.orientation, typedOther.orientation); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetMaxRows()).compareTo(typedOther.isSetMaxRows()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetMaxRows()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxRows, typedOther.maxRows); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFetchType()).compareTo(typedOther.isSetFetchType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFetchType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fetchType, typedOther.fetchType); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TFetchResultsReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("orientation:"); - if (this.orientation == null) { - sb.append("null"); - } else { - sb.append(this.orientation); - } - first = false; - if (!first) sb.append(", "); - sb.append("maxRows:"); - sb.append(this.maxRows); - first = false; - if (isSetFetchType()) { - if (!first) sb.append(", "); - sb.append("fetchType:"); - sb.append(this.fetchType); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! Struct:" + toString()); - } - - if (!isSetOrientation()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'orientation' is unset! Struct:" + toString()); - } - - if (!isSetMaxRows()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxRows' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TFetchResultsReqStandardSchemeFactory implements SchemeFactory { - public TFetchResultsReqStandardScheme getScheme() { - return new TFetchResultsReqStandardScheme(); - } - } - - private static class TFetchResultsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TFetchResultsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // ORIENTATION - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.orientation = TFetchOrientation.findByValue(iprot.readI32()); - struct.setOrientationIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // MAX_ROWS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.maxRows = iprot.readI64(); - struct.setMaxRowsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // FETCH_TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.fetchType = iprot.readI16(); - struct.setFetchTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TFetchResultsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.orientation != null) { - oprot.writeFieldBegin(ORIENTATION_FIELD_DESC); - oprot.writeI32(struct.orientation.getValue()); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(MAX_ROWS_FIELD_DESC); - oprot.writeI64(struct.maxRows); - oprot.writeFieldEnd(); - if (struct.isSetFetchType()) { - 
oprot.writeFieldBegin(FETCH_TYPE_FIELD_DESC); - oprot.writeI16(struct.fetchType); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TFetchResultsReqTupleSchemeFactory implements SchemeFactory { - public TFetchResultsReqTupleScheme getScheme() { - return new TFetchResultsReqTupleScheme(); - } - } - - private static class TFetchResultsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TFetchResultsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - oprot.writeI32(struct.orientation.getValue()); - oprot.writeI64(struct.maxRows); - BitSet optionals = new BitSet(); - if (struct.isSetFetchType()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetFetchType()) { - oprot.writeI16(struct.fetchType); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TFetchResultsReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - struct.orientation = TFetchOrientation.findByValue(iprot.readI32()); - struct.setOrientationIsSet(true); - struct.maxRows = iprot.readI64(); - struct.setMaxRowsIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.fetchType = iprot.readI16(); - struct.setFetchTypeIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchResultsResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchResultsResp.java deleted file mode 100644 index 19991f1da3eb3..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TFetchResultsResp.java +++ /dev/null @@ -1,608 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TFetchResultsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFetchResultsResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField HAS_MORE_ROWS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("hasMoreRows", org.apache.thrift.protocol.TType.BOOL, (short)2); - private static final org.apache.thrift.protocol.TField RESULTS_FIELD_DESC = new org.apache.thrift.protocol.TField("results", org.apache.thrift.protocol.TType.STRUCT, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TFetchResultsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TFetchResultsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private boolean hasMoreRows; // optional - private TRowSet results; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - HAS_MORE_ROWS((short)2, "hasMoreRows"), - RESULTS((short)3, "results"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // HAS_MORE_ROWS - return HAS_MORE_ROWS; - case 3: // RESULTS - return RESULTS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __HASMOREROWS_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.HAS_MORE_ROWS,_Fields.RESULTS}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.HAS_MORE_ROWS, new org.apache.thrift.meta_data.FieldMetaData("hasMoreRows", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.RESULTS, new org.apache.thrift.meta_data.FieldMetaData("results", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowSet.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFetchResultsResp.class, metaDataMap); - } - - public TFetchResultsResp() { - } - - public TFetchResultsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TFetchResultsResp(TFetchResultsResp other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - this.hasMoreRows = other.hasMoreRows; - if (other.isSetResults()) { - this.results = new TRowSet(other.results); - } - } - - public TFetchResultsResp deepCopy() { - return new TFetchResultsResp(this); - } - - @Override - public void clear() { - this.status = null; - setHasMoreRowsIsSet(false); - this.hasMoreRows = false; - this.results = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public boolean isHasMoreRows() { - return this.hasMoreRows; - } - - public void setHasMoreRows(boolean hasMoreRows) { - this.hasMoreRows = hasMoreRows; - setHasMoreRowsIsSet(true); - } - - public void unsetHasMoreRows() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASMOREROWS_ISSET_ID); - } - - /** Returns true if field hasMoreRows is set (has been assigned a value) and false otherwise */ - public boolean isSetHasMoreRows() { - return EncodingUtils.testBit(__isset_bitfield, __HASMOREROWS_ISSET_ID); - } - - public void setHasMoreRowsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASMOREROWS_ISSET_ID, value); - } - - public TRowSet getResults() { - return this.results; - } - - public void setResults(TRowSet results) { - this.results = results; - } - - public void unsetResults() { - this.results = null; - } - - /** Returns true if field results is set (has been assigned a value) and false otherwise */ - public boolean isSetResults() { - return this.results != null; - } - - public void setResultsIsSet(boolean value) { - if (!value) { - this.results = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case HAS_MORE_ROWS: - if (value == null) { - unsetHasMoreRows(); - } else { - setHasMoreRows((Boolean)value); - } - break; - - case RESULTS: - if (value == null) { - unsetResults(); - } else { - setResults((TRowSet)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case HAS_MORE_ROWS: - return Boolean.valueOf(isHasMoreRows()); - - case RESULTS: - return getResults(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case HAS_MORE_ROWS: - return isSetHasMoreRows(); - case RESULTS: - return isSetResults(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TFetchResultsResp) - return this.equals((TFetchResultsResp)that); - return false; - } - - public boolean equals(TFetchResultsResp that) { - if (that == null) - return false; - - boolean this_present_status = 
true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_hasMoreRows = true && this.isSetHasMoreRows(); - boolean that_present_hasMoreRows = true && that.isSetHasMoreRows(); - if (this_present_hasMoreRows || that_present_hasMoreRows) { - if (!(this_present_hasMoreRows && that_present_hasMoreRows)) - return false; - if (this.hasMoreRows != that.hasMoreRows) - return false; - } - - boolean this_present_results = true && this.isSetResults(); - boolean that_present_results = true && that.isSetResults(); - if (this_present_results || that_present_results) { - if (!(this_present_results && that_present_results)) - return false; - if (!this.results.equals(that.results)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_hasMoreRows = true && (isSetHasMoreRows()); - builder.append(present_hasMoreRows); - if (present_hasMoreRows) - builder.append(hasMoreRows); - - boolean present_results = true && (isSetResults()); - builder.append(present_results); - if (present_results) - builder.append(results); - - return builder.toHashCode(); - } - - public int compareTo(TFetchResultsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TFetchResultsResp typedOther = (TFetchResultsResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHasMoreRows()).compareTo(typedOther.isSetHasMoreRows()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHasMoreRows()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasMoreRows, typedOther.hasMoreRows); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetResults()).compareTo(typedOther.isSetResults()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetResults()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.results, typedOther.results); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TFetchResultsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetHasMoreRows()) { - if (!first) sb.append(", "); - 
sb.append("hasMoreRows:"); - sb.append(this.hasMoreRows); - first = false; - } - if (isSetResults()) { - if (!first) sb.append(", "); - sb.append("results:"); - if (this.results == null) { - sb.append("null"); - } else { - sb.append(this.results); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (results != null) { - results.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TFetchResultsRespStandardSchemeFactory implements SchemeFactory { - public TFetchResultsRespStandardScheme getScheme() { - return new TFetchResultsRespStandardScheme(); - } - } - - private static class TFetchResultsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TFetchResultsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // HAS_MORE_ROWS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.hasMoreRows = iprot.readBool(); - struct.setHasMoreRowsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // RESULTS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.results = new TRowSet(); - struct.results.read(iprot); - struct.setResultsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TFetchResultsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.isSetHasMoreRows()) { - oprot.writeFieldBegin(HAS_MORE_ROWS_FIELD_DESC); - 
oprot.writeBool(struct.hasMoreRows); - oprot.writeFieldEnd(); - } - if (struct.results != null) { - if (struct.isSetResults()) { - oprot.writeFieldBegin(RESULTS_FIELD_DESC); - struct.results.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TFetchResultsRespTupleSchemeFactory implements SchemeFactory { - public TFetchResultsRespTupleScheme getScheme() { - return new TFetchResultsRespTupleScheme(); - } - } - - private static class TFetchResultsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TFetchResultsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetHasMoreRows()) { - optionals.set(0); - } - if (struct.isSetResults()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetHasMoreRows()) { - oprot.writeBool(struct.hasMoreRows); - } - if (struct.isSetResults()) { - struct.results.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TFetchResultsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.hasMoreRows = iprot.readBool(); - struct.setHasMoreRowsIsSet(true); - } - if (incoming.get(1)) { - struct.results = new TRowSet(); - struct.results.read(iprot); - struct.setResultsIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetCatalogsReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetCatalogsReq.java deleted file mode 100644 index cfd157f701b26..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetCatalogsReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetCatalogsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCatalogsReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, 
SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetCatalogsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetCatalogsReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCatalogsReq.class, metaDataMap); - } - - public TGetCatalogsReq() { - } - - public TGetCatalogsReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetCatalogsReq(TGetCatalogsReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TGetCatalogsReq deepCopy() { - return new TGetCatalogsReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetCatalogsReq) - return this.equals((TGetCatalogsReq)that); - return false; - } - - public boolean equals(TGetCatalogsReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetCatalogsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetCatalogsReq typedOther = (TGetCatalogsReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetCatalogsReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetCatalogsReqStandardSchemeFactory implements SchemeFactory { - public TGetCatalogsReqStandardScheme getScheme() { - return new TGetCatalogsReqStandardScheme(); - } - } - - private static class TGetCatalogsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCatalogsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCatalogsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetCatalogsReqTupleSchemeFactory implements SchemeFactory { - public TGetCatalogsReqTupleScheme getScheme() { - return new TGetCatalogsReqTupleScheme(); - } - } - - private static class TGetCatalogsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsReq struct) throws 
org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetCatalogsResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetCatalogsResp.java deleted file mode 100644 index 1c5a35437d416..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetCatalogsResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetCatalogsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCatalogsResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetCatalogsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetCatalogsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCatalogsResp.class, metaDataMap); - } - - public TGetCatalogsResp() { - } - - public TGetCatalogsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetCatalogsResp(TGetCatalogsResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetCatalogsResp deepCopy() { - return new TGetCatalogsResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetCatalogsResp) - return this.equals((TGetCatalogsResp)that); - return false; - } - - public boolean equals(TGetCatalogsResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - 
builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetCatalogsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetCatalogsResp typedOther = (TGetCatalogsResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetCatalogsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetCatalogsRespStandardSchemeFactory implements SchemeFactory { - public TGetCatalogsRespStandardScheme getScheme() { - return new TGetCatalogsRespStandardScheme(); - } - } - - private static class TGetCatalogsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetCatalogsRespTupleSchemeFactory implements SchemeFactory { - public TGetCatalogsRespTupleScheme getScheme() { - return new TGetCatalogsRespTupleScheme(); - } - } - - private static class TGetCatalogsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public 
void read(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetColumnsReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetColumnsReq.java deleted file mode 100644 index a2c793bd95927..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetColumnsReq.java +++ /dev/null @@ -1,818 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetColumnsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetColumnsReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetColumnsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetColumnsReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - private String tableName; // optional - private String columnName; // optional - - /** The set of fields this struct 
contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"), - TABLE_NAME((short)4, "tableName"), - COLUMN_NAME((short)5, "columnName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - case 4: // TABLE_NAME - return TABLE_NAME; - case 5: // COLUMN_NAME - return COLUMN_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME,_Fields.TABLE_NAME,_Fields.COLUMN_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetColumnsReq.class, metaDataMap); - } - - public TGetColumnsReq() { - } - - public TGetColumnsReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. - */ - public TGetColumnsReq(TGetColumnsReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - if (other.isSetTableName()) { - this.tableName = other.tableName; - } - if (other.isSetColumnName()) { - this.columnName = other.columnName; - } - } - - public TGetColumnsReq deepCopy() { - return new TGetColumnsReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - this.tableName = null; - this.columnName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public String getTableName() { - return this.tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } - } - - public String getColumnName() { - return this.columnName; - } - - public void setColumnName(String columnName) { - this.columnName = columnName; - } - - public void unsetColumnName() { - this.columnName = null; - } - - /** Returns true if field columnName is set (has been assigned a value) and false otherwise */ - public boolean isSetColumnName() { - return this.columnName != null; - } - - public void setColumnNameIsSet(boolean value) { - if (!value) { - this.columnName = null; - } - } - - public void 
setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((String)value); - } - break; - - case COLUMN_NAME: - if (value == null) { - unsetColumnName(); - } else { - setColumnName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - case TABLE_NAME: - return getTableName(); - - case COLUMN_NAME: - return getColumnName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - case TABLE_NAME: - return isSetTableName(); - case COLUMN_NAME: - return isSetColumnName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetColumnsReq) - return this.equals((TGetColumnsReq)that); - return false; - } - - public boolean equals(TGetColumnsReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - - boolean this_present_columnName = true && this.isSetColumnName(); - boolean that_present_columnName = true && that.isSetColumnName(); - if (this_present_columnName || that_present_columnName) { - if (!(this_present_columnName && that_present_columnName)) - return false; - if 
(!this.columnName.equals(that.columnName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - builder.append(present_catalogName); - if (present_catalogName) - builder.append(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - builder.append(present_schemaName); - if (present_schemaName) - builder.append(schemaName); - - boolean present_tableName = true && (isSetTableName()); - builder.append(present_tableName); - if (present_tableName) - builder.append(tableName); - - boolean present_columnName = true && (isSetColumnName()); - builder.append(present_columnName); - if (present_columnName) - builder.append(columnName); - - return builder.toHashCode(); - } - - public int compareTo(TGetColumnsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetColumnsReq typedOther = (TGetColumnsReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(typedOther.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, typedOther.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(typedOther.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, typedOther.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(typedOther.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, typedOther.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetColumnName()).compareTo(typedOther.isSetColumnName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumnName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnName, typedOther.columnName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetColumnsReq("); - boolean first = 
true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - if (isSetTableName()) { - if (!first) sb.append(", "); - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - } - if (isSetColumnName()) { - if (!first) sb.append(", "); - sb.append("columnName:"); - if (this.columnName == null) { - sb.append("null"); - } else { - sb.append(this.columnName); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetColumnsReqStandardSchemeFactory implements SchemeFactory { - public TGetColumnsReqStandardScheme getScheme() { - return new TGetColumnsReqStandardScheme(); - } - } - - private static class TGetColumnsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetColumnsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // TABLE_NAME - if 
(schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // COLUMN_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetColumnsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - if (struct.tableName != null) { - if (struct.isSetTableName()) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.tableName); - oprot.writeFieldEnd(); - } - } - if (struct.columnName != null) { - if (struct.isSetColumnName()) { - oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC); - oprot.writeString(struct.columnName); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetColumnsReqTupleSchemeFactory implements SchemeFactory { - public TGetColumnsReqTupleScheme getScheme() { - return new TGetColumnsReqTupleScheme(); - } - } - - private static class TGetColumnsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetColumnsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - if (struct.isSetTableName()) { - optionals.set(2); - } - if (struct.isSetColumnName()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - if (struct.isSetTableName()) { - oprot.writeString(struct.tableName); - } - if (struct.isSetColumnName()) { - oprot.writeString(struct.columnName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetColumnsReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(4); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - if (incoming.get(2)) { - struct.tableName = 
iprot.readString(); - struct.setTableNameIsSet(true); - } - if (incoming.get(3)) { - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetColumnsResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetColumnsResp.java deleted file mode 100644 index d6cf1be6d304b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetColumnsResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetColumnsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetColumnsResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetColumnsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetColumnsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetColumnsResp.class, metaDataMap); - } - - public TGetColumnsResp() { - } - - public TGetColumnsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetColumnsResp(TGetColumnsResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetColumnsResp deepCopy() { - return new TGetColumnsResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetColumnsResp) - return this.equals((TGetColumnsResp)that); - return false; - } - - public boolean equals(TGetColumnsResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - 
builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetColumnsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetColumnsResp typedOther = (TGetColumnsResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetColumnsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetColumnsRespStandardSchemeFactory implements SchemeFactory { - public TGetColumnsRespStandardScheme getScheme() { - return new TGetColumnsRespStandardScheme(); - } - } - - private static class TGetColumnsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetColumnsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetColumnsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetColumnsRespTupleSchemeFactory implements SchemeFactory { - public TGetColumnsRespTupleScheme getScheme() { - return new TGetColumnsRespTupleScheme(); - } - } - - private static class TGetColumnsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetColumnsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TGetColumnsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetDelegationTokenReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetDelegationTokenReq.java deleted file mode 100644 index 6c6bb00e43e43..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetDelegationTokenReq.java +++ /dev/null @@ -1,592 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetDelegationTokenReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OWNER_FIELD_DESC = new org.apache.thrift.protocol.TField("owner", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField RENEWER_FIELD_DESC = new org.apache.thrift.protocol.TField("renewer", org.apache.thrift.protocol.TType.STRING, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetDelegationTokenReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetDelegationTokenReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String owner; // required - private String renewer; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - OWNER((short)2, "owner"), - RENEWER((short)3, "renewer"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // OWNER - return OWNER; - case 3: // RENEWER - return RENEWER; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.OWNER, new org.apache.thrift.meta_data.FieldMetaData("owner", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.RENEWER, new org.apache.thrift.meta_data.FieldMetaData("renewer", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetDelegationTokenReq.class, metaDataMap); - } - - public TGetDelegationTokenReq() { - } - - public TGetDelegationTokenReq( - TSessionHandle sessionHandle, - String owner, - String renewer) - { - this(); - this.sessionHandle = sessionHandle; - this.owner = owner; - this.renewer = renewer; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetDelegationTokenReq(TGetDelegationTokenReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetOwner()) { - this.owner = other.owner; - } - if (other.isSetRenewer()) { - this.renewer = other.renewer; - } - } - - public TGetDelegationTokenReq deepCopy() { - return new TGetDelegationTokenReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.owner = null; - this.renewer = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getOwner() { - return this.owner; - } - - public void setOwner(String owner) { - this.owner = owner; - } - - public void unsetOwner() { - this.owner = null; - } - - /** Returns true if field owner is set (has been assigned a value) and false otherwise */ - public boolean isSetOwner() { - return this.owner != null; - } - - public void setOwnerIsSet(boolean value) { - if (!value) { - this.owner = null; - } - } - - public String getRenewer() { - return this.renewer; - } - - public void setRenewer(String renewer) { - this.renewer = renewer; - } - - public void unsetRenewer() { - this.renewer = null; - } - - /** Returns true if field renewer is set (has been assigned a value) and false otherwise */ - public boolean isSetRenewer() { - return this.renewer != null; - } - - public void setRenewerIsSet(boolean value) { - if (!value) { - this.renewer = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case OWNER: - if (value == null) { - unsetOwner(); - } else { - setOwner((String)value); - } - break; - - case RENEWER: - if (value == null) { - unsetRenewer(); - } else { - setRenewer((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case OWNER: - return getOwner(); - - case RENEWER: - return getRenewer(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case OWNER: - return isSetOwner(); - case RENEWER: - return isSetRenewer(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetDelegationTokenReq) - return this.equals((TGetDelegationTokenReq)that); - return false; - } - - public boolean equals(TGetDelegationTokenReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || 
that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_owner = true && this.isSetOwner(); - boolean that_present_owner = true && that.isSetOwner(); - if (this_present_owner || that_present_owner) { - if (!(this_present_owner && that_present_owner)) - return false; - if (!this.owner.equals(that.owner)) - return false; - } - - boolean this_present_renewer = true && this.isSetRenewer(); - boolean that_present_renewer = true && that.isSetRenewer(); - if (this_present_renewer || that_present_renewer) { - if (!(this_present_renewer && that_present_renewer)) - return false; - if (!this.renewer.equals(that.renewer)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_owner = true && (isSetOwner()); - builder.append(present_owner); - if (present_owner) - builder.append(owner); - - boolean present_renewer = true && (isSetRenewer()); - builder.append(present_renewer); - if (present_renewer) - builder.append(renewer); - - return builder.toHashCode(); - } - - public int compareTo(TGetDelegationTokenReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetDelegationTokenReq typedOther = (TGetDelegationTokenReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOwner()).compareTo(typedOther.isSetOwner()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOwner()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.owner, typedOther.owner); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRenewer()).compareTo(typedOther.isSetRenewer()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRenewer()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.renewer, typedOther.renewer); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetDelegationTokenReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("owner:"); - if (this.owner == null) { - sb.append("null"); - } else { - sb.append(this.owner); - } - first = 
false; - if (!first) sb.append(", "); - sb.append("renewer:"); - if (this.renewer == null) { - sb.append("null"); - } else { - sb.append(this.renewer); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetOwner()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'owner' is unset! Struct:" + toString()); - } - - if (!isSetRenewer()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'renewer' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetDelegationTokenReqStandardSchemeFactory implements SchemeFactory { - public TGetDelegationTokenReqStandardScheme getScheme() { - return new TGetDelegationTokenReqStandardScheme(); - } - } - - private static class TGetDelegationTokenReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OWNER - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.owner = iprot.readString(); - struct.setOwnerIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // RENEWER - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.renewer = iprot.readString(); - struct.setRenewerIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.owner != 
null) { - oprot.writeFieldBegin(OWNER_FIELD_DESC); - oprot.writeString(struct.owner); - oprot.writeFieldEnd(); - } - if (struct.renewer != null) { - oprot.writeFieldBegin(RENEWER_FIELD_DESC); - oprot.writeString(struct.renewer); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetDelegationTokenReqTupleSchemeFactory implements SchemeFactory { - public TGetDelegationTokenReqTupleScheme getScheme() { - return new TGetDelegationTokenReqTupleScheme(); - } - } - - private static class TGetDelegationTokenReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.owner); - oprot.writeString(struct.renewer); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.owner = iprot.readString(); - struct.setOwnerIsSet(true); - struct.renewer = iprot.readString(); - struct.setRenewerIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetDelegationTokenResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetDelegationTokenResp.java deleted file mode 100644 index d14c5e029a35d..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetDelegationTokenResp.java +++ /dev/null @@ -1,500 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetDelegationTokenResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField DELEGATION_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("delegationToken", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - 
schemes.put(StandardScheme.class, new TGetDelegationTokenRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetDelegationTokenRespTupleSchemeFactory()); - } - - private TStatus status; // required - private String delegationToken; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - DELEGATION_TOKEN((short)2, "delegationToken"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // DELEGATION_TOKEN - return DELEGATION_TOKEN; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.DELEGATION_TOKEN}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.DELEGATION_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("delegationToken", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetDelegationTokenResp.class, metaDataMap); - } - - public TGetDelegationTokenResp() { - } - - public TGetDelegationTokenResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetDelegationTokenResp(TGetDelegationTokenResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetDelegationToken()) { - this.delegationToken = other.delegationToken; - } - } - - public TGetDelegationTokenResp deepCopy() { - return new TGetDelegationTokenResp(this); - } - - @Override - public void clear() { - this.status = null; - this.delegationToken = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public String getDelegationToken() { - return this.delegationToken; - } - - public void setDelegationToken(String delegationToken) { - this.delegationToken = delegationToken; - } - - public void unsetDelegationToken() { - this.delegationToken = null; - } - - /** Returns true if field delegationToken is set (has been assigned a value) and false otherwise */ - public boolean isSetDelegationToken() { - return this.delegationToken != null; - } - - public void setDelegationTokenIsSet(boolean value) { - if (!value) { - this.delegationToken = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case DELEGATION_TOKEN: - if (value == null) { - unsetDelegationToken(); - } else { - setDelegationToken((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case DELEGATION_TOKEN: - return getDelegationToken(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case DELEGATION_TOKEN: - return isSetDelegationToken(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetDelegationTokenResp) - return this.equals((TGetDelegationTokenResp)that); - return false; - } - - public boolean equals(TGetDelegationTokenResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_delegationToken = true && this.isSetDelegationToken(); - boolean that_present_delegationToken = true && that.isSetDelegationToken(); - if (this_present_delegationToken || that_present_delegationToken) { - if (!(this_present_delegationToken && that_present_delegationToken)) - return false; - if (!this.delegationToken.equals(that.delegationToken)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - 
builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_delegationToken = true && (isSetDelegationToken()); - builder.append(present_delegationToken); - if (present_delegationToken) - builder.append(delegationToken); - - return builder.toHashCode(); - } - - public int compareTo(TGetDelegationTokenResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetDelegationTokenResp typedOther = (TGetDelegationTokenResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDelegationToken()).compareTo(typedOther.isSetDelegationToken()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDelegationToken()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delegationToken, typedOther.delegationToken); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetDelegationTokenResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetDelegationToken()) { - if (!first) sb.append(", "); - sb.append("delegationToken:"); - if (this.delegationToken == null) { - sb.append("null"); - } else { - sb.append(this.delegationToken); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetDelegationTokenRespStandardSchemeFactory implements SchemeFactory { - public TGetDelegationTokenRespStandardScheme getScheme() { - return new TGetDelegationTokenRespStandardScheme(); - } - } - - private static class TGetDelegationTokenRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // DELEGATION_TOKEN - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.delegationToken != null) { - if (struct.isSetDelegationToken()) { - oprot.writeFieldBegin(DELEGATION_TOKEN_FIELD_DESC); - oprot.writeString(struct.delegationToken); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetDelegationTokenRespTupleSchemeFactory implements SchemeFactory { - public TGetDelegationTokenRespTupleScheme getScheme() { - return new TGetDelegationTokenRespTupleScheme(); - } - } - - private static class TGetDelegationTokenRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetDelegationToken()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetDelegationToken()) { - oprot.writeString(struct.delegationToken); - } - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetFunctionsReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetFunctionsReq.java deleted file mode 100644 index ff45ee0386cb9..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetFunctionsReq.java +++ /dev/null @@ -1,707 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetFunctionsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetFunctionsReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField FUNCTION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("functionName", org.apache.thrift.protocol.TType.STRING, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetFunctionsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetFunctionsReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - private String functionName; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"), - FUNCTION_NAME((short)4, "functionName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - case 4: // FUNCTION_NAME - return FUNCTION_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.FUNCTION_NAME, new org.apache.thrift.meta_data.FieldMetaData("functionName", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetFunctionsReq.class, metaDataMap); - } - - public TGetFunctionsReq() { - } - - public TGetFunctionsReq( - TSessionHandle sessionHandle, - String functionName) - { - this(); - this.sessionHandle = sessionHandle; - this.functionName = functionName; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetFunctionsReq(TGetFunctionsReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - if (other.isSetFunctionName()) { - this.functionName = other.functionName; - } - } - - public TGetFunctionsReq deepCopy() { - return new TGetFunctionsReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - this.functionName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public String getFunctionName() { - return this.functionName; - } - - public void setFunctionName(String functionName) { - this.functionName = functionName; - } - - public void unsetFunctionName() { - this.functionName = null; - } - - /** Returns true if field functionName is set (has been assigned a value) and false otherwise */ - public boolean isSetFunctionName() { - return this.functionName != null; - } - - public void setFunctionNameIsSet(boolean value) { - if (!value) { - this.functionName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - case FUNCTION_NAME: - if (value == null) { - unsetFunctionName(); - } else { - setFunctionName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - case FUNCTION_NAME: - 
return getFunctionName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - case FUNCTION_NAME: - return isSetFunctionName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetFunctionsReq) - return this.equals((TGetFunctionsReq)that); - return false; - } - - public boolean equals(TGetFunctionsReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - boolean this_present_functionName = true && this.isSetFunctionName(); - boolean that_present_functionName = true && that.isSetFunctionName(); - if (this_present_functionName || that_present_functionName) { - if (!(this_present_functionName && that_present_functionName)) - return false; - if (!this.functionName.equals(that.functionName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - builder.append(present_catalogName); - if (present_catalogName) - builder.append(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - builder.append(present_schemaName); - if (present_schemaName) - builder.append(schemaName); - - boolean present_functionName = true && (isSetFunctionName()); - builder.append(present_functionName); - if (present_functionName) - builder.append(functionName); - - return builder.toHashCode(); - } - - public int compareTo(TGetFunctionsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetFunctionsReq typedOther = (TGetFunctionsReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(typedOther.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, typedOther.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(typedOther.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, typedOther.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFunctionName()).compareTo(typedOther.isSetFunctionName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFunctionName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.functionName, typedOther.functionName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetFunctionsReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - if (!first) sb.append(", "); - sb.append("functionName:"); - if (this.functionName == null) { - sb.append("null"); - } else { - sb.append(this.functionName); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetFunctionName()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'functionName' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetFunctionsReqStandardSchemeFactory implements SchemeFactory { - public TGetFunctionsReqStandardScheme getScheme() { - return new TGetFunctionsReqStandardScheme(); - } - } - - private static class TGetFunctionsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // FUNCTION_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.functionName = iprot.readString(); - struct.setFunctionNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - if (struct.functionName != null) { - oprot.writeFieldBegin(FUNCTION_NAME_FIELD_DESC); - 
oprot.writeString(struct.functionName); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetFunctionsReqTupleSchemeFactory implements SchemeFactory { - public TGetFunctionsReqTupleScheme getScheme() { - return new TGetFunctionsReqTupleScheme(); - } - } - - private static class TGetFunctionsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.functionName); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.functionName = iprot.readString(); - struct.setFunctionNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetFunctionsResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetFunctionsResp.java deleted file mode 100644 index 3adafdacb54ef..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetFunctionsResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetFunctionsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetFunctionsResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField 
OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetFunctionsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetFunctionsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetFunctionsResp.class, metaDataMap); - } - - public TGetFunctionsResp() { - } - - public TGetFunctionsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetFunctionsResp(TGetFunctionsResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetFunctionsResp deepCopy() { - return new TGetFunctionsResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetFunctionsResp) - return this.equals((TGetFunctionsResp)that); - return false; - } - - public boolean equals(TGetFunctionsResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - 
builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetFunctionsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetFunctionsResp typedOther = (TGetFunctionsResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetFunctionsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetFunctionsRespStandardSchemeFactory implements SchemeFactory { - public TGetFunctionsRespStandardScheme getScheme() { - return new TGetFunctionsRespStandardScheme(); - } - } - - private static class TGetFunctionsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetFunctionsRespTupleSchemeFactory implements SchemeFactory { - public TGetFunctionsRespTupleScheme getScheme() { - return new TGetFunctionsRespTupleScheme(); - } - } - - private static class TGetFunctionsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override 
- public void read(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoReq.java deleted file mode 100644 index 0139bf04ec7db..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoReq.java +++ /dev/null @@ -1,503 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetInfoReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField INFO_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("infoType", org.apache.thrift.protocol.TType.I32, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetInfoReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetInfoReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private TGetInfoType infoType; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - /** - * - * @see TGetInfoType - */ - INFO_TYPE((short)2, "infoType"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // INFO_TYPE - return INFO_TYPE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.INFO_TYPE, new org.apache.thrift.meta_data.FieldMetaData("infoType", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TGetInfoType.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetInfoReq.class, metaDataMap); - } - - public TGetInfoReq() { - } - - public TGetInfoReq( - TSessionHandle sessionHandle, - TGetInfoType infoType) - { - this(); - this.sessionHandle = sessionHandle; - this.infoType = infoType; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetInfoReq(TGetInfoReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetInfoType()) { - this.infoType = other.infoType; - } - } - - public TGetInfoReq deepCopy() { - return new TGetInfoReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.infoType = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - /** - * - * @see TGetInfoType - */ - public TGetInfoType getInfoType() { - return this.infoType; - } - - /** - * - * @see TGetInfoType - */ - public void setInfoType(TGetInfoType infoType) { - this.infoType = infoType; - } - - public void unsetInfoType() { - this.infoType = null; - } - - /** Returns true if field infoType is set (has been assigned a value) and false otherwise */ - public boolean isSetInfoType() { - return this.infoType != null; - } - - public void setInfoTypeIsSet(boolean value) { - if (!value) { - this.infoType = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case INFO_TYPE: - if (value == null) { - unsetInfoType(); - } else { - setInfoType((TGetInfoType)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case INFO_TYPE: - return getInfoType(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case INFO_TYPE: - return isSetInfoType(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetInfoReq) - return this.equals((TGetInfoReq)that); - return false; - } - - public boolean equals(TGetInfoReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_infoType = true && this.isSetInfoType(); - boolean that_present_infoType = true && that.isSetInfoType(); - if (this_present_infoType || that_present_infoType) { - if (!(this_present_infoType && that_present_infoType)) - return false; - if (!this.infoType.equals(that.infoType)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = 
true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_infoType = true && (isSetInfoType()); - builder.append(present_infoType); - if (present_infoType) - builder.append(infoType.getValue()); - - return builder.toHashCode(); - } - - public int compareTo(TGetInfoReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetInfoReq typedOther = (TGetInfoReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetInfoType()).compareTo(typedOther.isSetInfoType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetInfoType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.infoType, typedOther.infoType); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetInfoReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("infoType:"); - if (this.infoType == null) { - sb.append("null"); - } else { - sb.append(this.infoType); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetInfoType()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'infoType' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetInfoReqStandardSchemeFactory implements SchemeFactory { - public TGetInfoReqStandardScheme getScheme() { - return new TGetInfoReqStandardScheme(); - } - } - - private static class TGetInfoReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetInfoReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // INFO_TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.infoType = TGetInfoType.findByValue(iprot.readI32()); - struct.setInfoTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetInfoReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.infoType != null) { - oprot.writeFieldBegin(INFO_TYPE_FIELD_DESC); - oprot.writeI32(struct.infoType.getValue()); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetInfoReqTupleSchemeFactory implements SchemeFactory { - public TGetInfoReqTupleScheme getScheme() { - return new TGetInfoReqTupleScheme(); - } - } - - private static class TGetInfoReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetInfoReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeI32(struct.infoType.getValue()); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetInfoReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.infoType = 
TGetInfoType.findByValue(iprot.readI32()); - struct.setInfoTypeIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoResp.java deleted file mode 100644 index 2faaa9211b3ba..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoResp.java +++ /dev/null @@ -1,493 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetInfoResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField INFO_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("infoValue", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetInfoRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetInfoRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TGetInfoValue infoValue; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - INFO_VALUE((short)2, "infoValue"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // INFO_VALUE - return INFO_VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.INFO_VALUE, new org.apache.thrift.meta_data.FieldMetaData("infoValue", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetInfoValue.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetInfoResp.class, metaDataMap); - } - - public TGetInfoResp() { - } - - public TGetInfoResp( - TStatus status, - TGetInfoValue infoValue) - { - this(); - this.status = status; - this.infoValue = infoValue; - } - - /** - * Performs a deep copy on other. - */ - public TGetInfoResp(TGetInfoResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetInfoValue()) { - this.infoValue = new TGetInfoValue(other.infoValue); - } - } - - public TGetInfoResp deepCopy() { - return new TGetInfoResp(this); - } - - @Override - public void clear() { - this.status = null; - this.infoValue = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TGetInfoValue getInfoValue() { - return this.infoValue; - } - - public void setInfoValue(TGetInfoValue infoValue) { - this.infoValue = infoValue; - } - - public void unsetInfoValue() { - this.infoValue = null; - } - - /** Returns true if field infoValue is set (has been assigned a value) and false otherwise */ - public boolean isSetInfoValue() { - return this.infoValue != null; - } - - public void setInfoValueIsSet(boolean value) { - if (!value) { - this.infoValue = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case INFO_VALUE: - if (value == null) { - unsetInfoValue(); - } else { - setInfoValue((TGetInfoValue)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case INFO_VALUE: - return getInfoValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { 
- throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case INFO_VALUE: - return isSetInfoValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetInfoResp) - return this.equals((TGetInfoResp)that); - return false; - } - - public boolean equals(TGetInfoResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_infoValue = true && this.isSetInfoValue(); - boolean that_present_infoValue = true && that.isSetInfoValue(); - if (this_present_infoValue || that_present_infoValue) { - if (!(this_present_infoValue && that_present_infoValue)) - return false; - if (!this.infoValue.equals(that.infoValue)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_infoValue = true && (isSetInfoValue()); - builder.append(present_infoValue); - if (present_infoValue) - builder.append(infoValue); - - return builder.toHashCode(); - } - - public int compareTo(TGetInfoResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetInfoResp typedOther = (TGetInfoResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetInfoValue()).compareTo(typedOther.isSetInfoValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetInfoValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.infoValue, typedOther.infoValue); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetInfoResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (!first) sb.append(", "); - sb.append("infoValue:"); - if (this.infoValue == null) { - sb.append("null"); - } else { - sb.append(this.infoValue); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - if (!isSetInfoValue()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'infoValue' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetInfoRespStandardSchemeFactory implements SchemeFactory { - public TGetInfoRespStandardScheme getScheme() { - return new TGetInfoRespStandardScheme(); - } - } - - private static class TGetInfoRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetInfoResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // INFO_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.infoValue = new TGetInfoValue(); - struct.infoValue.read(iprot); - struct.setInfoValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetInfoResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.infoValue != null) { - oprot.writeFieldBegin(INFO_VALUE_FIELD_DESC); - struct.infoValue.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetInfoRespTupleSchemeFactory implements SchemeFactory { - public TGetInfoRespTupleScheme getScheme() { - return new TGetInfoRespTupleScheme(); - } - } - - private static class TGetInfoRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - struct.infoValue.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = 
(TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - struct.infoValue = new TGetInfoValue(); - struct.infoValue.read(iprot); - struct.setInfoValueIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoType.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoType.java deleted file mode 100644 index d9dd62414f001..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoType.java +++ /dev/null @@ -1,180 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TGetInfoType implements org.apache.thrift.TEnum { - CLI_MAX_DRIVER_CONNECTIONS(0), - CLI_MAX_CONCURRENT_ACTIVITIES(1), - CLI_DATA_SOURCE_NAME(2), - CLI_FETCH_DIRECTION(8), - CLI_SERVER_NAME(13), - CLI_SEARCH_PATTERN_ESCAPE(14), - CLI_DBMS_NAME(17), - CLI_DBMS_VER(18), - CLI_ACCESSIBLE_TABLES(19), - CLI_ACCESSIBLE_PROCEDURES(20), - CLI_CURSOR_COMMIT_BEHAVIOR(23), - CLI_DATA_SOURCE_READ_ONLY(25), - CLI_DEFAULT_TXN_ISOLATION(26), - CLI_IDENTIFIER_CASE(28), - CLI_IDENTIFIER_QUOTE_CHAR(29), - CLI_MAX_COLUMN_NAME_LEN(30), - CLI_MAX_CURSOR_NAME_LEN(31), - CLI_MAX_SCHEMA_NAME_LEN(32), - CLI_MAX_CATALOG_NAME_LEN(34), - CLI_MAX_TABLE_NAME_LEN(35), - CLI_SCROLL_CONCURRENCY(43), - CLI_TXN_CAPABLE(46), - CLI_USER_NAME(47), - CLI_TXN_ISOLATION_OPTION(72), - CLI_INTEGRITY(73), - CLI_GETDATA_EXTENSIONS(81), - CLI_NULL_COLLATION(85), - CLI_ALTER_TABLE(86), - CLI_ORDER_BY_COLUMNS_IN_SELECT(90), - CLI_SPECIAL_CHARACTERS(94), - CLI_MAX_COLUMNS_IN_GROUP_BY(97), - CLI_MAX_COLUMNS_IN_INDEX(98), - CLI_MAX_COLUMNS_IN_ORDER_BY(99), - CLI_MAX_COLUMNS_IN_SELECT(100), - CLI_MAX_COLUMNS_IN_TABLE(101), - CLI_MAX_INDEX_SIZE(102), - CLI_MAX_ROW_SIZE(104), - CLI_MAX_STATEMENT_LEN(105), - CLI_MAX_TABLES_IN_SELECT(106), - CLI_MAX_USER_NAME_LEN(107), - CLI_OJ_CAPABILITIES(115), - CLI_XOPEN_CLI_YEAR(10000), - CLI_CURSOR_SENSITIVITY(10001), - CLI_DESCRIBE_PARAMETER(10002), - CLI_CATALOG_NAME(10003), - CLI_COLLATION_SEQ(10004), - CLI_MAX_IDENTIFIER_LEN(10005); - - private final int value; - - private TGetInfoType(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - public static TGetInfoType findByValue(int value) { - switch (value) { - case 0: - return CLI_MAX_DRIVER_CONNECTIONS; - case 1: - return CLI_MAX_CONCURRENT_ACTIVITIES; - case 2: - return CLI_DATA_SOURCE_NAME; - case 8: - return CLI_FETCH_DIRECTION; - case 13: - return CLI_SERVER_NAME; - case 14: - return CLI_SEARCH_PATTERN_ESCAPE; - case 17: - return CLI_DBMS_NAME; - case 18: - return CLI_DBMS_VER; - case 19: - return CLI_ACCESSIBLE_TABLES; - case 20: - return CLI_ACCESSIBLE_PROCEDURES; - case 23: - return CLI_CURSOR_COMMIT_BEHAVIOR; - case 25: - return CLI_DATA_SOURCE_READ_ONLY; - case 26: - return CLI_DEFAULT_TXN_ISOLATION; - case 28: - return CLI_IDENTIFIER_CASE; - case 29: - return CLI_IDENTIFIER_QUOTE_CHAR; - case 30: - return CLI_MAX_COLUMN_NAME_LEN; - case 31: - return CLI_MAX_CURSOR_NAME_LEN; - case 32: - return CLI_MAX_SCHEMA_NAME_LEN; - case 34: - return CLI_MAX_CATALOG_NAME_LEN; - case 35: - return CLI_MAX_TABLE_NAME_LEN; - case 43: - return CLI_SCROLL_CONCURRENCY; - case 46: - return CLI_TXN_CAPABLE; - case 47: - return CLI_USER_NAME; - case 72: - return CLI_TXN_ISOLATION_OPTION; - case 73: - return CLI_INTEGRITY; - case 81: - return CLI_GETDATA_EXTENSIONS; - case 85: - return CLI_NULL_COLLATION; - case 86: - return CLI_ALTER_TABLE; - case 90: - return CLI_ORDER_BY_COLUMNS_IN_SELECT; - case 94: - return CLI_SPECIAL_CHARACTERS; - case 97: - return CLI_MAX_COLUMNS_IN_GROUP_BY; - case 98: - return CLI_MAX_COLUMNS_IN_INDEX; - case 99: - return CLI_MAX_COLUMNS_IN_ORDER_BY; - case 100: - return CLI_MAX_COLUMNS_IN_SELECT; - case 101: - return CLI_MAX_COLUMNS_IN_TABLE; - case 102: - return CLI_MAX_INDEX_SIZE; - case 104: - return CLI_MAX_ROW_SIZE; - case 105: - return CLI_MAX_STATEMENT_LEN; - case 106: - return CLI_MAX_TABLES_IN_SELECT; - case 107: - return CLI_MAX_USER_NAME_LEN; - case 115: - return CLI_OJ_CAPABILITIES; - case 10000: - return CLI_XOPEN_CLI_YEAR; - case 10001: - return CLI_CURSOR_SENSITIVITY; - case 10002: - return CLI_DESCRIBE_PARAMETER; - case 10003: - return CLI_CATALOG_NAME; - case 10004: - return CLI_COLLATION_SEQ; - case 10005: - return CLI_MAX_IDENTIFIER_LEN; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoValue.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoValue.java deleted file mode 100644 index fe2a211c46309..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetInfoValue.java +++ /dev/null @@ -1,593 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import 
org.slf4j.LoggerFactory; - -public class TGetInfoValue extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoValue"); - private static final org.apache.thrift.protocol.TField STRING_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("stringValue", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField SMALL_INT_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("smallIntValue", org.apache.thrift.protocol.TType.I16, (short)2); - private static final org.apache.thrift.protocol.TField INTEGER_BITMASK_FIELD_DESC = new org.apache.thrift.protocol.TField("integerBitmask", org.apache.thrift.protocol.TType.I32, (short)3); - private static final org.apache.thrift.protocol.TField INTEGER_FLAG_FIELD_DESC = new org.apache.thrift.protocol.TField("integerFlag", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField BINARY_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("binaryValue", org.apache.thrift.protocol.TType.I32, (short)5); - private static final org.apache.thrift.protocol.TField LEN_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lenValue", org.apache.thrift.protocol.TType.I64, (short)6); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STRING_VALUE((short)1, "stringValue"), - SMALL_INT_VALUE((short)2, "smallIntValue"), - INTEGER_BITMASK((short)3, "integerBitmask"), - INTEGER_FLAG((short)4, "integerFlag"), - BINARY_VALUE((short)5, "binaryValue"), - LEN_VALUE((short)6, "lenValue"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STRING_VALUE - return STRING_VALUE; - case 2: // SMALL_INT_VALUE - return SMALL_INT_VALUE; - case 3: // INTEGER_BITMASK - return INTEGER_BITMASK; - case 4: // INTEGER_FLAG - return INTEGER_FLAG; - case 5: // BINARY_VALUE - return BINARY_VALUE; - case 6: // LEN_VALUE - return LEN_VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STRING_VALUE, new org.apache.thrift.meta_data.FieldMetaData("stringValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.SMALL_INT_VALUE, new org.apache.thrift.meta_data.FieldMetaData("smallIntValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); - tmpMap.put(_Fields.INTEGER_BITMASK, new org.apache.thrift.meta_data.FieldMetaData("integerBitmask", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.INTEGER_FLAG, new org.apache.thrift.meta_data.FieldMetaData("integerFlag", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.BINARY_VALUE, new org.apache.thrift.meta_data.FieldMetaData("binaryValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.LEN_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lenValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetInfoValue.class, metaDataMap); - } - - public TGetInfoValue() { - super(); - } - - public TGetInfoValue(TGetInfoValue._Fields setField, Object value) { - super(setField, value); - } - - public TGetInfoValue(TGetInfoValue other) { - super(other); - } - public TGetInfoValue deepCopy() { - return new TGetInfoValue(this); - } - - public static TGetInfoValue stringValue(String value) { - TGetInfoValue x = new TGetInfoValue(); - x.setStringValue(value); - return x; - } - - public static TGetInfoValue smallIntValue(short value) { - TGetInfoValue x = new TGetInfoValue(); - x.setSmallIntValue(value); - return x; - } - - public static TGetInfoValue integerBitmask(int value) { - TGetInfoValue x = new TGetInfoValue(); - x.setIntegerBitmask(value); - return x; - } - - public static TGetInfoValue integerFlag(int value) { - TGetInfoValue x = new TGetInfoValue(); - x.setIntegerFlag(value); - return x; - } - - public static TGetInfoValue binaryValue(int value) { - TGetInfoValue x = new TGetInfoValue(); - x.setBinaryValue(value); - return x; - } - - public static TGetInfoValue lenValue(long value) { - TGetInfoValue x = new TGetInfoValue(); - x.setLenValue(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case STRING_VALUE: - if (value instanceof 
String) { - break; - } - throw new ClassCastException("Was expecting value of type String for field 'stringValue', but got " + value.getClass().getSimpleName()); - case SMALL_INT_VALUE: - if (value instanceof Short) { - break; - } - throw new ClassCastException("Was expecting value of type Short for field 'smallIntValue', but got " + value.getClass().getSimpleName()); - case INTEGER_BITMASK: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'integerBitmask', but got " + value.getClass().getSimpleName()); - case INTEGER_FLAG: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'integerFlag', but got " + value.getClass().getSimpleName()); - case BINARY_VALUE: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'binaryValue', but got " + value.getClass().getSimpleName()); - case LEN_VALUE: - if (value instanceof Long) { - break; - } - throw new ClassCastException("Was expecting value of type Long for field 'lenValue', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case STRING_VALUE: - if (field.type == STRING_VALUE_FIELD_DESC.type) { - String stringValue; - stringValue = iprot.readString(); - return stringValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case SMALL_INT_VALUE: - if (field.type == SMALL_INT_VALUE_FIELD_DESC.type) { - Short smallIntValue; - smallIntValue = iprot.readI16(); - return smallIntValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case INTEGER_BITMASK: - if (field.type == INTEGER_BITMASK_FIELD_DESC.type) { - Integer integerBitmask; - integerBitmask = iprot.readI32(); - return integerBitmask; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case INTEGER_FLAG: - if (field.type == INTEGER_FLAG_FIELD_DESC.type) { - Integer integerFlag; - integerFlag = iprot.readI32(); - return integerFlag; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BINARY_VALUE: - if (field.type == BINARY_VALUE_FIELD_DESC.type) { - Integer binaryValue; - binaryValue = iprot.readI32(); - return binaryValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case LEN_VALUE: - if (field.type == LEN_VALUE_FIELD_DESC.type) { - Long lenValue; - lenValue = iprot.readI64(); - return lenValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - return; - case SMALL_INT_VALUE: - Short smallIntValue = (Short)value_; 
- oprot.writeI16(smallIntValue); - return; - case INTEGER_BITMASK: - Integer integerBitmask = (Integer)value_; - oprot.writeI32(integerBitmask); - return; - case INTEGER_FLAG: - Integer integerFlag = (Integer)value_; - oprot.writeI32(integerFlag); - return; - case BINARY_VALUE: - Integer binaryValue = (Integer)value_; - oprot.writeI32(binaryValue); - return; - case LEN_VALUE: - Long lenValue = (Long)value_; - oprot.writeI64(lenValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case STRING_VALUE: - String stringValue; - stringValue = iprot.readString(); - return stringValue; - case SMALL_INT_VALUE: - Short smallIntValue; - smallIntValue = iprot.readI16(); - return smallIntValue; - case INTEGER_BITMASK: - Integer integerBitmask; - integerBitmask = iprot.readI32(); - return integerBitmask; - case INTEGER_FLAG: - Integer integerFlag; - integerFlag = iprot.readI32(); - return integerFlag; - case BINARY_VALUE: - Integer binaryValue; - binaryValue = iprot.readI32(); - return binaryValue; - case LEN_VALUE: - Long lenValue; - lenValue = iprot.readI64(); - return lenValue; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - return; - case SMALL_INT_VALUE: - Short smallIntValue = (Short)value_; - oprot.writeI16(smallIntValue); - return; - case INTEGER_BITMASK: - Integer integerBitmask = (Integer)value_; - oprot.writeI32(integerBitmask); - return; - case INTEGER_FLAG: - Integer integerFlag = (Integer)value_; - oprot.writeI32(integerFlag); - return; - case BINARY_VALUE: - Integer binaryValue = (Integer)value_; - oprot.writeI32(binaryValue); - return; - case LEN_VALUE: - Long lenValue = (Long)value_; - oprot.writeI64(lenValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case STRING_VALUE: - return STRING_VALUE_FIELD_DESC; - case SMALL_INT_VALUE: - return SMALL_INT_VALUE_FIELD_DESC; - case INTEGER_BITMASK: - return INTEGER_BITMASK_FIELD_DESC; - case INTEGER_FLAG: - return INTEGER_FLAG_FIELD_DESC; - case BINARY_VALUE: - return BINARY_VALUE_FIELD_DESC; - case LEN_VALUE: - return LEN_VALUE_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public String getStringValue() { - if (getSetField() == _Fields.STRING_VALUE) { - return (String)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'stringValue' because 
union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringValue(String value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRING_VALUE; - value_ = value; - } - - public short getSmallIntValue() { - if (getSetField() == _Fields.SMALL_INT_VALUE) { - return (Short)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'smallIntValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setSmallIntValue(short value) { - setField_ = _Fields.SMALL_INT_VALUE; - value_ = value; - } - - public int getIntegerBitmask() { - if (getSetField() == _Fields.INTEGER_BITMASK) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'integerBitmask' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setIntegerBitmask(int value) { - setField_ = _Fields.INTEGER_BITMASK; - value_ = value; - } - - public int getIntegerFlag() { - if (getSetField() == _Fields.INTEGER_FLAG) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'integerFlag' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setIntegerFlag(int value) { - setField_ = _Fields.INTEGER_FLAG; - value_ = value; - } - - public int getBinaryValue() { - if (getSetField() == _Fields.BINARY_VALUE) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'binaryValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBinaryValue(int value) { - setField_ = _Fields.BINARY_VALUE; - value_ = value; - } - - public long getLenValue() { - if (getSetField() == _Fields.LEN_VALUE) { - return (Long)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'lenValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setLenValue(long value) { - setField_ = _Fields.LEN_VALUE; - value_ = value; - } - - public boolean isSetStringValue() { - return setField_ == _Fields.STRING_VALUE; - } - - - public boolean isSetSmallIntValue() { - return setField_ == _Fields.SMALL_INT_VALUE; - } - - - public boolean isSetIntegerBitmask() { - return setField_ == _Fields.INTEGER_BITMASK; - } - - - public boolean isSetIntegerFlag() { - return setField_ == _Fields.INTEGER_FLAG; - } - - - public boolean isSetBinaryValue() { - return setField_ == _Fields.BINARY_VALUE; - } - - - public boolean isSetLenValue() { - return setField_ == _Fields.LEN_VALUE; - } - - - public boolean equals(Object other) { - if (other instanceof TGetInfoValue) { - return equals((TGetInfoValue)other); - } else { - return false; - } - } - - public boolean equals(TGetInfoValue other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TGetInfoValue other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - HashCodeBuilder hcb = new HashCodeBuilder(); - hcb.append(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - hcb.append(setField.getThriftFieldId()); - Object value = 
getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - hcb.append(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - hcb.append(value); - } - } - return hcb.toHashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetOperationStatusReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetOperationStatusReq.java deleted file mode 100644 index b88591ea1945b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetOperationStatusReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetOperationStatusReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetOperationStatusReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetOperationStatusReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetOperationStatusReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetOperationStatusReq.class, metaDataMap); - } - - public TGetOperationStatusReq() { - } - - public TGetOperationStatusReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetOperationStatusReq(TGetOperationStatusReq other) { - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetOperationStatusReq deepCopy() { - return new TGetOperationStatusReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetOperationStatusReq) - return this.equals((TGetOperationStatusReq)that); - return false; - } - - public boolean equals(TGetOperationStatusReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetOperationStatusReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetOperationStatusReq typedOther = (TGetOperationStatusReq)other; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException 
{ - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetOperationStatusReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetOperationStatusReqStandardSchemeFactory implements SchemeFactory { - public TGetOperationStatusReqStandardScheme getScheme() { - return new TGetOperationStatusReqStandardScheme(); - } - } - - private static class TGetOperationStatusReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetOperationStatusReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetOperationStatusReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetOperationStatusReqTupleSchemeFactory implements SchemeFactory { - public TGetOperationStatusReqTupleScheme getScheme() { - return new TGetOperationStatusReqTupleScheme(); - } - } - - private static class TGetOperationStatusReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, 
TGetOperationStatusReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetOperationStatusReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetOperationStatusResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetOperationStatusResp.java deleted file mode 100644 index 94ba6bb1146de..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetOperationStatusResp.java +++ /dev/null @@ -1,827 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetOperationStatusResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetOperationStatusResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationState", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField SQL_STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("sqlState", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetOperationStatusRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetOperationStatusRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationState operationState; // optional - private String sqlState; // optional - private int errorCode; // optional - private String 
errorMessage; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - /** - * - * @see TOperationState - */ - OPERATION_STATE((short)2, "operationState"), - SQL_STATE((short)3, "sqlState"), - ERROR_CODE((short)4, "errorCode"), - ERROR_MESSAGE((short)5, "errorMessage"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_STATE - return OPERATION_STATE; - case 3: // SQL_STATE - return SQL_STATE; - case 4: // ERROR_CODE - return ERROR_CODE; - case 5: // ERROR_MESSAGE - return ERROR_MESSAGE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __ERRORCODE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.OPERATION_STATE,_Fields.SQL_STATE,_Fields.ERROR_CODE,_Fields.ERROR_MESSAGE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_STATE, new org.apache.thrift.meta_data.FieldMetaData("operationState", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TOperationState.class))); - tmpMap.put(_Fields.SQL_STATE, new org.apache.thrift.meta_data.FieldMetaData("sqlState", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); 
- metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetOperationStatusResp.class, metaDataMap); - } - - public TGetOperationStatusResp() { - } - - public TGetOperationStatusResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TGetOperationStatusResp(TGetOperationStatusResp other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationState()) { - this.operationState = other.operationState; - } - if (other.isSetSqlState()) { - this.sqlState = other.sqlState; - } - this.errorCode = other.errorCode; - if (other.isSetErrorMessage()) { - this.errorMessage = other.errorMessage; - } - } - - public TGetOperationStatusResp deepCopy() { - return new TGetOperationStatusResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationState = null; - this.sqlState = null; - setErrorCodeIsSet(false); - this.errorCode = 0; - this.errorMessage = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - /** - * - * @see TOperationState - */ - public TOperationState getOperationState() { - return this.operationState; - } - - /** - * - * @see TOperationState - */ - public void setOperationState(TOperationState operationState) { - this.operationState = operationState; - } - - public void unsetOperationState() { - this.operationState = null; - } - - /** Returns true if field operationState is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationState() { - return this.operationState != null; - } - - public void setOperationStateIsSet(boolean value) { - if (!value) { - this.operationState = null; - } - } - - public String getSqlState() { - return this.sqlState; - } - - public void setSqlState(String sqlState) { - this.sqlState = sqlState; - } - - public void unsetSqlState() { - this.sqlState = null; - } - - /** Returns true if field sqlState is set (has been assigned a value) and false otherwise */ - public boolean isSetSqlState() { - return this.sqlState != null; - } - - public void setSqlStateIsSet(boolean value) { - if (!value) { - this.sqlState = null; - } - } - - public int getErrorCode() { - return this.errorCode; - } - - public void setErrorCode(int errorCode) { - this.errorCode = errorCode; - setErrorCodeIsSet(true); - } - - public void unsetErrorCode() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorCode() { - return EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - public void setErrorCodeIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value); - } - - public String getErrorMessage() { - return this.errorMessage; - } - - public void setErrorMessage(String errorMessage) { - this.errorMessage = errorMessage; - } - - public void unsetErrorMessage() { - this.errorMessage = null; - } - - 
/** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorMessage() { - return this.errorMessage != null; - } - - public void setErrorMessageIsSet(boolean value) { - if (!value) { - this.errorMessage = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_STATE: - if (value == null) { - unsetOperationState(); - } else { - setOperationState((TOperationState)value); - } - break; - - case SQL_STATE: - if (value == null) { - unsetSqlState(); - } else { - setSqlState((String)value); - } - break; - - case ERROR_CODE: - if (value == null) { - unsetErrorCode(); - } else { - setErrorCode((Integer)value); - } - break; - - case ERROR_MESSAGE: - if (value == null) { - unsetErrorMessage(); - } else { - setErrorMessage((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_STATE: - return getOperationState(); - - case SQL_STATE: - return getSqlState(); - - case ERROR_CODE: - return Integer.valueOf(getErrorCode()); - - case ERROR_MESSAGE: - return getErrorMessage(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_STATE: - return isSetOperationState(); - case SQL_STATE: - return isSetSqlState(); - case ERROR_CODE: - return isSetErrorCode(); - case ERROR_MESSAGE: - return isSetErrorMessage(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetOperationStatusResp) - return this.equals((TGetOperationStatusResp)that); - return false; - } - - public boolean equals(TGetOperationStatusResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationState = true && this.isSetOperationState(); - boolean that_present_operationState = true && that.isSetOperationState(); - if (this_present_operationState || that_present_operationState) { - if (!(this_present_operationState && that_present_operationState)) - return false; - if (!this.operationState.equals(that.operationState)) - return false; - } - - boolean this_present_sqlState = true && this.isSetSqlState(); - boolean that_present_sqlState = true && that.isSetSqlState(); - if (this_present_sqlState || that_present_sqlState) { - if (!(this_present_sqlState && that_present_sqlState)) - return false; - if (!this.sqlState.equals(that.sqlState)) - return false; - } - - boolean this_present_errorCode = true && this.isSetErrorCode(); - boolean that_present_errorCode = true && that.isSetErrorCode(); - if (this_present_errorCode || that_present_errorCode) { - if (!(this_present_errorCode && that_present_errorCode)) - return false; - if (this.errorCode != that.errorCode) - return false; - } - - boolean this_present_errorMessage 
= true && this.isSetErrorMessage(); - boolean that_present_errorMessage = true && that.isSetErrorMessage(); - if (this_present_errorMessage || that_present_errorMessage) { - if (!(this_present_errorMessage && that_present_errorMessage)) - return false; - if (!this.errorMessage.equals(that.errorMessage)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationState = true && (isSetOperationState()); - builder.append(present_operationState); - if (present_operationState) - builder.append(operationState.getValue()); - - boolean present_sqlState = true && (isSetSqlState()); - builder.append(present_sqlState); - if (present_sqlState) - builder.append(sqlState); - - boolean present_errorCode = true && (isSetErrorCode()); - builder.append(present_errorCode); - if (present_errorCode) - builder.append(errorCode); - - boolean present_errorMessage = true && (isSetErrorMessage()); - builder.append(present_errorMessage); - if (present_errorMessage) - builder.append(errorMessage); - - return builder.toHashCode(); - } - - public int compareTo(TGetOperationStatusResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetOperationStatusResp typedOther = (TGetOperationStatusResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationState()).compareTo(typedOther.isSetOperationState()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationState()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationState, typedOther.operationState); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSqlState()).compareTo(typedOther.isSetSqlState()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSqlState()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sqlState, typedOther.sqlState); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorCode()).compareTo(typedOther.isSetErrorCode()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorCode()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, typedOther.errorCode); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(typedOther.isSetErrorMessage()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorMessage()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, typedOther.errorMessage); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetOperationStatusResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationState()) { - if (!first) sb.append(", "); - sb.append("operationState:"); - if (this.operationState == null) { - sb.append("null"); - } else { - sb.append(this.operationState); - } - first = false; - } - if (isSetSqlState()) { - if (!first) sb.append(", "); - sb.append("sqlState:"); - if (this.sqlState == null) { - sb.append("null"); - } else { - sb.append(this.sqlState); - } - first = false; - } - if (isSetErrorCode()) { - if (!first) sb.append(", "); - sb.append("errorCode:"); - sb.append(this.errorCode); - first = false; - } - if (isSetErrorMessage()) { - if (!first) sb.append(", "); - sb.append("errorMessage:"); - if (this.errorMessage == null) { - sb.append("null"); - } else { - sb.append(this.errorMessage); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetOperationStatusRespStandardSchemeFactory implements SchemeFactory { - public TGetOperationStatusRespStandardScheme getScheme() { - return new TGetOperationStatusRespStandardScheme(); - } - } - - private static class TGetOperationStatusRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_STATE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.operationState = TOperationState.findByValue(iprot.readI32()); - struct.setOperationStateIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SQL_STATE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // ERROR_CODE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // ERROR_MESSAGE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationState != null) { - if (struct.isSetOperationState()) { - oprot.writeFieldBegin(OPERATION_STATE_FIELD_DESC); - oprot.writeI32(struct.operationState.getValue()); - oprot.writeFieldEnd(); - } - } - if (struct.sqlState != null) { - if (struct.isSetSqlState()) { - oprot.writeFieldBegin(SQL_STATE_FIELD_DESC); - oprot.writeString(struct.sqlState); - oprot.writeFieldEnd(); - } - } - if (struct.isSetErrorCode()) { - oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC); - oprot.writeI32(struct.errorCode); - oprot.writeFieldEnd(); - } - if (struct.errorMessage != null) { - if (struct.isSetErrorMessage()) { - oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC); - oprot.writeString(struct.errorMessage); - oprot.writeFieldEnd(); - } - } - 
oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetOperationStatusRespTupleSchemeFactory implements SchemeFactory { - public TGetOperationStatusRespTupleScheme getScheme() { - return new TGetOperationStatusRespTupleScheme(); - } - } - - private static class TGetOperationStatusRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationState()) { - optionals.set(0); - } - if (struct.isSetSqlState()) { - optionals.set(1); - } - if (struct.isSetErrorCode()) { - optionals.set(2); - } - if (struct.isSetErrorMessage()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetOperationState()) { - oprot.writeI32(struct.operationState.getValue()); - } - if (struct.isSetSqlState()) { - oprot.writeString(struct.sqlState); - } - if (struct.isSetErrorCode()) { - oprot.writeI32(struct.errorCode); - } - if (struct.isSetErrorMessage()) { - oprot.writeString(struct.errorMessage); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(4); - if (incoming.get(0)) { - struct.operationState = TOperationState.findByValue(iprot.readI32()); - struct.setOperationStateIsSet(true); - } - if (incoming.get(1)) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } - if (incoming.get(2)) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } - if (incoming.get(3)) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetResultSetMetadataReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetResultSetMetadataReq.java deleted file mode 100644 index 3bf363c958468..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetResultSetMetadataReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetResultSetMetadataReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private 
static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetResultSetMetadataReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetResultSetMetadataReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetResultSetMetadataReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetResultSetMetadataReq.class, metaDataMap); - } - - public TGetResultSetMetadataReq() { - } - - public TGetResultSetMetadataReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetResultSetMetadataReq(TGetResultSetMetadataReq other) { - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetResultSetMetadataReq deepCopy() { - return new TGetResultSetMetadataReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetResultSetMetadataReq) - return this.equals((TGetResultSetMetadataReq)that); - return false; - } - - public boolean equals(TGetResultSetMetadataReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetResultSetMetadataReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetResultSetMetadataReq typedOther = (TGetResultSetMetadataReq)other; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws 
org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetResultSetMetadataReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetResultSetMetadataReqStandardSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataReqStandardScheme getScheme() { - return new TGetResultSetMetadataReqStandardScheme(); - } - } - - private static class TGetResultSetMetadataReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetResultSetMetadataReqTupleSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataReqTupleScheme getScheme() { - return new TGetResultSetMetadataReqTupleScheme(); - } - } - - private static class TGetResultSetMetadataReqTupleScheme extends TupleScheme { - - @Override - public void 
write(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetResultSetMetadataResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetResultSetMetadataResp.java deleted file mode 100644 index a9bef9f722c16..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetResultSetMetadataResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetResultSetMetadataResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetResultSetMetadataResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField SCHEMA_FIELD_DESC = new org.apache.thrift.protocol.TField("schema", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetResultSetMetadataRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetResultSetMetadataRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TTableSchema schema; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - SCHEMA((short)2, "schema"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // SCHEMA - return SCHEMA; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.SCHEMA}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.SCHEMA, new org.apache.thrift.meta_data.FieldMetaData("schema", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableSchema.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetResultSetMetadataResp.class, metaDataMap); - } - - public TGetResultSetMetadataResp() { - } - - public TGetResultSetMetadataResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetResultSetMetadataResp(TGetResultSetMetadataResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetSchema()) { - this.schema = new TTableSchema(other.schema); - } - } - - public TGetResultSetMetadataResp deepCopy() { - return new TGetResultSetMetadataResp(this); - } - - @Override - public void clear() { - this.status = null; - this.schema = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TTableSchema getSchema() { - return this.schema; - } - - public void setSchema(TTableSchema schema) { - this.schema = schema; - } - - public void unsetSchema() { - this.schema = null; - } - - /** Returns true if field schema is set (has been assigned a value) and false otherwise */ - public boolean isSetSchema() { - return this.schema != null; - } - - public void setSchemaIsSet(boolean value) { - if (!value) { - this.schema = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case SCHEMA: - if (value == null) { - unsetSchema(); - } else { - setSchema((TTableSchema)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case SCHEMA: - return getSchema(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case SCHEMA: - return isSetSchema(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetResultSetMetadataResp) - return this.equals((TGetResultSetMetadataResp)that); - return false; - } - - public boolean equals(TGetResultSetMetadataResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_schema = true && this.isSetSchema(); - boolean that_present_schema = true && that.isSetSchema(); - if (this_present_schema || that_present_schema) { - if (!(this_present_schema && that_present_schema)) - return false; - if (!this.schema.equals(that.schema)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_schema = true && (isSetSchema()); - builder.append(present_schema); - if (present_schema) - builder.append(schema); - - return builder.toHashCode(); - } - - public int 
compareTo(TGetResultSetMetadataResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetResultSetMetadataResp typedOther = (TGetResultSetMetadataResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchema()).compareTo(typedOther.isSetSchema()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchema()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schema, typedOther.schema); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetResultSetMetadataResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetSchema()) { - if (!first) sb.append(", "); - sb.append("schema:"); - if (this.schema == null) { - sb.append("null"); - } else { - sb.append(this.schema); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (schema != null) { - schema.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetResultSetMetadataRespStandardSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataRespStandardScheme getScheme() { - return new TGetResultSetMetadataRespStandardScheme(); - } - } - - private static class TGetResultSetMetadataRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // SCHEMA - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.schema = new TTableSchema(); - struct.schema.read(iprot); - struct.setSchemaIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.schema != null) { - if (struct.isSetSchema()) { - oprot.writeFieldBegin(SCHEMA_FIELD_DESC); - struct.schema.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetResultSetMetadataRespTupleSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataRespTupleScheme getScheme() { - return new TGetResultSetMetadataRespTupleScheme(); - } - } - - private static class TGetResultSetMetadataRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetSchema()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSchema()) { - struct.schema.write(oprot); - } - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.schema = new TTableSchema(); - struct.schema.read(iprot); - struct.setSchemaIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetSchemasReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetSchemasReq.java deleted file mode 100644 index c2aadaa49a1e9..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetSchemasReq.java +++ /dev/null @@ -1,606 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetSchemasReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetSchemasReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetSchemasReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetSchemasReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetSchemasReq.class, metaDataMap); - } - - public TGetSchemasReq() { - } - - public TGetSchemasReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetSchemasReq(TGetSchemasReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - } - - public TGetSchemasReq deepCopy() { - return new TGetSchemasReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetSchemasReq) - return this.equals((TGetSchemasReq)that); - return false; - } - - public boolean equals(TGetSchemasReq that) { - if (that == null) - return false; - - boolean 
this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - builder.append(present_catalogName); - if (present_catalogName) - builder.append(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - builder.append(present_schemaName); - if (present_schemaName) - builder.append(schemaName); - - return builder.toHashCode(); - } - - public int compareTo(TGetSchemasReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetSchemasReq typedOther = (TGetSchemasReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(typedOther.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, typedOther.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(typedOther.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, typedOther.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetSchemasReq("); - boolean 
first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetSchemasReqStandardSchemeFactory implements SchemeFactory { - public TGetSchemasReqStandardScheme getScheme() { - return new TGetSchemasReqStandardScheme(); - } - } - - private static class TGetSchemasReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetSchemasReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetSchemasReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - 
oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetSchemasReqTupleSchemeFactory implements SchemeFactory { - public TGetSchemasReqTupleScheme getScheme() { - return new TGetSchemasReqTupleScheme(); - } - } - - private static class TGetSchemasReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetSchemasReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetSchemasReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetSchemasResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetSchemasResp.java deleted file mode 100644 index ac1ea3e7cc7af..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetSchemasResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetSchemasResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TGetSchemasResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetSchemasRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetSchemasRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetSchemasResp.class, metaDataMap); - } - - public TGetSchemasResp() { - } - - public TGetSchemasResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetSchemasResp(TGetSchemasResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetSchemasResp deepCopy() { - return new TGetSchemasResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetSchemasResp) - return this.equals((TGetSchemasResp)that); - return false; - } - - public boolean equals(TGetSchemasResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - 
builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetSchemasResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetSchemasResp typedOther = (TGetSchemasResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetSchemasResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetSchemasRespStandardSchemeFactory implements SchemeFactory { - public TGetSchemasRespStandardScheme getScheme() { - return new TGetSchemasRespStandardScheme(); - } - } - - private static class TGetSchemasRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetSchemasResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetSchemasResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetSchemasRespTupleSchemeFactory implements SchemeFactory { - public TGetSchemasRespTupleScheme getScheme() { - return new TGetSchemasRespTupleScheme(); - } - } - - private static class TGetSchemasRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetSchemasResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TGetSchemasResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTableTypesReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTableTypesReq.java deleted file mode 100644 index 6f2c713e0be6a..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTableTypesReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetTableTypesReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTableTypesReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTableTypesReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTableTypesReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTableTypesReq.class, metaDataMap); - } - - public TGetTableTypesReq() { - } - - public TGetTableTypesReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. - */ - public TGetTableTypesReq(TGetTableTypesReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TGetTableTypesReq deepCopy() { - return new TGetTableTypesReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTableTypesReq) - return this.equals((TGetTableTypesReq)that); - return false; - } - - public boolean equals(TGetTableTypesReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && 
this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetTableTypesReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetTableTypesReq typedOther = (TGetTableTypesReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTableTypesReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTableTypesReqStandardSchemeFactory implements SchemeFactory { - public TGetTableTypesReqStandardScheme getScheme() { - return new TGetTableTypesReqStandardScheme(); - } - } - - private static class TGetTableTypesReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTableTypesReqTupleSchemeFactory implements SchemeFactory { - public TGetTableTypesReqTupleScheme getScheme() { - return new TGetTableTypesReqTupleScheme(); - } - } - - private static class TGetTableTypesReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTableTypesResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTableTypesResp.java deleted file mode 100644 index 6f33fbcf5dadc..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTableTypesResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * 
Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetTableTypesResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTableTypesResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTableTypesRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTableTypesRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTableTypesResp.class, metaDataMap); - } - - public TGetTableTypesResp() { - } - - public TGetTableTypesResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TGetTableTypesResp(TGetTableTypesResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetTableTypesResp deepCopy() { - return new TGetTableTypesResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new 
IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTableTypesResp) - return this.equals((TGetTableTypesResp)that); - return false; - } - - public boolean equals(TGetTableTypesResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetTableTypesResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetTableTypesResp typedOther = (TGetTableTypesResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTableTypesResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if 
(isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTableTypesRespStandardSchemeFactory implements SchemeFactory { - public TGetTableTypesRespStandardScheme getScheme() { - return new TGetTableTypesRespStandardScheme(); - } - } - - private static class TGetTableTypesRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTableTypesRespTupleSchemeFactory implements SchemeFactory { - public TGetTableTypesRespTupleScheme getScheme() { - return new TGetTableTypesRespTupleScheme(); - } - } - - private static class 
TGetTableTypesRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTablesReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTablesReq.java deleted file mode 100644 index c973fcc24cb10..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTablesReq.java +++ /dev/null @@ -1,870 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetTablesReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTablesReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField TABLE_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("tableTypes", org.apache.thrift.protocol.TType.LIST, (short)5); - - 
private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTablesReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTablesReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - private String tableName; // optional - private List tableTypes; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"), - TABLE_NAME((short)4, "tableName"), - TABLE_TYPES((short)5, "tableTypes"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - case 4: // TABLE_NAME - return TABLE_NAME; - case 5: // TABLE_TYPES - return TABLE_TYPES; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME,_Fields.TABLE_NAME,_Fields.TABLE_TYPES}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.TABLE_TYPES, new org.apache.thrift.meta_data.FieldMetaData("tableTypes", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTablesReq.class, metaDataMap); - } - - public TGetTablesReq() { - } - - public TGetTablesReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetTablesReq(TGetTablesReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - if (other.isSetTableName()) { - this.tableName = other.tableName; - } - if (other.isSetTableTypes()) { - List __this__tableTypes = new ArrayList(); - for (String other_element : other.tableTypes) { - __this__tableTypes.add(other_element); - } - this.tableTypes = __this__tableTypes; - } - } - - public TGetTablesReq deepCopy() { - return new TGetTablesReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - this.tableName = null; - this.tableTypes = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public String getTableName() { - return this.tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } - } - - public int getTableTypesSize() { - return (this.tableTypes == null) ? 0 : this.tableTypes.size(); - } - - public java.util.Iterator getTableTypesIterator() { - return (this.tableTypes == null) ? 
null : this.tableTypes.iterator(); - } - - public void addToTableTypes(String elem) { - if (this.tableTypes == null) { - this.tableTypes = new ArrayList(); - } - this.tableTypes.add(elem); - } - - public List getTableTypes() { - return this.tableTypes; - } - - public void setTableTypes(List tableTypes) { - this.tableTypes = tableTypes; - } - - public void unsetTableTypes() { - this.tableTypes = null; - } - - /** Returns true if field tableTypes is set (has been assigned a value) and false otherwise */ - public boolean isSetTableTypes() { - return this.tableTypes != null; - } - - public void setTableTypesIsSet(boolean value) { - if (!value) { - this.tableTypes = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((String)value); - } - break; - - case TABLE_TYPES: - if (value == null) { - unsetTableTypes(); - } else { - setTableTypes((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - case TABLE_NAME: - return getTableName(); - - case TABLE_TYPES: - return getTableTypes(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - case TABLE_NAME: - return isSetTableName(); - case TABLE_TYPES: - return isSetTableTypes(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTablesReq) - return this.equals((TGetTablesReq)that); - return false; - } - - public boolean equals(TGetTablesReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return 
false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - - boolean this_present_tableTypes = true && this.isSetTableTypes(); - boolean that_present_tableTypes = true && that.isSetTableTypes(); - if (this_present_tableTypes || that_present_tableTypes) { - if (!(this_present_tableTypes && that_present_tableTypes)) - return false; - if (!this.tableTypes.equals(that.tableTypes)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - builder.append(present_catalogName); - if (present_catalogName) - builder.append(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - builder.append(present_schemaName); - if (present_schemaName) - builder.append(schemaName); - - boolean present_tableName = true && (isSetTableName()); - builder.append(present_tableName); - if (present_tableName) - builder.append(tableName); - - boolean present_tableTypes = true && (isSetTableTypes()); - builder.append(present_tableTypes); - if (present_tableTypes) - builder.append(tableTypes); - - return builder.toHashCode(); - } - - public int compareTo(TGetTablesReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetTablesReq typedOther = (TGetTablesReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(typedOther.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, typedOther.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(typedOther.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, typedOther.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(typedOther.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, typedOther.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTableTypes()).compareTo(typedOther.isSetTableTypes()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableTypes()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.tableTypes, typedOther.tableTypes); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTablesReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - if (isSetTableName()) { - if (!first) sb.append(", "); - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - } - if (isSetTableTypes()) { - if (!first) sb.append(", "); - sb.append("tableTypes:"); - if (this.tableTypes == null) { - sb.append("null"); - } else { - sb.append(this.tableTypes); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTablesReqStandardSchemeFactory implements SchemeFactory { - public TGetTablesReqStandardScheme getScheme() { - return new TGetTablesReqStandardScheme(); - } - } - - private static class TGetTablesReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTablesReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // TABLE_TYPES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list172 = iprot.readListBegin(); - struct.tableTypes = new ArrayList(_list172.size); - for (int _i173 = 0; _i173 < _list172.size; ++_i173) - { - String _elem174; // optional - _elem174 = iprot.readString(); - struct.tableTypes.add(_elem174); - } - iprot.readListEnd(); - } - struct.setTableTypesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTablesReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - 
oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - if (struct.tableName != null) { - if (struct.isSetTableName()) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.tableName); - oprot.writeFieldEnd(); - } - } - if (struct.tableTypes != null) { - if (struct.isSetTableTypes()) { - oprot.writeFieldBegin(TABLE_TYPES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tableTypes.size())); - for (String _iter175 : struct.tableTypes) - { - oprot.writeString(_iter175); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTablesReqTupleSchemeFactory implements SchemeFactory { - public TGetTablesReqTupleScheme getScheme() { - return new TGetTablesReqTupleScheme(); - } - } - - private static class TGetTablesReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTablesReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - if (struct.isSetTableName()) { - optionals.set(2); - } - if (struct.isSetTableTypes()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - if (struct.isSetTableName()) { - oprot.writeString(struct.tableName); - } - if (struct.isSetTableTypes()) { - { - oprot.writeI32(struct.tableTypes.size()); - for (String _iter176 : struct.tableTypes) - { - oprot.writeString(_iter176); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTablesReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(4); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - if (incoming.get(2)) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } - if (incoming.get(3)) { - { - org.apache.thrift.protocol.TList _list177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tableTypes = new ArrayList(_list177.size); - for (int _i178 = 0; _i178 < _list177.size; ++_i178) - { - String _elem179; // optional - _elem179 = iprot.readString(); - struct.tableTypes.add(_elem179); - } - } - struct.setTableTypesIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTablesResp.java 
b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTablesResp.java deleted file mode 100644 index d526f4478a24e..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTablesResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetTablesResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTablesResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTablesRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTablesRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTablesResp.class, metaDataMap); - } - - public TGetTablesResp() { - } - - public TGetTablesResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TGetTablesResp(TGetTablesResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetTablesResp deepCopy() { - return new TGetTablesResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } 
- - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTablesResp) - return this.equals((TGetTablesResp)that); - return false; - } - - public boolean equals(TGetTablesResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetTablesResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetTablesResp typedOther = (TGetTablesResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTablesResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - 
sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTablesRespStandardSchemeFactory implements SchemeFactory { - public TGetTablesRespStandardScheme getScheme() { - return new TGetTablesRespStandardScheme(); - } - } - - private static class TGetTablesRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTablesResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTablesResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTablesRespTupleSchemeFactory implements SchemeFactory { - public TGetTablesRespTupleScheme getScheme() { - return new TGetTablesRespTupleScheme(); - } - } - - private static class TGetTablesRespTupleScheme extends TupleScheme { - - @Override - public void 
write(org.apache.thrift.protocol.TProtocol prot, TGetTablesResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTablesResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTypeInfoReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTypeInfoReq.java deleted file mode 100644 index d40115e83ec45..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTypeInfoReq.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetTypeInfoReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTypeInfoReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTypeInfoReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTypeInfoReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTypeInfoReq.class, metaDataMap); - } - - public TGetTypeInfoReq() { - } - - public TGetTypeInfoReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetTypeInfoReq(TGetTypeInfoReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TGetTypeInfoReq deepCopy() { - return new TGetTypeInfoReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTypeInfoReq) - return this.equals((TGetTypeInfoReq)that); - return false; - } - - public boolean equals(TGetTypeInfoReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetTypeInfoReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetTypeInfoReq typedOther = (TGetTypeInfoReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTypeInfoReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTypeInfoReqStandardSchemeFactory implements SchemeFactory { - public TGetTypeInfoReqStandardScheme getScheme() { - return new TGetTypeInfoReqStandardScheme(); - } - } - - private static class TGetTypeInfoReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTypeInfoReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTypeInfoReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTypeInfoReqTupleSchemeFactory implements SchemeFactory { - public TGetTypeInfoReqTupleScheme getScheme() { - return new TGetTypeInfoReqTupleScheme(); - } - } - - private static class TGetTypeInfoReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoReq struct) throws 
org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTypeInfoResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTypeInfoResp.java deleted file mode 100644 index 59be1a33b55e2..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TGetTypeInfoResp.java +++ /dev/null @@ -1,505 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TGetTypeInfoResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTypeInfoResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTypeInfoRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTypeInfoRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTypeInfoResp.class, metaDataMap); - } - - public TGetTypeInfoResp() { - } - - public TGetTypeInfoResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetTypeInfoResp(TGetTypeInfoResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetTypeInfoResp deepCopy() { - return new TGetTypeInfoResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTypeInfoResp) - return this.equals((TGetTypeInfoResp)that); - return false; - } - - public boolean equals(TGetTypeInfoResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - 
builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - builder.append(present_operationHandle); - if (present_operationHandle) - builder.append(operationHandle); - - return builder.toHashCode(); - } - - public int compareTo(TGetTypeInfoResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TGetTypeInfoResp typedOther = (TGetTypeInfoResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(typedOther.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, typedOther.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTypeInfoResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTypeInfoRespStandardSchemeFactory implements SchemeFactory { - public TGetTypeInfoRespStandardScheme getScheme() { - return new TGetTypeInfoRespStandardScheme(); - } - } - - private static class TGetTypeInfoRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTypeInfoRespTupleSchemeFactory implements SchemeFactory { - public TGetTypeInfoRespTupleScheme getScheme() { - return new TGetTypeInfoRespTupleScheme(); - } - } - - private static class TGetTypeInfoRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public 
void read(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/THandleIdentifier.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/THandleIdentifier.java deleted file mode 100644 index 368273c341c7b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/THandleIdentifier.java +++ /dev/null @@ -1,506 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class THandleIdentifier implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THandleIdentifier"); - - private static final org.apache.thrift.protocol.TField GUID_FIELD_DESC = new org.apache.thrift.protocol.TField("guid", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField SECRET_FIELD_DESC = new org.apache.thrift.protocol.TField("secret", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new THandleIdentifierStandardSchemeFactory()); - schemes.put(TupleScheme.class, new THandleIdentifierTupleSchemeFactory()); - } - - private ByteBuffer guid; // required - private ByteBuffer secret; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - GUID((short)1, "guid"), - SECRET((short)2, "secret"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // GUID - return GUID; - case 2: // SECRET - return SECRET; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.GUID, new org.apache.thrift.meta_data.FieldMetaData("guid", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - tmpMap.put(_Fields.SECRET, new org.apache.thrift.meta_data.FieldMetaData("secret", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(THandleIdentifier.class, metaDataMap); - } - - public THandleIdentifier() { - } - - public THandleIdentifier( - ByteBuffer guid, - ByteBuffer secret) - { - this(); - this.guid = guid; - this.secret = secret; - } - - /** - * Performs a deep copy on other. - */ - public THandleIdentifier(THandleIdentifier other) { - if (other.isSetGuid()) { - this.guid = org.apache.thrift.TBaseHelper.copyBinary(other.guid); -; - } - if (other.isSetSecret()) { - this.secret = org.apache.thrift.TBaseHelper.copyBinary(other.secret); -; - } - } - - public THandleIdentifier deepCopy() { - return new THandleIdentifier(this); - } - - @Override - public void clear() { - this.guid = null; - this.secret = null; - } - - public byte[] getGuid() { - setGuid(org.apache.thrift.TBaseHelper.rightSize(guid)); - return guid == null ? null : guid.array(); - } - - public ByteBuffer bufferForGuid() { - return guid; - } - - public void setGuid(byte[] guid) { - setGuid(guid == null ? (ByteBuffer)null : ByteBuffer.wrap(guid)); - } - - public void setGuid(ByteBuffer guid) { - this.guid = guid; - } - - public void unsetGuid() { - this.guid = null; - } - - /** Returns true if field guid is set (has been assigned a value) and false otherwise */ - public boolean isSetGuid() { - return this.guid != null; - } - - public void setGuidIsSet(boolean value) { - if (!value) { - this.guid = null; - } - } - - public byte[] getSecret() { - setSecret(org.apache.thrift.TBaseHelper.rightSize(secret)); - return secret == null ? null : secret.array(); - } - - public ByteBuffer bufferForSecret() { - return secret; - } - - public void setSecret(byte[] secret) { - setSecret(secret == null ? 
(ByteBuffer)null : ByteBuffer.wrap(secret)); - } - - public void setSecret(ByteBuffer secret) { - this.secret = secret; - } - - public void unsetSecret() { - this.secret = null; - } - - /** Returns true if field secret is set (has been assigned a value) and false otherwise */ - public boolean isSetSecret() { - return this.secret != null; - } - - public void setSecretIsSet(boolean value) { - if (!value) { - this.secret = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case GUID: - if (value == null) { - unsetGuid(); - } else { - setGuid((ByteBuffer)value); - } - break; - - case SECRET: - if (value == null) { - unsetSecret(); - } else { - setSecret((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case GUID: - return getGuid(); - - case SECRET: - return getSecret(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case GUID: - return isSetGuid(); - case SECRET: - return isSetSecret(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof THandleIdentifier) - return this.equals((THandleIdentifier)that); - return false; - } - - public boolean equals(THandleIdentifier that) { - if (that == null) - return false; - - boolean this_present_guid = true && this.isSetGuid(); - boolean that_present_guid = true && that.isSetGuid(); - if (this_present_guid || that_present_guid) { - if (!(this_present_guid && that_present_guid)) - return false; - if (!this.guid.equals(that.guid)) - return false; - } - - boolean this_present_secret = true && this.isSetSecret(); - boolean that_present_secret = true && that.isSetSecret(); - if (this_present_secret || that_present_secret) { - if (!(this_present_secret && that_present_secret)) - return false; - if (!this.secret.equals(that.secret)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_guid = true && (isSetGuid()); - builder.append(present_guid); - if (present_guid) - builder.append(guid); - - boolean present_secret = true && (isSetSecret()); - builder.append(present_secret); - if (present_secret) - builder.append(secret); - - return builder.toHashCode(); - } - - public int compareTo(THandleIdentifier other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - THandleIdentifier typedOther = (THandleIdentifier)other; - - lastComparison = Boolean.valueOf(isSetGuid()).compareTo(typedOther.isSetGuid()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetGuid()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.guid, typedOther.guid); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSecret()).compareTo(typedOther.isSetSecret()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSecret()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.secret, typedOther.secret); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return 
_Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("THandleIdentifier("); - boolean first = true; - - sb.append("guid:"); - if (this.guid == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.guid, sb); - } - first = false; - if (!first) sb.append(", "); - sb.append("secret:"); - if (this.secret == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.secret, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetGuid()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'guid' is unset! Struct:" + toString()); - } - - if (!isSetSecret()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'secret' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class THandleIdentifierStandardSchemeFactory implements SchemeFactory { - public THandleIdentifierStandardScheme getScheme() { - return new THandleIdentifierStandardScheme(); - } - } - - private static class THandleIdentifierStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, THandleIdentifier struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // GUID - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.guid = iprot.readBinary(); - struct.setGuidIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // SECRET - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.secret = iprot.readBinary(); - struct.setSecretIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, THandleIdentifier struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.guid != null) { - 
oprot.writeFieldBegin(GUID_FIELD_DESC); - oprot.writeBinary(struct.guid); - oprot.writeFieldEnd(); - } - if (struct.secret != null) { - oprot.writeFieldBegin(SECRET_FIELD_DESC); - oprot.writeBinary(struct.secret); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class THandleIdentifierTupleSchemeFactory implements SchemeFactory { - public THandleIdentifierTupleScheme getScheme() { - return new THandleIdentifierTupleScheme(); - } - } - - private static class THandleIdentifierTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, THandleIdentifier struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeBinary(struct.guid); - oprot.writeBinary(struct.secret); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, THandleIdentifier struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.guid = iprot.readBinary(); - struct.setGuidIsSet(true); - struct.secret = iprot.readBinary(); - struct.setSecretIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI16Column.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI16Column.java deleted file mode 100644 index c83663072f877..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI16Column.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TI16Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI16Column"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI16ColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI16ColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and 
manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI16Column.class, metaDataMap); - } - - public TI16Column() { - } - - public TI16Column( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. - */ - public TI16Column(TI16Column other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (Short other_element : other.values) { - __this__values.add(other_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TI16Column deepCopy() { - return new TI16Column(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? 
null : this.values.iterator(); - } - - public void addToValues(short elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? (ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI16Column) - return this.equals((TI16Column)that); - return false; - } - - public boolean equals(TI16Column that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TI16Column other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TI16Column typedOther = (TI16Column)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI16Column("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI16ColumnStandardSchemeFactory implements SchemeFactory { - public TI16ColumnStandardScheme getScheme() { - return new TI16ColumnStandardScheme(); - } - } - - private static class TI16ColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI16Column struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list70 = iprot.readListBegin(); - struct.values = new ArrayList(_list70.size); - for (int _i71 = 0; _i71 < _list70.size; ++_i71) - { - short _elem72; // optional - _elem72 = iprot.readI16(); - struct.values.add(_elem72); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI16Column struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I16, struct.values.size())); - for (short _iter73 : struct.values) - { - oprot.writeI16(_iter73); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI16ColumnTupleSchemeFactory implements SchemeFactory { - public TI16ColumnTupleScheme getScheme() { - return new TI16ColumnTupleScheme(); - } - } - - private static class TI16ColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI16Column struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (short _iter74 : struct.values) - { - oprot.writeI16(_iter74); - } - } - oprot.writeBinary(struct.nulls); - } - - 
@Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI16Column struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list75 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I16, iprot.readI32()); - struct.values = new ArrayList(_list75.size); - for (int _i76 = 0; _i76 < _list75.size; ++_i76) - { - short _elem77; // optional - _elem77 = iprot.readI16(); - struct.values.add(_elem77); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI16Value.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI16Value.java deleted file mode 100644 index bb5ae9609de86..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI16Value.java +++ /dev/null @@ -1,386 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TI16Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI16Value"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.I16, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI16ValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI16ValueTupleSchemeFactory()); - } - - private short value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI16Value.class, metaDataMap); - } - - public TI16Value() { - } - - /** - * Performs a deep copy on other. - */ - public TI16Value(TI16Value other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TI16Value deepCopy() { - return new TI16Value(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public short getValue() { - return this.value; - } - - public void setValue(short value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Short)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return Short.valueOf(getValue()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI16Value) - return this.equals((TI16Value)that); - return false; - } - - public boolean equals(TI16Value that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if 
(!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_value = true && (isSetValue()); - builder.append(present_value); - if (present_value) - builder.append(value); - - return builder.toHashCode(); - } - - public int compareTo(TI16Value other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TI16Value typedOther = (TI16Value)other; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI16Value("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI16ValueStandardSchemeFactory implements SchemeFactory { - public TI16ValueStandardScheme getScheme() { - return new TI16ValueStandardScheme(); - } - } - - private static class TI16ValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI16Value struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.value = iprot.readI16(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI16Value struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeI16(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI16ValueTupleSchemeFactory implements SchemeFactory { - public TI16ValueTupleScheme getScheme() { - return new TI16ValueTupleScheme(); - } - } - - private static class TI16ValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI16Value struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeI16(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI16Value struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readI16(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI32Column.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI32Column.java deleted file mode 100644 index 6c6c5f35b7c8e..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI32Column.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import 
org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TI32Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI32Column"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI32ColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI32ColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI32Column.class, metaDataMap); - } - - public TI32Column() { - } - - public TI32Column( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. - */ - public TI32Column(TI32Column other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (Integer other_element : other.values) { - __this__values.add(other_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TI32Column deepCopy() { - return new TI32Column(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(int elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI32Column) - return this.equals((TI32Column)that); - return false; - } - - public boolean equals(TI32Column that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TI32Column other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TI32Column typedOther = (TI32Column)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public 
void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI32Column("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI32ColumnStandardSchemeFactory implements SchemeFactory { - public TI32ColumnStandardScheme getScheme() { - return new TI32ColumnStandardScheme(); - } - } - - private static class TI32ColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI32Column struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list78 = iprot.readListBegin(); - struct.values = new ArrayList(_list78.size); - for (int _i79 = 0; _i79 < _list78.size; ++_i79) - { - int _elem80; // optional - _elem80 = iprot.readI32(); - struct.values.add(_elem80); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI32Column struct) throws org.apache.thrift.TException { - 
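TI32Column above (like the other TxxColumn structs in this file) carries its data as a required list of values plus a required binary nulls field, and validate() rejects a struct missing either one. The generated code treats nulls as opaque bytes; by HiveServer2 convention it is read as a bitmask with one bit per row, a set bit marking that row as NULL. That bit layout is an assumption of the sketch below (names are illustrative), not something the generated struct itself enforces.

    import java.util.Arrays;
    import java.util.List;

    // Sketch of the "values + null bitmask" column layout used by the deleted
    // TxxColumn structs. Assumed convention: bit i of the mask (least significant
    // bit first within each byte) set means row i is NULL.
    public class ColumnSketch {
      static boolean isNull(byte[] nulls, int row) {
        int byteIndex = row / 8;
        if (byteIndex >= nulls.length) return false;   // missing trailing bytes: not null
        return (nulls[byteIndex] & (1 << (row % 8))) != 0;
      }

      public static void main(String[] args) {
        List<Integer> values = Arrays.asList(10, 0, 30); // row 1 holds a placeholder for NULL
        byte[] nulls = { 0b00000010 };                   // only bit 1 set -> row 1 is NULL
        for (int row = 0; row < values.size(); row++) {
          System.out.println(isNull(nulls, row) ? "NULL" : values.get(row));
        }
      }
    }
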
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (int _iter81 : struct.values) - { - oprot.writeI32(_iter81); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI32ColumnTupleSchemeFactory implements SchemeFactory { - public TI32ColumnTupleScheme getScheme() { - return new TI32ColumnTupleScheme(); - } - } - - private static class TI32ColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI32Column struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (int _iter82 : struct.values) - { - oprot.writeI32(_iter82); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI32Column struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list83 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list83.size); - for (int _i84 = 0; _i84 < _list83.size; ++_i84) - { - int _elem85; // optional - _elem85 = iprot.readI32(); - struct.values.add(_elem85); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI32Value.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI32Value.java deleted file mode 100644 index 059408b96c8ce..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI32Value.java +++ /dev/null @@ -1,386 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TI32Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI32Value"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", 
org.apache.thrift.protocol.TType.I32, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI32ValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI32ValueTupleSchemeFactory()); - } - - private int value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI32Value.class, metaDataMap); - } - - public TI32Value() { - } - - /** - * Performs a deep copy on other. 
- */ - public TI32Value(TI32Value other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TI32Value deepCopy() { - return new TI32Value(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public int getValue() { - return this.value; - } - - public void setValue(int value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Integer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return Integer.valueOf(getValue()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI32Value) - return this.equals((TI32Value)that); - return false; - } - - public boolean equals(TI32Value that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_value = true && (isSetValue()); - builder.append(present_value); - if (present_value) - builder.append(value); - - return builder.toHashCode(); - } - - public int compareTo(TI32Value other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TI32Value typedOther = (TI32Value)other; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI32Value("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - 
sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI32ValueStandardSchemeFactory implements SchemeFactory { - public TI32ValueStandardScheme getScheme() { - return new TI32ValueStandardScheme(); - } - } - - private static class TI32ValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI32Value struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.value = iprot.readI32(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI32Value struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeI32(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI32ValueTupleSchemeFactory implements SchemeFactory { - public TI32ValueTupleScheme getScheme() { - return new TI32ValueTupleScheme(); - } - } - - private static class TI32ValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI32Value struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeI32(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI32Value struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readI32(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI64Column.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI64Column.java 
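The TupleScheme variants shown above compress optional fields by writing a presence BitSet first and then only the fields whose bits are set, as TI32ValueTupleScheme does with writeBitSet(optionals, 1). The sketch below imitates that shape with a single optional i32; the byte-level framing is illustrative, not the real TTupleProtocol encoding.

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.BitSet;

    // Sketch of the tuple-scheme idea: one presence bitmap up front, then only
    // the optional values that are actually set.
    public class TupleSketch {
      static byte[] write(Integer optionalValue) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        BitSet optionals = new BitSet();
        if (optionalValue != null) {
          optionals.set(0);                            // field 0 ("value") is present
        }
        byte[] bits = optionals.toByteArray();         // empty bitset -> zero-length array
        out.writeByte(bits.length == 0 ? 0 : bits[0]); // one presence byte for one optional
        if (optionalValue != null) {
          out.writeInt(optionalValue);                 // payload only when the bit is set
        }
        return buf.toByteArray();
      }

      public static void main(String[] args) throws IOException {
        System.out.println(write(null).length); // 1 byte: just the (empty) presence bitmap
        System.out.println(write(42).length);   // 5 bytes: presence bitmap + i32 payload
      }
    }
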
deleted file mode 100644 index cc383ed089fa4..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI64Column.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TI64Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI64Column"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI64ColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI64ColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI64Column.class, metaDataMap); - } - - public TI64Column() { - } - - public TI64Column( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. - */ - public TI64Column(TI64Column other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (Long other_element : other.values) { - __this__values.add(other_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TI64Column deepCopy() { - return new TI64Column(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(long elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI64Column) - return this.equals((TI64Column)that); - return false; - } - - public boolean equals(TI64Column that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TI64Column other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TI64Column typedOther = (TI64Column)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public 
void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI64Column("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI64ColumnStandardSchemeFactory implements SchemeFactory { - public TI64ColumnStandardScheme getScheme() { - return new TI64ColumnStandardScheme(); - } - } - - private static class TI64ColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI64Column struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list86 = iprot.readListBegin(); - struct.values = new ArrayList(_list86.size); - for (int _i87 = 0; _i87 < _list86.size; ++_i87) - { - long _elem88; // optional - _elem88 = iprot.readI64(); - struct.values.add(_elem88); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI64Column struct) throws org.apache.thrift.TException { - 
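Every StandardScheme read method above follows the same loop: read a field header, dispatch on the field id, and skip anything it does not recognize via TProtocolUtil.skip, so a newer peer can add fields without breaking this reader. The sketch below reproduces that forward-compatible loop over a toy tag/length framing of its own (not the Thrift binary protocol).

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;

    // Sketch of the skip-unknown-fields read loop used by the generated
    // StandardScheme readers. Toy framing: [fieldId:byte][length:byte][payload],
    // terminated by fieldId 0. Unknown ids are skipped by length, not rejected.
    public class SkipUnknownSketch {
      static long readKnownField(byte[] frame) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(frame));
        long value = 0;
        while (true) {
          int fieldId = in.readUnsignedByte();
          if (fieldId == 0) break;                 // stop marker, like TType.STOP
          int length = in.readUnsignedByte();
          if (fieldId == 1) {                      // the one field this reader knows: an i64
            value = in.readLong();
          } else {
            in.skipBytes(length);                  // forward compatibility: ignore, don't fail
          }
        }
        return value;
      }

      public static void main(String[] args) throws IOException {
        byte[] frame = {
            9, 2, 1, 2,                            // unknown field 9, 2 payload bytes -> skipped
            1, 8, 0, 0, 0, 0, 0, 0, 0, 7,          // known field 1, i64 payload = 7
            0                                      // stop
        };
        System.out.println(readKnownField(frame)); // 7
      }
    }
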
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.values.size())); - for (long _iter89 : struct.values) - { - oprot.writeI64(_iter89); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI64ColumnTupleSchemeFactory implements SchemeFactory { - public TI64ColumnTupleScheme getScheme() { - return new TI64ColumnTupleScheme(); - } - } - - private static class TI64ColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI64Column struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (long _iter90 : struct.values) - { - oprot.writeI64(_iter90); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI64Column struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.values = new ArrayList(_list91.size); - for (int _i92 = 0; _i92 < _list91.size; ++_i92) - { - long _elem93; // optional - _elem93 = iprot.readI64(); - struct.values.add(_elem93); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI64Value.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI64Value.java deleted file mode 100644 index 9a941cce0c077..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TI64Value.java +++ /dev/null @@ -1,386 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TI64Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI64Value"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", 
org.apache.thrift.protocol.TType.I64, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI64ValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI64ValueTupleSchemeFactory()); - } - - private long value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI64Value.class, metaDataMap); - } - - public TI64Value() { - } - - /** - * Performs a deep copy on other. 
- */ - public TI64Value(TI64Value other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TI64Value deepCopy() { - return new TI64Value(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public long getValue() { - return this.value; - } - - public void setValue(long value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return Long.valueOf(getValue()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI64Value) - return this.equals((TI64Value)that); - return false; - } - - public boolean equals(TI64Value that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_value = true && (isSetValue()); - builder.append(present_value); - if (present_value) - builder.append(value); - - return builder.toHashCode(); - } - - public int compareTo(TI64Value other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TI64Value typedOther = (TI64Value)other; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI64Value("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - 
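The equals, hashCode, and compareTo methods in these generated value structs treat presence as part of the state: two instances are equal only when the field is set in neither or in both with matching values, and compareTo orders an unset field before a set one before comparing the values themselves. A plain-Java restatement of that rule follows (its own names, and java.util.Objects in place of HashCodeBuilder).

    import java.util.Objects;

    // Sketch of the presence-aware equality and ordering used by the generated
    // value structs: compare "is it set?" first, then the value itself.
    public class PresenceOrderSketch implements Comparable<PresenceOrderSketch> {
      private final boolean set;
      private final long value;

      PresenceOrderSketch(boolean set, long value) {
        this.set = set;
        this.value = value;
      }

      @Override
      public boolean equals(Object o) {
        if (!(o instanceof PresenceOrderSketch)) return false;
        PresenceOrderSketch that = (PresenceOrderSketch) o;
        if (this.set != that.set) return false;       // one set, one unset -> not equal
        return !this.set || this.value == that.value; // both unset, or both set with equal values
      }

      @Override
      public int hashCode() {
        return set ? Objects.hash(Boolean.TRUE, value) : Objects.hash(Boolean.FALSE);
      }

      @Override
      public int compareTo(PresenceOrderSketch that) {
        int byPresence = Boolean.compare(this.set, that.set); // unset sorts before set
        if (byPresence != 0) return byPresence;
        return this.set ? Long.compare(this.value, that.value) : 0;
      }

      public static void main(String[] args) {
        System.out.println(new PresenceOrderSketch(false, 0).compareTo(new PresenceOrderSketch(true, -5))); // negative
        System.out.println(new PresenceOrderSketch(true, 3).equals(new PresenceOrderSketch(true, 3)));      // true
      }
    }
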
sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI64ValueStandardSchemeFactory implements SchemeFactory { - public TI64ValueStandardScheme getScheme() { - return new TI64ValueStandardScheme(); - } - } - - private static class TI64ValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI64Value struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.value = iprot.readI64(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI64Value struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeI64(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI64ValueTupleSchemeFactory implements SchemeFactory { - public TI64ValueTupleScheme getScheme() { - return new TI64ValueTupleScheme(); - } - } - - private static class TI64ValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI64Value struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeI64(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI64Value struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readI64(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TMapTypeEntry.java 
b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TMapTypeEntry.java deleted file mode 100644 index 425603cbdecbd..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TMapTypeEntry.java +++ /dev/null @@ -1,478 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TMapTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TMapTypeEntry"); - - private static final org.apache.thrift.protocol.TField KEY_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("keyTypePtr", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField VALUE_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("valueTypePtr", org.apache.thrift.protocol.TType.I32, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TMapTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TMapTypeEntryTupleSchemeFactory()); - } - - private int keyTypePtr; // required - private int valueTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - KEY_TYPE_PTR((short)1, "keyTypePtr"), - VALUE_TYPE_PTR((short)2, "valueTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // KEY_TYPE_PTR - return KEY_TYPE_PTR; - case 2: // VALUE_TYPE_PTR - return VALUE_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
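Each struct nests a _Fields enum, as TMapTypeEntry does above, that maps Thrift field ids and wire names to constants: findByThriftId returns null for an unknown id, findByThriftIdOrThrow turns that null into an IllegalArgumentException, and findByName goes through a statically built name map. The standalone imitation below (illustrative names, covering only TMapTypeEntry's two fields) shows the same lookup pattern.

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the generated _Fields lookup pattern: constants carry their
    // Thrift id and wire name, and can be found by either one.
    public enum MapEntryFieldsSketch {
      KEY_TYPE_PTR((short) 1, "keyTypePtr"),
      VALUE_TYPE_PTR((short) 2, "valueTypePtr");

      private static final Map<String, MapEntryFieldsSketch> BY_NAME = new HashMap<>();
      static {
        for (MapEntryFieldsSketch f : values()) {
          BY_NAME.put(f.fieldName, f);
        }
      }

      private final short thriftId;
      private final String fieldName;

      MapEntryFieldsSketch(short thriftId, String fieldName) {
        this.thriftId = thriftId;
        this.fieldName = fieldName;
      }

      public static MapEntryFieldsSketch findByThriftId(int fieldId) {
        switch (fieldId) {
          case 1: return KEY_TYPE_PTR;
          case 2: return VALUE_TYPE_PTR;
          default: return null;                  // unknown id -> null, caller decides
        }
      }

      public static MapEntryFieldsSketch findByThriftIdOrThrow(int fieldId) {
        MapEntryFieldsSketch f = findByThriftId(fieldId);
        if (f == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
        return f;
      }

      public static MapEntryFieldsSketch findByName(String name) {
        return BY_NAME.get(name);                // null if the name is unknown
      }
    }
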
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __KEYTYPEPTR_ISSET_ID = 0; - private static final int __VALUETYPEPTR_ISSET_ID = 1; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.KEY_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("keyTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr"))); - tmpMap.put(_Fields.VALUE_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("valueTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TMapTypeEntry.class, metaDataMap); - } - - public TMapTypeEntry() { - } - - public TMapTypeEntry( - int keyTypePtr, - int valueTypePtr) - { - this(); - this.keyTypePtr = keyTypePtr; - setKeyTypePtrIsSet(true); - this.valueTypePtr = valueTypePtr; - setValueTypePtrIsSet(true); - } - - /** - * Performs a deep copy on other. - */ - public TMapTypeEntry(TMapTypeEntry other) { - __isset_bitfield = other.__isset_bitfield; - this.keyTypePtr = other.keyTypePtr; - this.valueTypePtr = other.valueTypePtr; - } - - public TMapTypeEntry deepCopy() { - return new TMapTypeEntry(this); - } - - @Override - public void clear() { - setKeyTypePtrIsSet(false); - this.keyTypePtr = 0; - setValueTypePtrIsSet(false); - this.valueTypePtr = 0; - } - - public int getKeyTypePtr() { - return this.keyTypePtr; - } - - public void setKeyTypePtr(int keyTypePtr) { - this.keyTypePtr = keyTypePtr; - setKeyTypePtrIsSet(true); - } - - public void unsetKeyTypePtr() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEYTYPEPTR_ISSET_ID); - } - - /** Returns true if field keyTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetKeyTypePtr() { - return EncodingUtils.testBit(__isset_bitfield, __KEYTYPEPTR_ISSET_ID); - } - - public void setKeyTypePtrIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEYTYPEPTR_ISSET_ID, value); - } - - public int getValueTypePtr() { - return this.valueTypePtr; - } - - public void setValueTypePtr(int valueTypePtr) { - this.valueTypePtr = valueTypePtr; - setValueTypePtrIsSet(true); - } - - public void unsetValueTypePtr() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUETYPEPTR_ISSET_ID); - } - - /** Returns true if field valueTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetValueTypePtr() { - return EncodingUtils.testBit(__isset_bitfield, __VALUETYPEPTR_ISSET_ID); - } - - public void setValueTypePtrIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUETYPEPTR_ISSET_ID, value); - } - - public void 
setFieldValue(_Fields field, Object value) { - switch (field) { - case KEY_TYPE_PTR: - if (value == null) { - unsetKeyTypePtr(); - } else { - setKeyTypePtr((Integer)value); - } - break; - - case VALUE_TYPE_PTR: - if (value == null) { - unsetValueTypePtr(); - } else { - setValueTypePtr((Integer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case KEY_TYPE_PTR: - return Integer.valueOf(getKeyTypePtr()); - - case VALUE_TYPE_PTR: - return Integer.valueOf(getValueTypePtr()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case KEY_TYPE_PTR: - return isSetKeyTypePtr(); - case VALUE_TYPE_PTR: - return isSetValueTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TMapTypeEntry) - return this.equals((TMapTypeEntry)that); - return false; - } - - public boolean equals(TMapTypeEntry that) { - if (that == null) - return false; - - boolean this_present_keyTypePtr = true; - boolean that_present_keyTypePtr = true; - if (this_present_keyTypePtr || that_present_keyTypePtr) { - if (!(this_present_keyTypePtr && that_present_keyTypePtr)) - return false; - if (this.keyTypePtr != that.keyTypePtr) - return false; - } - - boolean this_present_valueTypePtr = true; - boolean that_present_valueTypePtr = true; - if (this_present_valueTypePtr || that_present_valueTypePtr) { - if (!(this_present_valueTypePtr && that_present_valueTypePtr)) - return false; - if (this.valueTypePtr != that.valueTypePtr) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_keyTypePtr = true; - builder.append(present_keyTypePtr); - if (present_keyTypePtr) - builder.append(keyTypePtr); - - boolean present_valueTypePtr = true; - builder.append(present_valueTypePtr); - if (present_valueTypePtr) - builder.append(valueTypePtr); - - return builder.toHashCode(); - } - - public int compareTo(TMapTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TMapTypeEntry typedOther = (TMapTypeEntry)other; - - lastComparison = Boolean.valueOf(isSetKeyTypePtr()).compareTo(typedOther.isSetKeyTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetKeyTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyTypePtr, typedOther.keyTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetValueTypePtr()).compareTo(typedOther.isSetValueTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValueTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.valueTypePtr, typedOther.valueTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TMapTypeEntry("); - boolean first = true; - - sb.append("keyTypePtr:"); - sb.append(this.keyTypePtr); - first = false; - if (!first) sb.append(", "); - sb.append("valueTypePtr:"); - sb.append(this.valueTypePtr); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetKeyTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'keyTypePtr' is unset! Struct:" + toString()); - } - - if (!isSetValueTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'valueTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TMapTypeEntryStandardSchemeFactory implements SchemeFactory { - public TMapTypeEntryStandardScheme getScheme() { - return new TMapTypeEntryStandardScheme(); - } - } - - private static class TMapTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TMapTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // KEY_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.keyTypePtr = iprot.readI32(); - struct.setKeyTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // VALUE_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.valueTypePtr = iprot.readI32(); - struct.setValueTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TMapTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldBegin(KEY_TYPE_PTR_FIELD_DESC); - oprot.writeI32(struct.keyTypePtr); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(VALUE_TYPE_PTR_FIELD_DESC); - oprot.writeI32(struct.valueTypePtr); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class 
TMapTypeEntryTupleSchemeFactory implements SchemeFactory { - public TMapTypeEntryTupleScheme getScheme() { - return new TMapTypeEntryTupleScheme(); - } - } - - private static class TMapTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TMapTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.keyTypePtr); - oprot.writeI32(struct.valueTypePtr); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TMapTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.keyTypePtr = iprot.readI32(); - struct.setKeyTypePtrIsSet(true); - struct.valueTypePtr = iprot.readI32(); - struct.setValueTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOpenSessionReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOpenSessionReq.java deleted file mode 100644 index c0481615b06d3..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOpenSessionReq.java +++ /dev/null @@ -1,785 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TOpenSessionReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOpenSessionReq"); - - private static final org.apache.thrift.protocol.TField CLIENT_PROTOCOL_FIELD_DESC = new org.apache.thrift.protocol.TField("client_protocol", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField USERNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("username", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField PASSWORD_FIELD_DESC = new org.apache.thrift.protocol.TField("password", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC = new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TOpenSessionReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TOpenSessionReqTupleSchemeFactory()); - } - - private TProtocolVersion client_protocol; // required - private String 
username; // optional - private String password; // optional - private Map configuration; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see TProtocolVersion - */ - CLIENT_PROTOCOL((short)1, "client_protocol"), - USERNAME((short)2, "username"), - PASSWORD((short)3, "password"), - CONFIGURATION((short)4, "configuration"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // CLIENT_PROTOCOL - return CLIENT_PROTOCOL; - case 2: // USERNAME - return USERNAME; - case 3: // PASSWORD - return PASSWORD; - case 4: // CONFIGURATION - return CONFIGURATION; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.USERNAME,_Fields.PASSWORD,_Fields.CONFIGURATION}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.CLIENT_PROTOCOL, new org.apache.thrift.meta_data.FieldMetaData("client_protocol", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TProtocolVersion.class))); - tmpMap.put(_Fields.USERNAME, new org.apache.thrift.meta_data.FieldMetaData("username", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PASSWORD, new org.apache.thrift.meta_data.FieldMetaData("password", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.CONFIGURATION, new org.apache.thrift.meta_data.FieldMetaData("configuration", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOpenSessionReq.class, metaDataMap); - } - - public 
TOpenSessionReq() { - this.client_protocol = org.apache.hive.service.cli.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8; - - } - - public TOpenSessionReq( - TProtocolVersion client_protocol) - { - this(); - this.client_protocol = client_protocol; - } - - /** - * Performs a deep copy on other. - */ - public TOpenSessionReq(TOpenSessionReq other) { - if (other.isSetClient_protocol()) { - this.client_protocol = other.client_protocol; - } - if (other.isSetUsername()) { - this.username = other.username; - } - if (other.isSetPassword()) { - this.password = other.password; - } - if (other.isSetConfiguration()) { - Map __this__configuration = new HashMap(); - for (Map.Entry other_element : other.configuration.entrySet()) { - - String other_element_key = other_element.getKey(); - String other_element_value = other_element.getValue(); - - String __this__configuration_copy_key = other_element_key; - - String __this__configuration_copy_value = other_element_value; - - __this__configuration.put(__this__configuration_copy_key, __this__configuration_copy_value); - } - this.configuration = __this__configuration; - } - } - - public TOpenSessionReq deepCopy() { - return new TOpenSessionReq(this); - } - - @Override - public void clear() { - this.client_protocol = org.apache.hive.service.cli.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8; - - this.username = null; - this.password = null; - this.configuration = null; - } - - /** - * - * @see TProtocolVersion - */ - public TProtocolVersion getClient_protocol() { - return this.client_protocol; - } - - /** - * - * @see TProtocolVersion - */ - public void setClient_protocol(TProtocolVersion client_protocol) { - this.client_protocol = client_protocol; - } - - public void unsetClient_protocol() { - this.client_protocol = null; - } - - /** Returns true if field client_protocol is set (has been assigned a value) and false otherwise */ - public boolean isSetClient_protocol() { - return this.client_protocol != null; - } - - public void setClient_protocolIsSet(boolean value) { - if (!value) { - this.client_protocol = null; - } - } - - public String getUsername() { - return this.username; - } - - public void setUsername(String username) { - this.username = username; - } - - public void unsetUsername() { - this.username = null; - } - - /** Returns true if field username is set (has been assigned a value) and false otherwise */ - public boolean isSetUsername() { - return this.username != null; - } - - public void setUsernameIsSet(boolean value) { - if (!value) { - this.username = null; - } - } - - public String getPassword() { - return this.password; - } - - public void setPassword(String password) { - this.password = password; - } - - public void unsetPassword() { - this.password = null; - } - - /** Returns true if field password is set (has been assigned a value) and false otherwise */ - public boolean isSetPassword() { - return this.password != null; - } - - public void setPasswordIsSet(boolean value) { - if (!value) { - this.password = null; - } - } - - public int getConfigurationSize() { - return (this.configuration == null) ? 
0 : this.configuration.size(); - } - - public void putToConfiguration(String key, String val) { - if (this.configuration == null) { - this.configuration = new HashMap(); - } - this.configuration.put(key, val); - } - - public Map getConfiguration() { - return this.configuration; - } - - public void setConfiguration(Map configuration) { - this.configuration = configuration; - } - - public void unsetConfiguration() { - this.configuration = null; - } - - /** Returns true if field configuration is set (has been assigned a value) and false otherwise */ - public boolean isSetConfiguration() { - return this.configuration != null; - } - - public void setConfigurationIsSet(boolean value) { - if (!value) { - this.configuration = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case CLIENT_PROTOCOL: - if (value == null) { - unsetClient_protocol(); - } else { - setClient_protocol((TProtocolVersion)value); - } - break; - - case USERNAME: - if (value == null) { - unsetUsername(); - } else { - setUsername((String)value); - } - break; - - case PASSWORD: - if (value == null) { - unsetPassword(); - } else { - setPassword((String)value); - } - break; - - case CONFIGURATION: - if (value == null) { - unsetConfiguration(); - } else { - setConfiguration((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case CLIENT_PROTOCOL: - return getClient_protocol(); - - case USERNAME: - return getUsername(); - - case PASSWORD: - return getPassword(); - - case CONFIGURATION: - return getConfiguration(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case CLIENT_PROTOCOL: - return isSetClient_protocol(); - case USERNAME: - return isSetUsername(); - case PASSWORD: - return isSetPassword(); - case CONFIGURATION: - return isSetConfiguration(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TOpenSessionReq) - return this.equals((TOpenSessionReq)that); - return false; - } - - public boolean equals(TOpenSessionReq that) { - if (that == null) - return false; - - boolean this_present_client_protocol = true && this.isSetClient_protocol(); - boolean that_present_client_protocol = true && that.isSetClient_protocol(); - if (this_present_client_protocol || that_present_client_protocol) { - if (!(this_present_client_protocol && that_present_client_protocol)) - return false; - if (!this.client_protocol.equals(that.client_protocol)) - return false; - } - - boolean this_present_username = true && this.isSetUsername(); - boolean that_present_username = true && that.isSetUsername(); - if (this_present_username || that_present_username) { - if (!(this_present_username && that_present_username)) - return false; - if (!this.username.equals(that.username)) - return false; - } - - boolean this_present_password = true && this.isSetPassword(); - boolean that_present_password = true && that.isSetPassword(); - if (this_present_password || that_present_password) { - if (!(this_present_password && that_present_password)) - return false; - if (!this.password.equals(that.password)) - return false; - } - - boolean this_present_configuration = true && this.isSetConfiguration(); - boolean that_present_configuration 
= true && that.isSetConfiguration(); - if (this_present_configuration || that_present_configuration) { - if (!(this_present_configuration && that_present_configuration)) - return false; - if (!this.configuration.equals(that.configuration)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_client_protocol = true && (isSetClient_protocol()); - builder.append(present_client_protocol); - if (present_client_protocol) - builder.append(client_protocol.getValue()); - - boolean present_username = true && (isSetUsername()); - builder.append(present_username); - if (present_username) - builder.append(username); - - boolean present_password = true && (isSetPassword()); - builder.append(present_password); - if (present_password) - builder.append(password); - - boolean present_configuration = true && (isSetConfiguration()); - builder.append(present_configuration); - if (present_configuration) - builder.append(configuration); - - return builder.toHashCode(); - } - - public int compareTo(TOpenSessionReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TOpenSessionReq typedOther = (TOpenSessionReq)other; - - lastComparison = Boolean.valueOf(isSetClient_protocol()).compareTo(typedOther.isSetClient_protocol()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetClient_protocol()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.client_protocol, typedOther.client_protocol); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetUsername()).compareTo(typedOther.isSetUsername()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetUsername()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.username, typedOther.username); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetPassword()).compareTo(typedOther.isSetPassword()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetPassword()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.password, typedOther.password); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetConfiguration()).compareTo(typedOther.isSetConfiguration()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetConfiguration()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.configuration, typedOther.configuration); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TOpenSessionReq("); - boolean first = true; - - sb.append("client_protocol:"); - if (this.client_protocol == null) { - sb.append("null"); - } else { - sb.append(this.client_protocol); - } - first = false; - if (isSetUsername()) { - if (!first) sb.append(", "); - sb.append("username:"); - if (this.username == 
null) { - sb.append("null"); - } else { - sb.append(this.username); - } - first = false; - } - if (isSetPassword()) { - if (!first) sb.append(", "); - sb.append("password:"); - if (this.password == null) { - sb.append("null"); - } else { - sb.append(this.password); - } - first = false; - } - if (isSetConfiguration()) { - if (!first) sb.append(", "); - sb.append("configuration:"); - if (this.configuration == null) { - sb.append("null"); - } else { - sb.append(this.configuration); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetClient_protocol()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'client_protocol' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TOpenSessionReqStandardSchemeFactory implements SchemeFactory { - public TOpenSessionReqStandardScheme getScheme() { - return new TOpenSessionReqStandardScheme(); - } - } - - private static class TOpenSessionReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TOpenSessionReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // CLIENT_PROTOCOL - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.client_protocol = TProtocolVersion.findByValue(iprot.readI32()); - struct.setClient_protocolIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // USERNAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.username = iprot.readString(); - struct.setUsernameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // PASSWORD - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.password = iprot.readString(); - struct.setPasswordIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // CONFIGURATION - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map142 = iprot.readMapBegin(); - struct.configuration = new HashMap(2*_map142.size); - for (int _i143 = 0; _i143 < _map142.size; ++_i143) - { - String _key144; // required - String _val145; // required - _key144 = iprot.readString(); - _val145 = iprot.readString(); - struct.configuration.put(_key144, _val145); - } - iprot.readMapEnd(); - } - struct.setConfigurationIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TOpenSessionReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.client_protocol != null) { - oprot.writeFieldBegin(CLIENT_PROTOCOL_FIELD_DESC); - oprot.writeI32(struct.client_protocol.getValue()); - oprot.writeFieldEnd(); - } - if (struct.username != null) { - if (struct.isSetUsername()) { - oprot.writeFieldBegin(USERNAME_FIELD_DESC); - oprot.writeString(struct.username); - oprot.writeFieldEnd(); - } - } - if (struct.password != null) { - if (struct.isSetPassword()) { - oprot.writeFieldBegin(PASSWORD_FIELD_DESC); - oprot.writeString(struct.password); - oprot.writeFieldEnd(); - } - } - if (struct.configuration != null) { - if (struct.isSetConfiguration()) { - oprot.writeFieldBegin(CONFIGURATION_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.configuration.size())); - for (Map.Entry _iter146 : struct.configuration.entrySet()) - { - oprot.writeString(_iter146.getKey()); - oprot.writeString(_iter146.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TOpenSessionReqTupleSchemeFactory implements SchemeFactory { - public TOpenSessionReqTupleScheme getScheme() { - return new TOpenSessionReqTupleScheme(); - } - } - - private static class TOpenSessionReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TOpenSessionReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.client_protocol.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetUsername()) { - optionals.set(0); - } - if (struct.isSetPassword()) { - optionals.set(1); - } - if (struct.isSetConfiguration()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetUsername()) { - oprot.writeString(struct.username); - } - if (struct.isSetPassword()) { - oprot.writeString(struct.password); - } - if (struct.isSetConfiguration()) { - { - oprot.writeI32(struct.configuration.size()); - for (Map.Entry _iter147 : struct.configuration.entrySet()) - { - oprot.writeString(_iter147.getKey()); - oprot.writeString(_iter147.getValue()); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TOpenSessionReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.client_protocol = TProtocolVersion.findByValue(iprot.readI32()); - struct.setClient_protocolIsSet(true); - BitSet incoming = iprot.readBitSet(3); - if (incoming.get(0)) { - struct.username = iprot.readString(); - struct.setUsernameIsSet(true); - } - if (incoming.get(1)) { - struct.password = iprot.readString(); - struct.setPasswordIsSet(true); - } - if (incoming.get(2)) { - { - org.apache.thrift.protocol.TMap _map148 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.configuration = new HashMap(2*_map148.size); - for (int _i149 = 0; _i149 < _map148.size; ++_i149) - { - String _key150; // required - String 
_val151; // required - _key150 = iprot.readString(); - _val151 = iprot.readString(); - struct.configuration.put(_key150, _val151); - } - } - struct.setConfigurationIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOpenSessionResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOpenSessionResp.java deleted file mode 100644 index 351f78b2de20c..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOpenSessionResp.java +++ /dev/null @@ -1,790 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TOpenSessionResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOpenSessionResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField SERVER_PROTOCOL_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("serverProtocolVersion", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC = new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TOpenSessionRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TOpenSessionRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TProtocolVersion serverProtocolVersion; // required - private TSessionHandle sessionHandle; // optional - private Map configuration; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - /** - * - * @see TProtocolVersion - */ - SERVER_PROTOCOL_VERSION((short)2, "serverProtocolVersion"), - SESSION_HANDLE((short)3, "sessionHandle"), - CONFIGURATION((short)4, "configuration"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // SERVER_PROTOCOL_VERSION - return SERVER_PROTOCOL_VERSION; - case 3: // SESSION_HANDLE - return SESSION_HANDLE; - case 4: // CONFIGURATION - return CONFIGURATION; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.SESSION_HANDLE,_Fields.CONFIGURATION}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.SERVER_PROTOCOL_VERSION, new org.apache.thrift.meta_data.FieldMetaData("serverProtocolVersion", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TProtocolVersion.class))); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CONFIGURATION, new org.apache.thrift.meta_data.FieldMetaData("configuration", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOpenSessionResp.class, metaDataMap); - } - - public TOpenSessionResp() { - this.serverProtocolVersion = org.apache.hive.service.cli.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8; - 
- } - - public TOpenSessionResp( - TStatus status, - TProtocolVersion serverProtocolVersion) - { - this(); - this.status = status; - this.serverProtocolVersion = serverProtocolVersion; - } - - /** - * Performs a deep copy on other. - */ - public TOpenSessionResp(TOpenSessionResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetServerProtocolVersion()) { - this.serverProtocolVersion = other.serverProtocolVersion; - } - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetConfiguration()) { - Map __this__configuration = new HashMap(); - for (Map.Entry other_element : other.configuration.entrySet()) { - - String other_element_key = other_element.getKey(); - String other_element_value = other_element.getValue(); - - String __this__configuration_copy_key = other_element_key; - - String __this__configuration_copy_value = other_element_value; - - __this__configuration.put(__this__configuration_copy_key, __this__configuration_copy_value); - } - this.configuration = __this__configuration; - } - } - - public TOpenSessionResp deepCopy() { - return new TOpenSessionResp(this); - } - - @Override - public void clear() { - this.status = null; - this.serverProtocolVersion = org.apache.hive.service.cli.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8; - - this.sessionHandle = null; - this.configuration = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - /** - * - * @see TProtocolVersion - */ - public TProtocolVersion getServerProtocolVersion() { - return this.serverProtocolVersion; - } - - /** - * - * @see TProtocolVersion - */ - public void setServerProtocolVersion(TProtocolVersion serverProtocolVersion) { - this.serverProtocolVersion = serverProtocolVersion; - } - - public void unsetServerProtocolVersion() { - this.serverProtocolVersion = null; - } - - /** Returns true if field serverProtocolVersion is set (has been assigned a value) and false otherwise */ - public boolean isSetServerProtocolVersion() { - return this.serverProtocolVersion != null; - } - - public void setServerProtocolVersionIsSet(boolean value) { - if (!value) { - this.serverProtocolVersion = null; - } - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public int getConfigurationSize() { - return (this.configuration == null) ? 
0 : this.configuration.size(); - } - - public void putToConfiguration(String key, String val) { - if (this.configuration == null) { - this.configuration = new HashMap(); - } - this.configuration.put(key, val); - } - - public Map getConfiguration() { - return this.configuration; - } - - public void setConfiguration(Map configuration) { - this.configuration = configuration; - } - - public void unsetConfiguration() { - this.configuration = null; - } - - /** Returns true if field configuration is set (has been assigned a value) and false otherwise */ - public boolean isSetConfiguration() { - return this.configuration != null; - } - - public void setConfigurationIsSet(boolean value) { - if (!value) { - this.configuration = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case SERVER_PROTOCOL_VERSION: - if (value == null) { - unsetServerProtocolVersion(); - } else { - setServerProtocolVersion((TProtocolVersion)value); - } - break; - - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CONFIGURATION: - if (value == null) { - unsetConfiguration(); - } else { - setConfiguration((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case SERVER_PROTOCOL_VERSION: - return getServerProtocolVersion(); - - case SESSION_HANDLE: - return getSessionHandle(); - - case CONFIGURATION: - return getConfiguration(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case SERVER_PROTOCOL_VERSION: - return isSetServerProtocolVersion(); - case SESSION_HANDLE: - return isSetSessionHandle(); - case CONFIGURATION: - return isSetConfiguration(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TOpenSessionResp) - return this.equals((TOpenSessionResp)that); - return false; - } - - public boolean equals(TOpenSessionResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_serverProtocolVersion = true && this.isSetServerProtocolVersion(); - boolean that_present_serverProtocolVersion = true && that.isSetServerProtocolVersion(); - if (this_present_serverProtocolVersion || that_present_serverProtocolVersion) { - if (!(this_present_serverProtocolVersion && that_present_serverProtocolVersion)) - return false; - if (!this.serverProtocolVersion.equals(that.serverProtocolVersion)) - return false; - } - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if 
(!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_configuration = true && this.isSetConfiguration(); - boolean that_present_configuration = true && that.isSetConfiguration(); - if (this_present_configuration || that_present_configuration) { - if (!(this_present_configuration && that_present_configuration)) - return false; - if (!this.configuration.equals(that.configuration)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - builder.append(status); - - boolean present_serverProtocolVersion = true && (isSetServerProtocolVersion()); - builder.append(present_serverProtocolVersion); - if (present_serverProtocolVersion) - builder.append(serverProtocolVersion.getValue()); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_configuration = true && (isSetConfiguration()); - builder.append(present_configuration); - if (present_configuration) - builder.append(configuration); - - return builder.toHashCode(); - } - - public int compareTo(TOpenSessionResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TOpenSessionResp typedOther = (TOpenSessionResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetServerProtocolVersion()).compareTo(typedOther.isSetServerProtocolVersion()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetServerProtocolVersion()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serverProtocolVersion, typedOther.serverProtocolVersion); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetConfiguration()).compareTo(typedOther.isSetConfiguration()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetConfiguration()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.configuration, typedOther.configuration); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TOpenSessionResp("); - boolean first = 
true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (!first) sb.append(", "); - sb.append("serverProtocolVersion:"); - if (this.serverProtocolVersion == null) { - sb.append("null"); - } else { - sb.append(this.serverProtocolVersion); - } - first = false; - if (isSetSessionHandle()) { - if (!first) sb.append(", "); - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - } - if (isSetConfiguration()) { - if (!first) sb.append(", "); - sb.append("configuration:"); - if (this.configuration == null) { - sb.append("null"); - } else { - sb.append(this.configuration); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - if (!isSetServerProtocolVersion()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'serverProtocolVersion' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TOpenSessionRespStandardSchemeFactory implements SchemeFactory { - public TOpenSessionRespStandardScheme getScheme() { - return new TOpenSessionRespStandardScheme(); - } - } - - private static class TOpenSessionRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TOpenSessionResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // SERVER_PROTOCOL_VERSION - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.serverProtocolVersion = TProtocolVersion.findByValue(iprot.readI32()); - struct.setServerProtocolVersionIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // CONFIGURATION - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map152 = iprot.readMapBegin(); - struct.configuration = new HashMap(2*_map152.size); - for (int _i153 = 0; _i153 < _map152.size; ++_i153) - { - String _key154; // required - String _val155; // required - _key154 = iprot.readString(); - _val155 = iprot.readString(); - struct.configuration.put(_key154, _val155); - } - iprot.readMapEnd(); - } - struct.setConfigurationIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TOpenSessionResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.serverProtocolVersion != null) { - oprot.writeFieldBegin(SERVER_PROTOCOL_VERSION_FIELD_DESC); - oprot.writeI32(struct.serverProtocolVersion.getValue()); - oprot.writeFieldEnd(); - } - if (struct.sessionHandle != null) { - if (struct.isSetSessionHandle()) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - if (struct.configuration != null) { - if (struct.isSetConfiguration()) { - oprot.writeFieldBegin(CONFIGURATION_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.configuration.size())); - for (Map.Entry _iter156 : struct.configuration.entrySet()) - { - oprot.writeString(_iter156.getKey()); - oprot.writeString(_iter156.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TOpenSessionRespTupleSchemeFactory implements SchemeFactory { - public TOpenSessionRespTupleScheme getScheme() { - return new TOpenSessionRespTupleScheme(); - } - } - - private static class TOpenSessionRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TOpenSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - oprot.writeI32(struct.serverProtocolVersion.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetSessionHandle()) { - optionals.set(0); - } - if (struct.isSetConfiguration()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetSessionHandle()) { - struct.sessionHandle.write(oprot); - } - if (struct.isSetConfiguration()) { - { - oprot.writeI32(struct.configuration.size()); - for (Map.Entry _iter157 : struct.configuration.entrySet()) - { - oprot.writeString(_iter157.getKey()); - oprot.writeString(_iter157.getValue()); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TOpenSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - struct.serverProtocolVersion = 
TProtocolVersion.findByValue(iprot.readI32()); - struct.setServerProtocolVersionIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - if (incoming.get(1)) { - { - org.apache.thrift.protocol.TMap _map158 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.configuration = new HashMap(2*_map158.size); - for (int _i159 = 0; _i159 < _map158.size; ++_i159) - { - String _key160; // required - String _val161; // required - _key160 = iprot.readString(); - _val161 = iprot.readString(); - struct.configuration.put(_key160, _val161); - } - } - struct.setConfigurationIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationHandle.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationHandle.java deleted file mode 100644 index 8fbd8752eaca6..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationHandle.java +++ /dev/null @@ -1,705 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TOperationHandle implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOperationHandle"); - - private static final org.apache.thrift.protocol.TField OPERATION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("operationId", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationType", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField HAS_RESULT_SET_FIELD_DESC = new org.apache.thrift.protocol.TField("hasResultSet", org.apache.thrift.protocol.TType.BOOL, (short)3); - private static final org.apache.thrift.protocol.TField MODIFIED_ROW_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("modifiedRowCount", org.apache.thrift.protocol.TType.DOUBLE, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TOperationHandleStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TOperationHandleTupleSchemeFactory()); - } - - private THandleIdentifier operationId; // 
required - private TOperationType operationType; // required - private boolean hasResultSet; // required - private double modifiedRowCount; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_ID((short)1, "operationId"), - /** - * - * @see TOperationType - */ - OPERATION_TYPE((short)2, "operationType"), - HAS_RESULT_SET((short)3, "hasResultSet"), - MODIFIED_ROW_COUNT((short)4, "modifiedRowCount"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_ID - return OPERATION_ID; - case 2: // OPERATION_TYPE - return OPERATION_TYPE; - case 3: // HAS_RESULT_SET - return HAS_RESULT_SET; - case 4: // MODIFIED_ROW_COUNT - return MODIFIED_ROW_COUNT; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __HASRESULTSET_ISSET_ID = 0; - private static final int __MODIFIEDROWCOUNT_ISSET_ID = 1; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.MODIFIED_ROW_COUNT}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_ID, new org.apache.thrift.meta_data.FieldMetaData("operationId", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THandleIdentifier.class))); - tmpMap.put(_Fields.OPERATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("operationType", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TOperationType.class))); - tmpMap.put(_Fields.HAS_RESULT_SET, new org.apache.thrift.meta_data.FieldMetaData("hasResultSet", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.MODIFIED_ROW_COUNT, new org.apache.thrift.meta_data.FieldMetaData("modifiedRowCount", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOperationHandle.class, metaDataMap); - } - - public TOperationHandle() { - } - - public TOperationHandle( - THandleIdentifier operationId, - TOperationType operationType, - boolean hasResultSet) - { - this(); - this.operationId = operationId; - this.operationType = operationType; - this.hasResultSet = hasResultSet; - setHasResultSetIsSet(true); - } - - /** - * Performs a deep copy on other. - */ - public TOperationHandle(TOperationHandle other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetOperationId()) { - this.operationId = new THandleIdentifier(other.operationId); - } - if (other.isSetOperationType()) { - this.operationType = other.operationType; - } - this.hasResultSet = other.hasResultSet; - this.modifiedRowCount = other.modifiedRowCount; - } - - public TOperationHandle deepCopy() { - return new TOperationHandle(this); - } - - @Override - public void clear() { - this.operationId = null; - this.operationType = null; - setHasResultSetIsSet(false); - this.hasResultSet = false; - setModifiedRowCountIsSet(false); - this.modifiedRowCount = 0.0; - } - - public THandleIdentifier getOperationId() { - return this.operationId; - } - - public void setOperationId(THandleIdentifier operationId) { - this.operationId = operationId; - } - - public void unsetOperationId() { - this.operationId = null; - } - - /** Returns true if field operationId is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationId() { - return this.operationId != null; - } - - public void setOperationIdIsSet(boolean value) { - if (!value) { - this.operationId = null; - } - } - - /** - * - * @see TOperationType - */ - public TOperationType getOperationType() { - return this.operationType; - } - - /** - * - * @see TOperationType - */ - public void setOperationType(TOperationType operationType) { - this.operationType = operationType; - } - - public void unsetOperationType() { - this.operationType = null; - } - - /** Returns true if field operationType is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationType() { - return this.operationType != null; - } - - public void setOperationTypeIsSet(boolean value) { - if (!value) { - this.operationType = null; - } - } - - public boolean isHasResultSet() { - return this.hasResultSet; - } - - public void setHasResultSet(boolean hasResultSet) { - this.hasResultSet = hasResultSet; - setHasResultSetIsSet(true); - } - - public void unsetHasResultSet() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASRESULTSET_ISSET_ID); - } - - /** Returns true if field hasResultSet is set (has been assigned a value) and false otherwise */ - public boolean isSetHasResultSet() { - return EncodingUtils.testBit(__isset_bitfield, __HASRESULTSET_ISSET_ID); - } - - public void setHasResultSetIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASRESULTSET_ISSET_ID, value); - } - - public double getModifiedRowCount() { - return this.modifiedRowCount; - } - - public void setModifiedRowCount(double modifiedRowCount) { - this.modifiedRowCount = modifiedRowCount; - setModifiedRowCountIsSet(true); - } - - public void unsetModifiedRowCount() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MODIFIEDROWCOUNT_ISSET_ID); - } - - /** Returns true if field modifiedRowCount is set (has been assigned a value) and false otherwise */ - public boolean isSetModifiedRowCount() { - return 
EncodingUtils.testBit(__isset_bitfield, __MODIFIEDROWCOUNT_ISSET_ID); - } - - public void setModifiedRowCountIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MODIFIEDROWCOUNT_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_ID: - if (value == null) { - unsetOperationId(); - } else { - setOperationId((THandleIdentifier)value); - } - break; - - case OPERATION_TYPE: - if (value == null) { - unsetOperationType(); - } else { - setOperationType((TOperationType)value); - } - break; - - case HAS_RESULT_SET: - if (value == null) { - unsetHasResultSet(); - } else { - setHasResultSet((Boolean)value); - } - break; - - case MODIFIED_ROW_COUNT: - if (value == null) { - unsetModifiedRowCount(); - } else { - setModifiedRowCount((Double)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_ID: - return getOperationId(); - - case OPERATION_TYPE: - return getOperationType(); - - case HAS_RESULT_SET: - return Boolean.valueOf(isHasResultSet()); - - case MODIFIED_ROW_COUNT: - return Double.valueOf(getModifiedRowCount()); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_ID: - return isSetOperationId(); - case OPERATION_TYPE: - return isSetOperationType(); - case HAS_RESULT_SET: - return isSetHasResultSet(); - case MODIFIED_ROW_COUNT: - return isSetModifiedRowCount(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TOperationHandle) - return this.equals((TOperationHandle)that); - return false; - } - - public boolean equals(TOperationHandle that) { - if (that == null) - return false; - - boolean this_present_operationId = true && this.isSetOperationId(); - boolean that_present_operationId = true && that.isSetOperationId(); - if (this_present_operationId || that_present_operationId) { - if (!(this_present_operationId && that_present_operationId)) - return false; - if (!this.operationId.equals(that.operationId)) - return false; - } - - boolean this_present_operationType = true && this.isSetOperationType(); - boolean that_present_operationType = true && that.isSetOperationType(); - if (this_present_operationType || that_present_operationType) { - if (!(this_present_operationType && that_present_operationType)) - return false; - if (!this.operationType.equals(that.operationType)) - return false; - } - - boolean this_present_hasResultSet = true; - boolean that_present_hasResultSet = true; - if (this_present_hasResultSet || that_present_hasResultSet) { - if (!(this_present_hasResultSet && that_present_hasResultSet)) - return false; - if (this.hasResultSet != that.hasResultSet) - return false; - } - - boolean this_present_modifiedRowCount = true && this.isSetModifiedRowCount(); - boolean that_present_modifiedRowCount = true && that.isSetModifiedRowCount(); - if (this_present_modifiedRowCount || that_present_modifiedRowCount) { - if (!(this_present_modifiedRowCount && that_present_modifiedRowCount)) - return false; - if (this.modifiedRowCount != that.modifiedRowCount) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - 
- boolean present_operationId = true && (isSetOperationId()); - builder.append(present_operationId); - if (present_operationId) - builder.append(operationId); - - boolean present_operationType = true && (isSetOperationType()); - builder.append(present_operationType); - if (present_operationType) - builder.append(operationType.getValue()); - - boolean present_hasResultSet = true; - builder.append(present_hasResultSet); - if (present_hasResultSet) - builder.append(hasResultSet); - - boolean present_modifiedRowCount = true && (isSetModifiedRowCount()); - builder.append(present_modifiedRowCount); - if (present_modifiedRowCount) - builder.append(modifiedRowCount); - - return builder.toHashCode(); - } - - public int compareTo(TOperationHandle other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TOperationHandle typedOther = (TOperationHandle)other; - - lastComparison = Boolean.valueOf(isSetOperationId()).compareTo(typedOther.isSetOperationId()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationId()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationId, typedOther.operationId); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationType()).compareTo(typedOther.isSetOperationType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationType, typedOther.operationType); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHasResultSet()).compareTo(typedOther.isSetHasResultSet()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHasResultSet()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasResultSet, typedOther.hasResultSet); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetModifiedRowCount()).compareTo(typedOther.isSetModifiedRowCount()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetModifiedRowCount()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.modifiedRowCount, typedOther.modifiedRowCount); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TOperationHandle("); - boolean first = true; - - sb.append("operationId:"); - if (this.operationId == null) { - sb.append("null"); - } else { - sb.append(this.operationId); - } - first = false; - if (!first) sb.append(", "); - sb.append("operationType:"); - if (this.operationType == null) { - sb.append("null"); - } else { - sb.append(this.operationType); - } - first = false; - if (!first) sb.append(", "); - sb.append("hasResultSet:"); - sb.append(this.hasResultSet); - first = false; - if (isSetModifiedRowCount()) { - if (!first) sb.append(", "); - sb.append("modifiedRowCount:"); - sb.append(this.modifiedRowCount); - first 
= false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationId()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationId' is unset! Struct:" + toString()); - } - - if (!isSetOperationType()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationType' is unset! Struct:" + toString()); - } - - if (!isSetHasResultSet()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'hasResultSet' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (operationId != null) { - operationId.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TOperationHandleStandardSchemeFactory implements SchemeFactory { - public TOperationHandleStandardScheme getScheme() { - return new TOperationHandleStandardScheme(); - } - } - - private static class TOperationHandleStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TOperationHandle struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_ID - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationId = new THandleIdentifier(); - struct.operationId.read(iprot); - struct.setOperationIdIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.operationType = TOperationType.findByValue(iprot.readI32()); - struct.setOperationTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // HAS_RESULT_SET - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.hasResultSet = iprot.readBool(); - struct.setHasResultSetIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // MODIFIED_ROW_COUNT - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.modifiedRowCount = iprot.readDouble(); - struct.setModifiedRowCountIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void 
write(org.apache.thrift.protocol.TProtocol oprot, TOperationHandle struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationId != null) { - oprot.writeFieldBegin(OPERATION_ID_FIELD_DESC); - struct.operationId.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationType != null) { - oprot.writeFieldBegin(OPERATION_TYPE_FIELD_DESC); - oprot.writeI32(struct.operationType.getValue()); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(HAS_RESULT_SET_FIELD_DESC); - oprot.writeBool(struct.hasResultSet); - oprot.writeFieldEnd(); - if (struct.isSetModifiedRowCount()) { - oprot.writeFieldBegin(MODIFIED_ROW_COUNT_FIELD_DESC); - oprot.writeDouble(struct.modifiedRowCount); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TOperationHandleTupleSchemeFactory implements SchemeFactory { - public TOperationHandleTupleScheme getScheme() { - return new TOperationHandleTupleScheme(); - } - } - - private static class TOperationHandleTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TOperationHandle struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationId.write(oprot); - oprot.writeI32(struct.operationType.getValue()); - oprot.writeBool(struct.hasResultSet); - BitSet optionals = new BitSet(); - if (struct.isSetModifiedRowCount()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetModifiedRowCount()) { - oprot.writeDouble(struct.modifiedRowCount); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TOperationHandle struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationId = new THandleIdentifier(); - struct.operationId.read(iprot); - struct.setOperationIdIsSet(true); - struct.operationType = TOperationType.findByValue(iprot.readI32()); - struct.setOperationTypeIsSet(true); - struct.hasResultSet = iprot.readBool(); - struct.setHasResultSetIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.modifiedRowCount = iprot.readDouble(); - struct.setModifiedRowCountIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationState.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationState.java deleted file mode 100644 index 219866223a6b0..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationState.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TOperationState implements org.apache.thrift.TEnum { - INITIALIZED_STATE(0), - RUNNING_STATE(1), - FINISHED_STATE(2), - CANCELED_STATE(3), - CLOSED_STATE(4), - ERROR_STATE(5), - UKNOWN_STATE(6), - PENDING_STATE(7); - - private final int value; - - private TOperationState(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. 
- * @return null if the value is not found. - */ - public static TOperationState findByValue(int value) { - switch (value) { - case 0: - return INITIALIZED_STATE; - case 1: - return RUNNING_STATE; - case 2: - return FINISHED_STATE; - case 3: - return CANCELED_STATE; - case 4: - return CLOSED_STATE; - case 5: - return ERROR_STATE; - case 6: - return UKNOWN_STATE; - case 7: - return PENDING_STATE; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationType.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationType.java deleted file mode 100644 index b6d4b2fab9f96..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TOperationType.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TOperationType implements org.apache.thrift.TEnum { - EXECUTE_STATEMENT(0), - GET_TYPE_INFO(1), - GET_CATALOGS(2), - GET_SCHEMAS(3), - GET_TABLES(4), - GET_TABLE_TYPES(5), - GET_COLUMNS(6), - GET_FUNCTIONS(7), - UNKNOWN(8); - - private final int value; - - private TOperationType(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. - */ - public static TOperationType findByValue(int value) { - switch (value) { - case 0: - return EXECUTE_STATEMENT; - case 1: - return GET_TYPE_INFO; - case 2: - return GET_CATALOGS; - case 3: - return GET_SCHEMAS; - case 4: - return GET_TABLES; - case 5: - return GET_TABLE_TYPES; - case 6: - return GET_COLUMNS; - case 7: - return GET_FUNCTIONS; - case 8: - return UNKNOWN; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TPrimitiveTypeEntry.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TPrimitiveTypeEntry.java deleted file mode 100644 index 9d2abf2b3b084..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TPrimitiveTypeEntry.java +++ /dev/null @@ -1,512 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class 
TPrimitiveTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPrimitiveTypeEntry"); - - private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField TYPE_QUALIFIERS_FIELD_DESC = new org.apache.thrift.protocol.TField("typeQualifiers", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TPrimitiveTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TPrimitiveTypeEntryTupleSchemeFactory()); - } - - private TTypeId type; // required - private TTypeQualifiers typeQualifiers; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see TTypeId - */ - TYPE((short)1, "type"), - TYPE_QUALIFIERS((short)2, "typeQualifiers"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // TYPE - return TYPE; - case 2: // TYPE_QUALIFIERS - return TYPE_QUALIFIERS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.TYPE_QUALIFIERS}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TTypeId.class))); - tmpMap.put(_Fields.TYPE_QUALIFIERS, new org.apache.thrift.meta_data.FieldMetaData("typeQualifiers", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeQualifiers.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPrimitiveTypeEntry.class, metaDataMap); - } - - public TPrimitiveTypeEntry() { - } - - public TPrimitiveTypeEntry( - TTypeId type) - { - this(); - this.type = type; - } - - /** - * Performs a deep copy on other. - */ - public TPrimitiveTypeEntry(TPrimitiveTypeEntry other) { - if (other.isSetType()) { - this.type = other.type; - } - if (other.isSetTypeQualifiers()) { - this.typeQualifiers = new TTypeQualifiers(other.typeQualifiers); - } - } - - public TPrimitiveTypeEntry deepCopy() { - return new TPrimitiveTypeEntry(this); - } - - @Override - public void clear() { - this.type = null; - this.typeQualifiers = null; - } - - /** - * - * @see TTypeId - */ - public TTypeId getType() { - return this.type; - } - - /** - * - * @see TTypeId - */ - public void setType(TTypeId type) { - this.type = type; - } - - public void unsetType() { - this.type = null; - } - - /** Returns true if field type is set (has been assigned a value) and false otherwise */ - public boolean isSetType() { - return this.type != null; - } - - public void setTypeIsSet(boolean value) { - if (!value) { - this.type = null; - } - } - - public TTypeQualifiers getTypeQualifiers() { - return this.typeQualifiers; - } - - public void setTypeQualifiers(TTypeQualifiers typeQualifiers) { - this.typeQualifiers = typeQualifiers; - } - - public void unsetTypeQualifiers() { - this.typeQualifiers = null; - } - - /** Returns true if field typeQualifiers is set (has been assigned a value) and false otherwise */ - public boolean isSetTypeQualifiers() { - return this.typeQualifiers != null; - } - - public void setTypeQualifiersIsSet(boolean value) { - if (!value) { - this.typeQualifiers = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case TYPE: - if (value == null) { - unsetType(); - } else { - setType((TTypeId)value); - } - break; - - case TYPE_QUALIFIERS: - if (value == null) { - unsetTypeQualifiers(); - } else { - setTypeQualifiers((TTypeQualifiers)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case TYPE: - return getType(); - - case TYPE_QUALIFIERS: - return getTypeQualifiers(); - - } - throw new IllegalStateException(); - } - 
- /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case TYPE: - return isSetType(); - case TYPE_QUALIFIERS: - return isSetTypeQualifiers(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TPrimitiveTypeEntry) - return this.equals((TPrimitiveTypeEntry)that); - return false; - } - - public boolean equals(TPrimitiveTypeEntry that) { - if (that == null) - return false; - - boolean this_present_type = true && this.isSetType(); - boolean that_present_type = true && that.isSetType(); - if (this_present_type || that_present_type) { - if (!(this_present_type && that_present_type)) - return false; - if (!this.type.equals(that.type)) - return false; - } - - boolean this_present_typeQualifiers = true && this.isSetTypeQualifiers(); - boolean that_present_typeQualifiers = true && that.isSetTypeQualifiers(); - if (this_present_typeQualifiers || that_present_typeQualifiers) { - if (!(this_present_typeQualifiers && that_present_typeQualifiers)) - return false; - if (!this.typeQualifiers.equals(that.typeQualifiers)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_type = true && (isSetType()); - builder.append(present_type); - if (present_type) - builder.append(type.getValue()); - - boolean present_typeQualifiers = true && (isSetTypeQualifiers()); - builder.append(present_typeQualifiers); - if (present_typeQualifiers) - builder.append(typeQualifiers); - - return builder.toHashCode(); - } - - public int compareTo(TPrimitiveTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TPrimitiveTypeEntry typedOther = (TPrimitiveTypeEntry)other; - - lastComparison = Boolean.valueOf(isSetType()).compareTo(typedOther.isSetType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, typedOther.type); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTypeQualifiers()).compareTo(typedOther.isSetTypeQualifiers()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTypeQualifiers()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.typeQualifiers, typedOther.typeQualifiers); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TPrimitiveTypeEntry("); - boolean first = true; - - sb.append("type:"); - if (this.type == null) { - sb.append("null"); - } else { - sb.append(this.type); - } - first = false; - if (isSetTypeQualifiers()) { - if (!first) sb.append(", "); - sb.append("typeQualifiers:"); - if 
(this.typeQualifiers == null) { - sb.append("null"); - } else { - sb.append(this.typeQualifiers); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetType()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (typeQualifiers != null) { - typeQualifiers.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TPrimitiveTypeEntryStandardSchemeFactory implements SchemeFactory { - public TPrimitiveTypeEntryStandardScheme getScheme() { - return new TPrimitiveTypeEntryStandardScheme(); - } - } - - private static class TPrimitiveTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TPrimitiveTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.type = TTypeId.findByValue(iprot.readI32()); - struct.setTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TYPE_QUALIFIERS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.typeQualifiers = new TTypeQualifiers(); - struct.typeQualifiers.read(iprot); - struct.setTypeQualifiersIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TPrimitiveTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.type != null) { - oprot.writeFieldBegin(TYPE_FIELD_DESC); - oprot.writeI32(struct.type.getValue()); - oprot.writeFieldEnd(); - } - if (struct.typeQualifiers != null) { - if (struct.isSetTypeQualifiers()) { - oprot.writeFieldBegin(TYPE_QUALIFIERS_FIELD_DESC); - struct.typeQualifiers.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TPrimitiveTypeEntryTupleSchemeFactory implements SchemeFactory { - public TPrimitiveTypeEntryTupleScheme getScheme() { - return new TPrimitiveTypeEntryTupleScheme(); - } - } - - private static class TPrimitiveTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TPrimitiveTypeEntry struct) throws 
org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.type.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetTypeQualifiers()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetTypeQualifiers()) { - struct.typeQualifiers.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TPrimitiveTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.type = TTypeId.findByValue(iprot.readI32()); - struct.setTypeIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.typeQualifiers = new TTypeQualifiers(); - struct.typeQualifiers.read(iprot); - struct.setTypeQualifiersIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TProtocolVersion.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TProtocolVersion.java deleted file mode 100644 index a4279d29f662e..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TProtocolVersion.java +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TProtocolVersion implements org.apache.thrift.TEnum { - HIVE_CLI_SERVICE_PROTOCOL_V1(0), - HIVE_CLI_SERVICE_PROTOCOL_V2(1), - HIVE_CLI_SERVICE_PROTOCOL_V3(2), - HIVE_CLI_SERVICE_PROTOCOL_V4(3), - HIVE_CLI_SERVICE_PROTOCOL_V5(4), - HIVE_CLI_SERVICE_PROTOCOL_V6(5), - HIVE_CLI_SERVICE_PROTOCOL_V7(6), - HIVE_CLI_SERVICE_PROTOCOL_V8(7); - - private final int value; - - private TProtocolVersion(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - public static TProtocolVersion findByValue(int value) { - switch (value) { - case 0: - return HIVE_CLI_SERVICE_PROTOCOL_V1; - case 1: - return HIVE_CLI_SERVICE_PROTOCOL_V2; - case 2: - return HIVE_CLI_SERVICE_PROTOCOL_V3; - case 3: - return HIVE_CLI_SERVICE_PROTOCOL_V4; - case 4: - return HIVE_CLI_SERVICE_PROTOCOL_V5; - case 5: - return HIVE_CLI_SERVICE_PROTOCOL_V6; - case 6: - return HIVE_CLI_SERVICE_PROTOCOL_V7; - case 7: - return HIVE_CLI_SERVICE_PROTOCOL_V8; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRenewDelegationTokenReq.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRenewDelegationTokenReq.java deleted file mode 100644 index a3e39c8cdf321..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRenewDelegationTokenReq.java +++ /dev/null @@ -1,491 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TRenewDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRenewDelegationTokenReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField DELEGATION_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("delegationToken", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRenewDelegationTokenReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRenewDelegationTokenReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String delegationToken; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - DELEGATION_TOKEN((short)2, "delegationToken"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // DELEGATION_TOKEN - return DELEGATION_TOKEN; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.DELEGATION_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("delegationToken", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRenewDelegationTokenReq.class, metaDataMap); - } - - public TRenewDelegationTokenReq() { - } - - public TRenewDelegationTokenReq( - TSessionHandle sessionHandle, - String delegationToken) - { - this(); - this.sessionHandle = sessionHandle; - this.delegationToken = delegationToken; - } - - /** - * Performs a deep copy on other. 
- */ - public TRenewDelegationTokenReq(TRenewDelegationTokenReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetDelegationToken()) { - this.delegationToken = other.delegationToken; - } - } - - public TRenewDelegationTokenReq deepCopy() { - return new TRenewDelegationTokenReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.delegationToken = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getDelegationToken() { - return this.delegationToken; - } - - public void setDelegationToken(String delegationToken) { - this.delegationToken = delegationToken; - } - - public void unsetDelegationToken() { - this.delegationToken = null; - } - - /** Returns true if field delegationToken is set (has been assigned a value) and false otherwise */ - public boolean isSetDelegationToken() { - return this.delegationToken != null; - } - - public void setDelegationTokenIsSet(boolean value) { - if (!value) { - this.delegationToken = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case DELEGATION_TOKEN: - if (value == null) { - unsetDelegationToken(); - } else { - setDelegationToken((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case DELEGATION_TOKEN: - return getDelegationToken(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case DELEGATION_TOKEN: - return isSetDelegationToken(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRenewDelegationTokenReq) - return this.equals((TRenewDelegationTokenReq)that); - return false; - } - - public boolean equals(TRenewDelegationTokenReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_delegationToken = true && this.isSetDelegationToken(); - boolean that_present_delegationToken = true && that.isSetDelegationToken(); - if (this_present_delegationToken || that_present_delegationToken) { - if (!(this_present_delegationToken && that_present_delegationToken)) - return 
false; - if (!this.delegationToken.equals(that.delegationToken)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - builder.append(present_sessionHandle); - if (present_sessionHandle) - builder.append(sessionHandle); - - boolean present_delegationToken = true && (isSetDelegationToken()); - builder.append(present_delegationToken); - if (present_delegationToken) - builder.append(delegationToken); - - return builder.toHashCode(); - } - - public int compareTo(TRenewDelegationTokenReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TRenewDelegationTokenReq typedOther = (TRenewDelegationTokenReq)other; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(typedOther.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, typedOther.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDelegationToken()).compareTo(typedOther.isSetDelegationToken()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDelegationToken()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delegationToken, typedOther.delegationToken); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRenewDelegationTokenReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("delegationToken:"); - if (this.delegationToken == null) { - sb.append("null"); - } else { - sb.append(this.delegationToken); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetDelegationToken()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'delegationToken' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRenewDelegationTokenReqStandardSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenReqStandardScheme getScheme() { - return new TRenewDelegationTokenReqStandardScheme(); - } - } - - private static class TRenewDelegationTokenReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // DELEGATION_TOKEN - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.delegationToken != null) { - oprot.writeFieldBegin(DELEGATION_TOKEN_FIELD_DESC); - oprot.writeString(struct.delegationToken); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRenewDelegationTokenReqTupleSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenReqTupleScheme getScheme() { - return new TRenewDelegationTokenReqTupleScheme(); - } - } - - private static class TRenewDelegationTokenReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.delegationToken); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = 
(TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRenewDelegationTokenResp.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRenewDelegationTokenResp.java deleted file mode 100644 index 5f3eb6c4d4b90..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRenewDelegationTokenResp.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TRenewDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRenewDelegationTokenResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRenewDelegationTokenRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRenewDelegationTokenRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRenewDelegationTokenResp.class, metaDataMap); - } - - public TRenewDelegationTokenResp() { - } - - public TRenewDelegationTokenResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TRenewDelegationTokenResp(TRenewDelegationTokenResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TRenewDelegationTokenResp deepCopy() { - return new TRenewDelegationTokenResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRenewDelegationTokenResp) - return this.equals((TRenewDelegationTokenResp)that); - return false; - } - - public boolean equals(TRenewDelegationTokenResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_status = true && (isSetStatus()); - builder.append(present_status); - if (present_status) - 
builder.append(status); - - return builder.toHashCode(); - } - - public int compareTo(TRenewDelegationTokenResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TRenewDelegationTokenResp typedOther = (TRenewDelegationTokenResp)other; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRenewDelegationTokenResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRenewDelegationTokenRespStandardSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenRespStandardScheme getScheme() { - return new TRenewDelegationTokenRespStandardScheme(); - } - } - - private static class TRenewDelegationTokenRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void 
write(org.apache.thrift.protocol.TProtocol oprot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRenewDelegationTokenRespTupleSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenRespTupleScheme getScheme() { - return new TRenewDelegationTokenRespTupleScheme(); - } - } - - private static class TRenewDelegationTokenRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRow.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRow.java deleted file mode 100644 index a44cfb08ff01a..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRow.java +++ /dev/null @@ -1,439 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TRow implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRow"); - - private static final org.apache.thrift.protocol.TField COL_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("colVals", org.apache.thrift.protocol.TType.LIST, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRowStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRowTupleSchemeFactory()); - } - - private List colVals; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - COL_VALS((short)1, "colVals"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // COL_VALS - return COL_VALS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.COL_VALS, new org.apache.thrift.meta_data.FieldMetaData("colVals", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnValue.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRow.class, metaDataMap); - } - - public TRow() { - } - - public TRow( - List colVals) - { - this(); - this.colVals = colVals; - } - - /** - * Performs a deep copy on other. - */ - public TRow(TRow other) { - if (other.isSetColVals()) { - List __this__colVals = new ArrayList(); - for (TColumnValue other_element : other.colVals) { - __this__colVals.add(new TColumnValue(other_element)); - } - this.colVals = __this__colVals; - } - } - - public TRow deepCopy() { - return new TRow(this); - } - - @Override - public void clear() { - this.colVals = null; - } - - public int getColValsSize() { - return (this.colVals == null) ? 0 : this.colVals.size(); - } - - public java.util.Iterator getColValsIterator() { - return (this.colVals == null) ? 
null : this.colVals.iterator(); - } - - public void addToColVals(TColumnValue elem) { - if (this.colVals == null) { - this.colVals = new ArrayList(); - } - this.colVals.add(elem); - } - - public List getColVals() { - return this.colVals; - } - - public void setColVals(List colVals) { - this.colVals = colVals; - } - - public void unsetColVals() { - this.colVals = null; - } - - /** Returns true if field colVals is set (has been assigned a value) and false otherwise */ - public boolean isSetColVals() { - return this.colVals != null; - } - - public void setColValsIsSet(boolean value) { - if (!value) { - this.colVals = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case COL_VALS: - if (value == null) { - unsetColVals(); - } else { - setColVals((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case COL_VALS: - return getColVals(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case COL_VALS: - return isSetColVals(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRow) - return this.equals((TRow)that); - return false; - } - - public boolean equals(TRow that) { - if (that == null) - return false; - - boolean this_present_colVals = true && this.isSetColVals(); - boolean that_present_colVals = true && that.isSetColVals(); - if (this_present_colVals || that_present_colVals) { - if (!(this_present_colVals && that_present_colVals)) - return false; - if (!this.colVals.equals(that.colVals)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_colVals = true && (isSetColVals()); - builder.append(present_colVals); - if (present_colVals) - builder.append(colVals); - - return builder.toHashCode(); - } - - public int compareTo(TRow other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TRow typedOther = (TRow)other; - - lastComparison = Boolean.valueOf(isSetColVals()).compareTo(typedOther.isSetColVals()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColVals()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colVals, typedOther.colVals); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRow("); - boolean first = true; - - sb.append("colVals:"); - if (this.colVals == null) { - sb.append("null"); - } else { - sb.append(this.colVals); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required 
fields - if (!isSetColVals()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'colVals' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRowStandardSchemeFactory implements SchemeFactory { - public TRowStandardScheme getScheme() { - return new TRowStandardScheme(); - } - } - - private static class TRowStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRow struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // COL_VALS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list46 = iprot.readListBegin(); - struct.colVals = new ArrayList(_list46.size); - for (int _i47 = 0; _i47 < _list46.size; ++_i47) - { - TColumnValue _elem48; // optional - _elem48 = new TColumnValue(); - _elem48.read(iprot); - struct.colVals.add(_elem48); - } - iprot.readListEnd(); - } - struct.setColValsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TRow struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.colVals != null) { - oprot.writeFieldBegin(COL_VALS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colVals.size())); - for (TColumnValue _iter49 : struct.colVals) - { - _iter49.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRowTupleSchemeFactory implements SchemeFactory { - public TRowTupleScheme getScheme() { - return new TRowTupleScheme(); - } - } - - private static class TRowTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRow struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.colVals.size()); - for (TColumnValue _iter50 : struct.colVals) - { - _iter50.write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRow struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list51 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); 
- struct.colVals = new ArrayList(_list51.size); - for (int _i52 = 0; _i52 < _list51.size; ++_i52) - { - TColumnValue _elem53; // optional - _elem53 = new TColumnValue(); - _elem53.read(iprot); - struct.colVals.add(_elem53); - } - } - struct.setColValsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRowSet.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRowSet.java deleted file mode 100644 index d16c8a4bb32da..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TRowSet.java +++ /dev/null @@ -1,702 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TRowSet implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowSet"); - - private static final org.apache.thrift.protocol.TField START_ROW_OFFSET_FIELD_DESC = new org.apache.thrift.protocol.TField("startRowOffset", org.apache.thrift.protocol.TType.I64, (short)1); - private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2); - private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRowSetStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRowSetTupleSchemeFactory()); - } - - private long startRowOffset; // required - private List rows; // required - private List columns; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - START_ROW_OFFSET((short)1, "startRowOffset"), - ROWS((short)2, "rows"), - COLUMNS((short)3, "columns"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // START_ROW_OFFSET - return START_ROW_OFFSET; - case 2: // ROWS - return ROWS; - case 3: // COLUMNS - return COLUMNS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __STARTROWOFFSET_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.COLUMNS}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.START_ROW_OFFSET, new org.apache.thrift.meta_data.FieldMetaData("startRowOffset", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRow.class)))); - tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumn.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRowSet.class, metaDataMap); - } - - public TRowSet() { - } - - public TRowSet( - long startRowOffset, - List rows) - { - this(); - this.startRowOffset = startRowOffset; - setStartRowOffsetIsSet(true); - this.rows = rows; - } - - /** - * Performs a deep copy on other. 
- */ - public TRowSet(TRowSet other) { - __isset_bitfield = other.__isset_bitfield; - this.startRowOffset = other.startRowOffset; - if (other.isSetRows()) { - List __this__rows = new ArrayList(); - for (TRow other_element : other.rows) { - __this__rows.add(new TRow(other_element)); - } - this.rows = __this__rows; - } - if (other.isSetColumns()) { - List __this__columns = new ArrayList(); - for (TColumn other_element : other.columns) { - __this__columns.add(new TColumn(other_element)); - } - this.columns = __this__columns; - } - } - - public TRowSet deepCopy() { - return new TRowSet(this); - } - - @Override - public void clear() { - setStartRowOffsetIsSet(false); - this.startRowOffset = 0; - this.rows = null; - this.columns = null; - } - - public long getStartRowOffset() { - return this.startRowOffset; - } - - public void setStartRowOffset(long startRowOffset) { - this.startRowOffset = startRowOffset; - setStartRowOffsetIsSet(true); - } - - public void unsetStartRowOffset() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __STARTROWOFFSET_ISSET_ID); - } - - /** Returns true if field startRowOffset is set (has been assigned a value) and false otherwise */ - public boolean isSetStartRowOffset() { - return EncodingUtils.testBit(__isset_bitfield, __STARTROWOFFSET_ISSET_ID); - } - - public void setStartRowOffsetIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __STARTROWOFFSET_ISSET_ID, value); - } - - public int getRowsSize() { - return (this.rows == null) ? 0 : this.rows.size(); - } - - public java.util.Iterator getRowsIterator() { - return (this.rows == null) ? null : this.rows.iterator(); - } - - public void addToRows(TRow elem) { - if (this.rows == null) { - this.rows = new ArrayList(); - } - this.rows.add(elem); - } - - public List getRows() { - return this.rows; - } - - public void setRows(List rows) { - this.rows = rows; - } - - public void unsetRows() { - this.rows = null; - } - - /** Returns true if field rows is set (has been assigned a value) and false otherwise */ - public boolean isSetRows() { - return this.rows != null; - } - - public void setRowsIsSet(boolean value) { - if (!value) { - this.rows = null; - } - } - - public int getColumnsSize() { - return (this.columns == null) ? 0 : this.columns.size(); - } - - public java.util.Iterator getColumnsIterator() { - return (this.columns == null) ? 
null : this.columns.iterator(); - } - - public void addToColumns(TColumn elem) { - if (this.columns == null) { - this.columns = new ArrayList(); - } - this.columns.add(elem); - } - - public List getColumns() { - return this.columns; - } - - public void setColumns(List columns) { - this.columns = columns; - } - - public void unsetColumns() { - this.columns = null; - } - - /** Returns true if field columns is set (has been assigned a value) and false otherwise */ - public boolean isSetColumns() { - return this.columns != null; - } - - public void setColumnsIsSet(boolean value) { - if (!value) { - this.columns = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case START_ROW_OFFSET: - if (value == null) { - unsetStartRowOffset(); - } else { - setStartRowOffset((Long)value); - } - break; - - case ROWS: - if (value == null) { - unsetRows(); - } else { - setRows((List)value); - } - break; - - case COLUMNS: - if (value == null) { - unsetColumns(); - } else { - setColumns((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case START_ROW_OFFSET: - return Long.valueOf(getStartRowOffset()); - - case ROWS: - return getRows(); - - case COLUMNS: - return getColumns(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case START_ROW_OFFSET: - return isSetStartRowOffset(); - case ROWS: - return isSetRows(); - case COLUMNS: - return isSetColumns(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRowSet) - return this.equals((TRowSet)that); - return false; - } - - public boolean equals(TRowSet that) { - if (that == null) - return false; - - boolean this_present_startRowOffset = true; - boolean that_present_startRowOffset = true; - if (this_present_startRowOffset || that_present_startRowOffset) { - if (!(this_present_startRowOffset && that_present_startRowOffset)) - return false; - if (this.startRowOffset != that.startRowOffset) - return false; - } - - boolean this_present_rows = true && this.isSetRows(); - boolean that_present_rows = true && that.isSetRows(); - if (this_present_rows || that_present_rows) { - if (!(this_present_rows && that_present_rows)) - return false; - if (!this.rows.equals(that.rows)) - return false; - } - - boolean this_present_columns = true && this.isSetColumns(); - boolean that_present_columns = true && that.isSetColumns(); - if (this_present_columns || that_present_columns) { - if (!(this_present_columns && that_present_columns)) - return false; - if (!this.columns.equals(that.columns)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_startRowOffset = true; - builder.append(present_startRowOffset); - if (present_startRowOffset) - builder.append(startRowOffset); - - boolean present_rows = true && (isSetRows()); - builder.append(present_rows); - if (present_rows) - builder.append(rows); - - boolean present_columns = true && (isSetColumns()); - builder.append(present_columns); - if (present_columns) - builder.append(columns); - - return builder.toHashCode(); - } - - public int compareTo(TRowSet other) { - if 
(!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TRowSet typedOther = (TRowSet)other; - - lastComparison = Boolean.valueOf(isSetStartRowOffset()).compareTo(typedOther.isSetStartRowOffset()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStartRowOffset()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.startRowOffset, typedOther.startRowOffset); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRows()).compareTo(typedOther.isSetRows()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRows()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rows, typedOther.rows); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetColumns()).compareTo(typedOther.isSetColumns()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, typedOther.columns); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRowSet("); - boolean first = true; - - sb.append("startRowOffset:"); - sb.append(this.startRowOffset); - first = false; - if (!first) sb.append(", "); - sb.append("rows:"); - if (this.rows == null) { - sb.append("null"); - } else { - sb.append(this.rows); - } - first = false; - if (isSetColumns()) { - if (!first) sb.append(", "); - sb.append("columns:"); - if (this.columns == null) { - sb.append("null"); - } else { - sb.append(this.columns); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStartRowOffset()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'startRowOffset' is unset! Struct:" + toString()); - } - - if (!isSetRows()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'rows' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRowSetStandardSchemeFactory implements SchemeFactory { - public TRowSetStandardScheme getScheme() { - return new TRowSetStandardScheme(); - } - } - - private static class TRowSetStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRowSet struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // START_ROW_OFFSET - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.startRowOffset = iprot.readI64(); - struct.setStartRowOffsetIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // ROWS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list118 = iprot.readListBegin(); - struct.rows = new ArrayList(_list118.size); - for (int _i119 = 0; _i119 < _list118.size; ++_i119) - { - TRow _elem120; // optional - _elem120 = new TRow(); - _elem120.read(iprot); - struct.rows.add(_elem120); - } - iprot.readListEnd(); - } - struct.setRowsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // COLUMNS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list121 = iprot.readListBegin(); - struct.columns = new ArrayList(_list121.size); - for (int _i122 = 0; _i122 < _list121.size; ++_i122) - { - TColumn _elem123; // optional - _elem123 = new TColumn(); - _elem123.read(iprot); - struct.columns.add(_elem123); - } - iprot.readListEnd(); - } - struct.setColumnsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TRowSet struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldBegin(START_ROW_OFFSET_FIELD_DESC); - oprot.writeI64(struct.startRowOffset); - oprot.writeFieldEnd(); - if (struct.rows != null) { - oprot.writeFieldBegin(ROWS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.rows.size())); - for (TRow _iter124 : struct.rows) - { - _iter124.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.columns != null) { - if (struct.isSetColumns()) { - oprot.writeFieldBegin(COLUMNS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); - for (TColumn _iter125 : struct.columns) - { - _iter125.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRowSetTupleSchemeFactory implements SchemeFactory { - public TRowSetTupleScheme getScheme() { - return new 
TRowSetTupleScheme(); - } - } - - private static class TRowSetTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRowSet struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.startRowOffset); - { - oprot.writeI32(struct.rows.size()); - for (TRow _iter126 : struct.rows) - { - _iter126.write(oprot); - } - } - BitSet optionals = new BitSet(); - if (struct.isSetColumns()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetColumns()) { - { - oprot.writeI32(struct.columns.size()); - for (TColumn _iter127 : struct.columns) - { - _iter127.write(oprot); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRowSet struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.startRowOffset = iprot.readI64(); - struct.setStartRowOffsetIsSet(true); - { - org.apache.thrift.protocol.TList _list128 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.rows = new ArrayList(_list128.size); - for (int _i129 = 0; _i129 < _list128.size; ++_i129) - { - TRow _elem130; // optional - _elem130 = new TRow(); - _elem130.read(iprot); - struct.rows.add(_elem130); - } - } - struct.setRowsIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list131 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.columns = new ArrayList(_list131.size); - for (int _i132 = 0; _i132 < _list131.size; ++_i132) - { - TColumn _elem133; // optional - _elem133 = new TColumn(); - _elem133.read(iprot); - struct.columns.add(_elem133); - } - } - struct.setColumnsIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TSessionHandle.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TSessionHandle.java deleted file mode 100644 index 82c00dd68a98b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TSessionHandle.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TSessionHandle implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSessionHandle"); - - private static final org.apache.thrift.protocol.TField 
SESSION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionId", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TSessionHandleStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TSessionHandleTupleSchemeFactory()); - } - - private THandleIdentifier sessionId; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_ID((short)1, "sessionId"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_ID - return SESSION_ID; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_ID, new org.apache.thrift.meta_data.FieldMetaData("sessionId", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THandleIdentifier.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSessionHandle.class, metaDataMap); - } - - public TSessionHandle() { - } - - public TSessionHandle( - THandleIdentifier sessionId) - { - this(); - this.sessionId = sessionId; - } - - /** - * Performs a deep copy on other. 
- */ - public TSessionHandle(TSessionHandle other) { - if (other.isSetSessionId()) { - this.sessionId = new THandleIdentifier(other.sessionId); - } - } - - public TSessionHandle deepCopy() { - return new TSessionHandle(this); - } - - @Override - public void clear() { - this.sessionId = null; - } - - public THandleIdentifier getSessionId() { - return this.sessionId; - } - - public void setSessionId(THandleIdentifier sessionId) { - this.sessionId = sessionId; - } - - public void unsetSessionId() { - this.sessionId = null; - } - - /** Returns true if field sessionId is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionId() { - return this.sessionId != null; - } - - public void setSessionIdIsSet(boolean value) { - if (!value) { - this.sessionId = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_ID: - if (value == null) { - unsetSessionId(); - } else { - setSessionId((THandleIdentifier)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_ID: - return getSessionId(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_ID: - return isSetSessionId(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TSessionHandle) - return this.equals((TSessionHandle)that); - return false; - } - - public boolean equals(TSessionHandle that) { - if (that == null) - return false; - - boolean this_present_sessionId = true && this.isSetSessionId(); - boolean that_present_sessionId = true && that.isSetSessionId(); - if (this_present_sessionId || that_present_sessionId) { - if (!(this_present_sessionId && that_present_sessionId)) - return false; - if (!this.sessionId.equals(that.sessionId)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_sessionId = true && (isSetSessionId()); - builder.append(present_sessionId); - if (present_sessionId) - builder.append(sessionId); - - return builder.toHashCode(); - } - - public int compareTo(TSessionHandle other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TSessionHandle typedOther = (TSessionHandle)other; - - lastComparison = Boolean.valueOf(isSetSessionId()).compareTo(typedOther.isSetSessionId()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionId()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionId, typedOther.sessionId); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new 
StringBuilder("TSessionHandle("); - boolean first = true; - - sb.append("sessionId:"); - if (this.sessionId == null) { - sb.append("null"); - } else { - sb.append(this.sessionId); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionId()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionId' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionId != null) { - sessionId.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TSessionHandleStandardSchemeFactory implements SchemeFactory { - public TSessionHandleStandardScheme getScheme() { - return new TSessionHandleStandardScheme(); - } - } - - private static class TSessionHandleStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TSessionHandle struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_ID - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionId = new THandleIdentifier(); - struct.sessionId.read(iprot); - struct.setSessionIdIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TSessionHandle struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionId != null) { - oprot.writeFieldBegin(SESSION_ID_FIELD_DESC); - struct.sessionId.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TSessionHandleTupleSchemeFactory implements SchemeFactory { - public TSessionHandleTupleScheme getScheme() { - return new TSessionHandleTupleScheme(); - } - } - - private static class TSessionHandleTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TSessionHandle struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionId.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TSessionHandle struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionId = new THandleIdentifier(); - struct.sessionId.read(iprot); - struct.setSessionIdIsSet(true); - } - } - -} - diff --git 
a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStatus.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStatus.java deleted file mode 100644 index 24a746e94965d..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStatus.java +++ /dev/null @@ -1,874 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TStatus implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStatus"); - - private static final org.apache.thrift.protocol.TField STATUS_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("statusCode", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField INFO_MESSAGES_FIELD_DESC = new org.apache.thrift.protocol.TField("infoMessages", org.apache.thrift.protocol.TType.LIST, (short)2); - private static final org.apache.thrift.protocol.TField SQL_STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("sqlState", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStatusStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TStatusTupleSchemeFactory()); - } - - private TStatusCode statusCode; // required - private List infoMessages; // optional - private String sqlState; // optional - private int errorCode; // optional - private String errorMessage; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see TStatusCode - */ - STATUS_CODE((short)1, "statusCode"), - INFO_MESSAGES((short)2, "infoMessages"), - SQL_STATE((short)3, "sqlState"), - ERROR_CODE((short)4, "errorCode"), - ERROR_MESSAGE((short)5, "errorMessage"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS_CODE - return STATUS_CODE; - case 2: // INFO_MESSAGES - return INFO_MESSAGES; - case 3: // SQL_STATE - return SQL_STATE; - case 4: // ERROR_CODE - return ERROR_CODE; - case 5: // ERROR_MESSAGE - return ERROR_MESSAGE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __ERRORCODE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private _Fields optionals[] = {_Fields.INFO_MESSAGES,_Fields.SQL_STATE,_Fields.ERROR_CODE,_Fields.ERROR_MESSAGE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS_CODE, new org.apache.thrift.meta_data.FieldMetaData("statusCode", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TStatusCode.class))); - tmpMap.put(_Fields.INFO_MESSAGES, new org.apache.thrift.meta_data.FieldMetaData("infoMessages", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.SQL_STATE, new org.apache.thrift.meta_data.FieldMetaData("sqlState", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStatus.class, metaDataMap); - } - - public TStatus() { - } - - public TStatus( - TStatusCode statusCode) - { - this(); - this.statusCode = statusCode; - } - - /** - * Performs a deep copy on other. - */ - public TStatus(TStatus other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetStatusCode()) { - this.statusCode = other.statusCode; - } - if (other.isSetInfoMessages()) { - List __this__infoMessages = new ArrayList(); - for (String other_element : other.infoMessages) { - __this__infoMessages.add(other_element); - } - this.infoMessages = __this__infoMessages; - } - if (other.isSetSqlState()) { - this.sqlState = other.sqlState; - } - this.errorCode = other.errorCode; - if (other.isSetErrorMessage()) { - this.errorMessage = other.errorMessage; - } - } - - public TStatus deepCopy() { - return new TStatus(this); - } - - @Override - public void clear() { - this.statusCode = null; - this.infoMessages = null; - this.sqlState = null; - setErrorCodeIsSet(false); - this.errorCode = 0; - this.errorMessage = null; - } - - /** - * - * @see TStatusCode - */ - public TStatusCode getStatusCode() { - return this.statusCode; - } - - /** - * - * @see TStatusCode - */ - public void setStatusCode(TStatusCode statusCode) { - this.statusCode = statusCode; - } - - public void unsetStatusCode() { - this.statusCode = null; - } - - /** Returns true if field statusCode is set (has been assigned a value) and false otherwise */ - public boolean isSetStatusCode() { - return this.statusCode != null; - } - - public void setStatusCodeIsSet(boolean value) { - if (!value) { - this.statusCode = null; - } - } - - public int getInfoMessagesSize() { - return (this.infoMessages == null) ? 0 : this.infoMessages.size(); - } - - public java.util.Iterator getInfoMessagesIterator() { - return (this.infoMessages == null) ? 
null : this.infoMessages.iterator(); - } - - public void addToInfoMessages(String elem) { - if (this.infoMessages == null) { - this.infoMessages = new ArrayList(); - } - this.infoMessages.add(elem); - } - - public List getInfoMessages() { - return this.infoMessages; - } - - public void setInfoMessages(List infoMessages) { - this.infoMessages = infoMessages; - } - - public void unsetInfoMessages() { - this.infoMessages = null; - } - - /** Returns true if field infoMessages is set (has been assigned a value) and false otherwise */ - public boolean isSetInfoMessages() { - return this.infoMessages != null; - } - - public void setInfoMessagesIsSet(boolean value) { - if (!value) { - this.infoMessages = null; - } - } - - public String getSqlState() { - return this.sqlState; - } - - public void setSqlState(String sqlState) { - this.sqlState = sqlState; - } - - public void unsetSqlState() { - this.sqlState = null; - } - - /** Returns true if field sqlState is set (has been assigned a value) and false otherwise */ - public boolean isSetSqlState() { - return this.sqlState != null; - } - - public void setSqlStateIsSet(boolean value) { - if (!value) { - this.sqlState = null; - } - } - - public int getErrorCode() { - return this.errorCode; - } - - public void setErrorCode(int errorCode) { - this.errorCode = errorCode; - setErrorCodeIsSet(true); - } - - public void unsetErrorCode() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorCode() { - return EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - public void setErrorCodeIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value); - } - - public String getErrorMessage() { - return this.errorMessage; - } - - public void setErrorMessage(String errorMessage) { - this.errorMessage = errorMessage; - } - - public void unsetErrorMessage() { - this.errorMessage = null; - } - - /** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorMessage() { - return this.errorMessage != null; - } - - public void setErrorMessageIsSet(boolean value) { - if (!value) { - this.errorMessage = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS_CODE: - if (value == null) { - unsetStatusCode(); - } else { - setStatusCode((TStatusCode)value); - } - break; - - case INFO_MESSAGES: - if (value == null) { - unsetInfoMessages(); - } else { - setInfoMessages((List)value); - } - break; - - case SQL_STATE: - if (value == null) { - unsetSqlState(); - } else { - setSqlState((String)value); - } - break; - - case ERROR_CODE: - if (value == null) { - unsetErrorCode(); - } else { - setErrorCode((Integer)value); - } - break; - - case ERROR_MESSAGE: - if (value == null) { - unsetErrorMessage(); - } else { - setErrorMessage((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS_CODE: - return getStatusCode(); - - case INFO_MESSAGES: - return getInfoMessages(); - - case SQL_STATE: - return getSqlState(); - - case ERROR_CODE: - return Integer.valueOf(getErrorCode()); - - case ERROR_MESSAGE: - return getErrorMessage(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - 
public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS_CODE: - return isSetStatusCode(); - case INFO_MESSAGES: - return isSetInfoMessages(); - case SQL_STATE: - return isSetSqlState(); - case ERROR_CODE: - return isSetErrorCode(); - case ERROR_MESSAGE: - return isSetErrorMessage(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStatus) - return this.equals((TStatus)that); - return false; - } - - public boolean equals(TStatus that) { - if (that == null) - return false; - - boolean this_present_statusCode = true && this.isSetStatusCode(); - boolean that_present_statusCode = true && that.isSetStatusCode(); - if (this_present_statusCode || that_present_statusCode) { - if (!(this_present_statusCode && that_present_statusCode)) - return false; - if (!this.statusCode.equals(that.statusCode)) - return false; - } - - boolean this_present_infoMessages = true && this.isSetInfoMessages(); - boolean that_present_infoMessages = true && that.isSetInfoMessages(); - if (this_present_infoMessages || that_present_infoMessages) { - if (!(this_present_infoMessages && that_present_infoMessages)) - return false; - if (!this.infoMessages.equals(that.infoMessages)) - return false; - } - - boolean this_present_sqlState = true && this.isSetSqlState(); - boolean that_present_sqlState = true && that.isSetSqlState(); - if (this_present_sqlState || that_present_sqlState) { - if (!(this_present_sqlState && that_present_sqlState)) - return false; - if (!this.sqlState.equals(that.sqlState)) - return false; - } - - boolean this_present_errorCode = true && this.isSetErrorCode(); - boolean that_present_errorCode = true && that.isSetErrorCode(); - if (this_present_errorCode || that_present_errorCode) { - if (!(this_present_errorCode && that_present_errorCode)) - return false; - if (this.errorCode != that.errorCode) - return false; - } - - boolean this_present_errorMessage = true && this.isSetErrorMessage(); - boolean that_present_errorMessage = true && that.isSetErrorMessage(); - if (this_present_errorMessage || that_present_errorMessage) { - if (!(this_present_errorMessage && that_present_errorMessage)) - return false; - if (!this.errorMessage.equals(that.errorMessage)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_statusCode = true && (isSetStatusCode()); - builder.append(present_statusCode); - if (present_statusCode) - builder.append(statusCode.getValue()); - - boolean present_infoMessages = true && (isSetInfoMessages()); - builder.append(present_infoMessages); - if (present_infoMessages) - builder.append(infoMessages); - - boolean present_sqlState = true && (isSetSqlState()); - builder.append(present_sqlState); - if (present_sqlState) - builder.append(sqlState); - - boolean present_errorCode = true && (isSetErrorCode()); - builder.append(present_errorCode); - if (present_errorCode) - builder.append(errorCode); - - boolean present_errorMessage = true && (isSetErrorMessage()); - builder.append(present_errorMessage); - if (present_errorMessage) - builder.append(errorMessage); - - return builder.toHashCode(); - } - - public int compareTo(TStatus other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TStatus typedOther = 
(TStatus)other; - - lastComparison = Boolean.valueOf(isSetStatusCode()).compareTo(typedOther.isSetStatusCode()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatusCode()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statusCode, typedOther.statusCode); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetInfoMessages()).compareTo(typedOther.isSetInfoMessages()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetInfoMessages()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.infoMessages, typedOther.infoMessages); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSqlState()).compareTo(typedOther.isSetSqlState()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSqlState()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sqlState, typedOther.sqlState); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorCode()).compareTo(typedOther.isSetErrorCode()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorCode()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, typedOther.errorCode); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(typedOther.isSetErrorMessage()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorMessage()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, typedOther.errorMessage); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TStatus("); - boolean first = true; - - sb.append("statusCode:"); - if (this.statusCode == null) { - sb.append("null"); - } else { - sb.append(this.statusCode); - } - first = false; - if (isSetInfoMessages()) { - if (!first) sb.append(", "); - sb.append("infoMessages:"); - if (this.infoMessages == null) { - sb.append("null"); - } else { - sb.append(this.infoMessages); - } - first = false; - } - if (isSetSqlState()) { - if (!first) sb.append(", "); - sb.append("sqlState:"); - if (this.sqlState == null) { - sb.append("null"); - } else { - sb.append(this.sqlState); - } - first = false; - } - if (isSetErrorCode()) { - if (!first) sb.append(", "); - sb.append("errorCode:"); - sb.append(this.errorCode); - first = false; - } - if (isSetErrorMessage()) { - if (!first) sb.append(", "); - sb.append("errorMessage:"); - if (this.errorMessage == null) { - sb.append("null"); - } else { - sb.append(this.errorMessage); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatusCode()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'statusCode' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStatusStandardSchemeFactory implements SchemeFactory { - public TStatusStandardScheme getScheme() { - return new TStatusStandardScheme(); - } - } - - private static class TStatusStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStatus struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS_CODE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.statusCode = TStatusCode.findByValue(iprot.readI32()); - struct.setStatusCodeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // INFO_MESSAGES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list134 = iprot.readListBegin(); - struct.infoMessages = new ArrayList(_list134.size); - for (int _i135 = 0; _i135 < _list134.size; ++_i135) - { - String _elem136; // optional - _elem136 = iprot.readString(); - struct.infoMessages.add(_elem136); - } - iprot.readListEnd(); - } - struct.setInfoMessagesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SQL_STATE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // ERROR_CODE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // ERROR_MESSAGE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStatus struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.statusCode != null) { - oprot.writeFieldBegin(STATUS_CODE_FIELD_DESC); - 
oprot.writeI32(struct.statusCode.getValue()); - oprot.writeFieldEnd(); - } - if (struct.infoMessages != null) { - if (struct.isSetInfoMessages()) { - oprot.writeFieldBegin(INFO_MESSAGES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.infoMessages.size())); - for (String _iter137 : struct.infoMessages) - { - oprot.writeString(_iter137); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - } - if (struct.sqlState != null) { - if (struct.isSetSqlState()) { - oprot.writeFieldBegin(SQL_STATE_FIELD_DESC); - oprot.writeString(struct.sqlState); - oprot.writeFieldEnd(); - } - } - if (struct.isSetErrorCode()) { - oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC); - oprot.writeI32(struct.errorCode); - oprot.writeFieldEnd(); - } - if (struct.errorMessage != null) { - if (struct.isSetErrorMessage()) { - oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC); - oprot.writeString(struct.errorMessage); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStatusTupleSchemeFactory implements SchemeFactory { - public TStatusTupleScheme getScheme() { - return new TStatusTupleScheme(); - } - } - - private static class TStatusTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TStatus struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.statusCode.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetInfoMessages()) { - optionals.set(0); - } - if (struct.isSetSqlState()) { - optionals.set(1); - } - if (struct.isSetErrorCode()) { - optionals.set(2); - } - if (struct.isSetErrorMessage()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetInfoMessages()) { - { - oprot.writeI32(struct.infoMessages.size()); - for (String _iter138 : struct.infoMessages) - { - oprot.writeString(_iter138); - } - } - } - if (struct.isSetSqlState()) { - oprot.writeString(struct.sqlState); - } - if (struct.isSetErrorCode()) { - oprot.writeI32(struct.errorCode); - } - if (struct.isSetErrorMessage()) { - oprot.writeString(struct.errorMessage); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStatus struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.statusCode = TStatusCode.findByValue(iprot.readI32()); - struct.setStatusCodeIsSet(true); - BitSet incoming = iprot.readBitSet(4); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list139 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.infoMessages = new ArrayList(_list139.size); - for (int _i140 = 0; _i140 < _list139.size; ++_i140) - { - String _elem141; // optional - _elem141 = iprot.readString(); - struct.infoMessages.add(_elem141); - } - } - struct.setInfoMessagesIsSet(true); - } - if (incoming.get(1)) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } - if (incoming.get(2)) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } - if (incoming.get(3)) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStatusCode.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStatusCode.java deleted file mode 100644 index 
e7fde45fd131a..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStatusCode.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TStatusCode implements org.apache.thrift.TEnum { - SUCCESS_STATUS(0), - SUCCESS_WITH_INFO_STATUS(1), - STILL_EXECUTING_STATUS(2), - ERROR_STATUS(3), - INVALID_HANDLE_STATUS(4); - - private final int value; - - private TStatusCode(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. - */ - public static TStatusCode findByValue(int value) { - switch (value) { - case 0: - return SUCCESS_STATUS; - case 1: - return SUCCESS_WITH_INFO_STATUS; - case 2: - return STILL_EXECUTING_STATUS; - case 3: - return ERROR_STATUS; - case 4: - return INVALID_HANDLE_STATUS; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStringColumn.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStringColumn.java deleted file mode 100644 index 3dae460c8621d..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStringColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TStringColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStringColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStringColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TStringColumnTupleSchemeFactory()); - } - - private List values; // 
required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStringColumn.class, metaDataMap); - } - - public TStringColumn() { - } - - public TStringColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = nulls; - } - - /** - * Performs a deep copy on other. - */ - public TStringColumn(TStringColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(); - for (String other_element : other.values) { - __this__values.add(other_element); - } - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); -; - } - } - - public TStringColumn deepCopy() { - return new TStringColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? 
null : this.values.iterator(); - } - - public void addToValues(String elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return nulls; - } - - public void setNulls(byte[] nulls) { - setNulls(nulls == null ? (ByteBuffer)null : ByteBuffer.wrap(nulls)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = nulls; - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStringColumn) - return this.equals((TStringColumn)that); - return false; - } - - public boolean equals(TStringColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_values = true && (isSetValues()); - builder.append(present_values); - if (present_values) - builder.append(values); - - boolean present_nulls = true && (isSetNulls()); - builder.append(present_nulls); - if (present_nulls) - builder.append(nulls); - - return builder.toHashCode(); - } - - public int compareTo(TStringColumn other) { - if (!getClass().equals(other.getClass())) { - return 
getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TStringColumn typedOther = (TStringColumn)other; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(typedOther.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, typedOther.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(typedOther.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, typedOther.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TStringColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStringColumnStandardSchemeFactory implements SchemeFactory { - public TStringColumnStandardScheme getScheme() { - return new TStringColumnStandardScheme(); - } - } - - private static class TStringColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStringColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list102 = iprot.readListBegin(); - struct.values = new ArrayList(_list102.size); - for (int _i103 = 0; _i103 < _list102.size; ++_i103) - { - String _elem104; // optional - _elem104 = iprot.readString(); - struct.values.add(_elem104); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStringColumn struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); - for (String _iter105 : struct.values) - { - oprot.writeString(_iter105); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStringColumnTupleSchemeFactory implements SchemeFactory { - public TStringColumnTupleScheme getScheme() { - return new TStringColumnTupleScheme(); - } - } - - private static class TStringColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TStringColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (String _iter106 : struct.values) - { - 
oprot.writeString(_iter106); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStringColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.values = new ArrayList(_list107.size); - for (int _i108 = 0; _i108 < _list107.size; ++_i108) - { - String _elem109; // optional - _elem109 = iprot.readString(); - struct.values.add(_elem109); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStringValue.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStringValue.java deleted file mode 100644 index af7a109775a8b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStringValue.java +++ /dev/null @@ -1,389 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TStringValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStringValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStringValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TStringValueTupleSchemeFactory()); - } - - private String value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStringValue.class, metaDataMap); - } - - public TStringValue() { - } - - /** - * Performs a deep copy on other. - */ - public TStringValue(TStringValue other) { - if (other.isSetValue()) { - this.value = other.value; - } - } - - public TStringValue deepCopy() { - return new TStringValue(this); - } - - @Override - public void clear() { - this.value = null; - } - - public String getValue() { - return this.value; - } - - public void setValue(String value) { - this.value = value; - } - - public void unsetValue() { - this.value = null; - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return this.value != null; - } - - public void setValueIsSet(boolean value) { - if (!value) { - this.value = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return getValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStringValue) - return this.equals((TStringValue)that); - return false; - } - - public boolean equals(TStringValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (!this.value.equals(that.value)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_value = true && 
(isSetValue()); - builder.append(present_value); - if (present_value) - builder.append(value); - - return builder.toHashCode(); - } - - public int compareTo(TStringValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TStringValue typedOther = (TStringValue)other; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(typedOther.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, typedOther.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TStringValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - if (this.value == null) { - sb.append("null"); - } else { - sb.append(this.value); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStringValueStandardSchemeFactory implements SchemeFactory { - public TStringValueStandardScheme getScheme() { - return new TStringValueStandardScheme(); - } - } - - private static class TStringValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStringValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStringValue struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.value != null) { - if (struct.isSetValue()) { - 
oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeString(struct.value); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStringValueTupleSchemeFactory implements SchemeFactory { - public TStringValueTupleScheme getScheme() { - return new TStringValueTupleScheme(); - } - } - - private static class TStringValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TStringValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeString(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStringValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStructTypeEntry.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStructTypeEntry.java deleted file mode 100644 index 20f5fb6c29073..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TStructTypeEntry.java +++ /dev/null @@ -1,448 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TStructTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStructTypeEntry"); - - private static final org.apache.thrift.protocol.TField NAME_TO_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("nameToTypePtr", org.apache.thrift.protocol.TType.MAP, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStructTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TStructTypeEntryTupleSchemeFactory()); - } - - private Map nameToTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NAME_TO_TYPE_PTR((short)1, "nameToTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // NAME_TO_TYPE_PTR - return NAME_TO_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAME_TO_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("nameToTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr")))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStructTypeEntry.class, metaDataMap); - } - - public TStructTypeEntry() { - } - - public TStructTypeEntry( - Map nameToTypePtr) - { - this(); - this.nameToTypePtr = nameToTypePtr; - } - - /** - * Performs a deep copy on other. - */ - public TStructTypeEntry(TStructTypeEntry other) { - if (other.isSetNameToTypePtr()) { - Map __this__nameToTypePtr = new HashMap(); - for (Map.Entry other_element : other.nameToTypePtr.entrySet()) { - - String other_element_key = other_element.getKey(); - Integer other_element_value = other_element.getValue(); - - String __this__nameToTypePtr_copy_key = other_element_key; - - Integer __this__nameToTypePtr_copy_value = other_element_value; - - __this__nameToTypePtr.put(__this__nameToTypePtr_copy_key, __this__nameToTypePtr_copy_value); - } - this.nameToTypePtr = __this__nameToTypePtr; - } - } - - public TStructTypeEntry deepCopy() { - return new TStructTypeEntry(this); - } - - @Override - public void clear() { - this.nameToTypePtr = null; - } - - public int getNameToTypePtrSize() { - return (this.nameToTypePtr == null) ? 
0 : this.nameToTypePtr.size(); - } - - public void putToNameToTypePtr(String key, int val) { - if (this.nameToTypePtr == null) { - this.nameToTypePtr = new HashMap(); - } - this.nameToTypePtr.put(key, val); - } - - public Map getNameToTypePtr() { - return this.nameToTypePtr; - } - - public void setNameToTypePtr(Map nameToTypePtr) { - this.nameToTypePtr = nameToTypePtr; - } - - public void unsetNameToTypePtr() { - this.nameToTypePtr = null; - } - - /** Returns true if field nameToTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetNameToTypePtr() { - return this.nameToTypePtr != null; - } - - public void setNameToTypePtrIsSet(boolean value) { - if (!value) { - this.nameToTypePtr = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case NAME_TO_TYPE_PTR: - if (value == null) { - unsetNameToTypePtr(); - } else { - setNameToTypePtr((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case NAME_TO_TYPE_PTR: - return getNameToTypePtr(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case NAME_TO_TYPE_PTR: - return isSetNameToTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStructTypeEntry) - return this.equals((TStructTypeEntry)that); - return false; - } - - public boolean equals(TStructTypeEntry that) { - if (that == null) - return false; - - boolean this_present_nameToTypePtr = true && this.isSetNameToTypePtr(); - boolean that_present_nameToTypePtr = true && that.isSetNameToTypePtr(); - if (this_present_nameToTypePtr || that_present_nameToTypePtr) { - if (!(this_present_nameToTypePtr && that_present_nameToTypePtr)) - return false; - if (!this.nameToTypePtr.equals(that.nameToTypePtr)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_nameToTypePtr = true && (isSetNameToTypePtr()); - builder.append(present_nameToTypePtr); - if (present_nameToTypePtr) - builder.append(nameToTypePtr); - - return builder.toHashCode(); - } - - public int compareTo(TStructTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TStructTypeEntry typedOther = (TStructTypeEntry)other; - - lastComparison = Boolean.valueOf(isSetNameToTypePtr()).compareTo(typedOther.isSetNameToTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNameToTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nameToTypePtr, typedOther.nameToTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - 
StringBuilder sb = new StringBuilder("TStructTypeEntry("); - boolean first = true; - - sb.append("nameToTypePtr:"); - if (this.nameToTypePtr == null) { - sb.append("null"); - } else { - sb.append(this.nameToTypePtr); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNameToTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nameToTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStructTypeEntryStandardSchemeFactory implements SchemeFactory { - public TStructTypeEntryStandardScheme getScheme() { - return new TStructTypeEntryStandardScheme(); - } - } - - private static class TStructTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStructTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // NAME_TO_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map10 = iprot.readMapBegin(); - struct.nameToTypePtr = new HashMap(2*_map10.size); - for (int _i11 = 0; _i11 < _map10.size; ++_i11) - { - String _key12; // required - int _val13; // required - _key12 = iprot.readString(); - _val13 = iprot.readI32(); - struct.nameToTypePtr.put(_key12, _val13); - } - iprot.readMapEnd(); - } - struct.setNameToTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStructTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.nameToTypePtr != null) { - oprot.writeFieldBegin(NAME_TO_TYPE_PTR_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.nameToTypePtr.size())); - for (Map.Entry _iter14 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter14.getKey()); - oprot.writeI32(_iter14.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStructTypeEntryTupleSchemeFactory implements SchemeFactory { - public TStructTypeEntryTupleScheme getScheme() { - return new TStructTypeEntryTupleScheme(); - } - } - - private static class 
TStructTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TStructTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.nameToTypePtr.size()); - for (Map.Entry _iter15 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter15.getKey()); - oprot.writeI32(_iter15.getValue()); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStructTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TMap _map16 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.nameToTypePtr = new HashMap(2*_map16.size); - for (int _i17 = 0; _i17 < _map16.size; ++_i17) - { - String _key18; // required - int _val19; // required - _key18 = iprot.readString(); - _val19 = iprot.readI32(); - struct.nameToTypePtr.put(_key18, _val19); - } - } - struct.setNameToTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTableSchema.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTableSchema.java deleted file mode 100644 index ff5e54db7c16c..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTableSchema.java +++ /dev/null @@ -1,439 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TTableSchema implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTableSchema"); - - private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TTableSchemaStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TTableSchemaTupleSchemeFactory()); - } - - private List columns; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - COLUMNS((short)1, "columns"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // COLUMNS - return COLUMNS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnDesc.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTableSchema.class, metaDataMap); - } - - public TTableSchema() { - } - - public TTableSchema( - List columns) - { - this(); - this.columns = columns; - } - - /** - * Performs a deep copy on other. - */ - public TTableSchema(TTableSchema other) { - if (other.isSetColumns()) { - List __this__columns = new ArrayList(); - for (TColumnDesc other_element : other.columns) { - __this__columns.add(new TColumnDesc(other_element)); - } - this.columns = __this__columns; - } - } - - public TTableSchema deepCopy() { - return new TTableSchema(this); - } - - @Override - public void clear() { - this.columns = null; - } - - public int getColumnsSize() { - return (this.columns == null) ? 0 : this.columns.size(); - } - - public java.util.Iterator getColumnsIterator() { - return (this.columns == null) ? 
null : this.columns.iterator(); - } - - public void addToColumns(TColumnDesc elem) { - if (this.columns == null) { - this.columns = new ArrayList(); - } - this.columns.add(elem); - } - - public List getColumns() { - return this.columns; - } - - public void setColumns(List columns) { - this.columns = columns; - } - - public void unsetColumns() { - this.columns = null; - } - - /** Returns true if field columns is set (has been assigned a value) and false otherwise */ - public boolean isSetColumns() { - return this.columns != null; - } - - public void setColumnsIsSet(boolean value) { - if (!value) { - this.columns = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case COLUMNS: - if (value == null) { - unsetColumns(); - } else { - setColumns((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case COLUMNS: - return getColumns(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case COLUMNS: - return isSetColumns(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TTableSchema) - return this.equals((TTableSchema)that); - return false; - } - - public boolean equals(TTableSchema that) { - if (that == null) - return false; - - boolean this_present_columns = true && this.isSetColumns(); - boolean that_present_columns = true && that.isSetColumns(); - if (this_present_columns || that_present_columns) { - if (!(this_present_columns && that_present_columns)) - return false; - if (!this.columns.equals(that.columns)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_columns = true && (isSetColumns()); - builder.append(present_columns); - if (present_columns) - builder.append(columns); - - return builder.toHashCode(); - } - - public int compareTo(TTableSchema other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TTableSchema typedOther = (TTableSchema)other; - - lastComparison = Boolean.valueOf(isSetColumns()).compareTo(typedOther.isSetColumns()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, typedOther.columns); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TTableSchema("); - boolean first = true; - - sb.append("columns:"); - if (this.columns == null) { - sb.append("null"); - } else { - sb.append(this.columns); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws 
org.apache.thrift.TException { - // check for required fields - if (!isSetColumns()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TTableSchemaStandardSchemeFactory implements SchemeFactory { - public TTableSchemaStandardScheme getScheme() { - return new TTableSchemaStandardScheme(); - } - } - - private static class TTableSchemaStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TTableSchema struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // COLUMNS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list38 = iprot.readListBegin(); - struct.columns = new ArrayList(_list38.size); - for (int _i39 = 0; _i39 < _list38.size; ++_i39) - { - TColumnDesc _elem40; // optional - _elem40 = new TColumnDesc(); - _elem40.read(iprot); - struct.columns.add(_elem40); - } - iprot.readListEnd(); - } - struct.setColumnsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TTableSchema struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.columns != null) { - oprot.writeFieldBegin(COLUMNS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); - for (TColumnDesc _iter41 : struct.columns) - { - _iter41.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TTableSchemaTupleSchemeFactory implements SchemeFactory { - public TTableSchemaTupleScheme getScheme() { - return new TTableSchemaTupleScheme(); - } - } - - private static class TTableSchemaTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TTableSchema struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.columns.size()); - for (TColumnDesc _iter42 : struct.columns) - { - _iter42.write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TTableSchema struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; 
- { - org.apache.thrift.protocol.TList _list43 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.columns = new ArrayList(_list43.size); - for (int _i44 = 0; _i44 < _list43.size; ++_i44) - { - TColumnDesc _elem45; // optional - _elem45 = new TColumnDesc(); - _elem45.read(iprot); - struct.columns.add(_elem45); - } - } - struct.setColumnsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeDesc.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeDesc.java deleted file mode 100644 index 251f86a914719..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeDesc.java +++ /dev/null @@ -1,439 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TTypeDesc implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeDesc"); - - private static final org.apache.thrift.protocol.TField TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("types", org.apache.thrift.protocol.TType.LIST, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TTypeDescStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TTypeDescTupleSchemeFactory()); - } - - private List types; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TYPES((short)1, "types"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // TYPES - return TYPES; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TYPES, new org.apache.thrift.meta_data.FieldMetaData("types", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeEntry.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeDesc.class, metaDataMap); - } - - public TTypeDesc() { - } - - public TTypeDesc( - List types) - { - this(); - this.types = types; - } - - /** - * Performs a deep copy on other. - */ - public TTypeDesc(TTypeDesc other) { - if (other.isSetTypes()) { - List __this__types = new ArrayList(); - for (TTypeEntry other_element : other.types) { - __this__types.add(new TTypeEntry(other_element)); - } - this.types = __this__types; - } - } - - public TTypeDesc deepCopy() { - return new TTypeDesc(this); - } - - @Override - public void clear() { - this.types = null; - } - - public int getTypesSize() { - return (this.types == null) ? 0 : this.types.size(); - } - - public java.util.Iterator getTypesIterator() { - return (this.types == null) ? 
null : this.types.iterator(); - } - - public void addToTypes(TTypeEntry elem) { - if (this.types == null) { - this.types = new ArrayList(); - } - this.types.add(elem); - } - - public List getTypes() { - return this.types; - } - - public void setTypes(List types) { - this.types = types; - } - - public void unsetTypes() { - this.types = null; - } - - /** Returns true if field types is set (has been assigned a value) and false otherwise */ - public boolean isSetTypes() { - return this.types != null; - } - - public void setTypesIsSet(boolean value) { - if (!value) { - this.types = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case TYPES: - if (value == null) { - unsetTypes(); - } else { - setTypes((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case TYPES: - return getTypes(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case TYPES: - return isSetTypes(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TTypeDesc) - return this.equals((TTypeDesc)that); - return false; - } - - public boolean equals(TTypeDesc that) { - if (that == null) - return false; - - boolean this_present_types = true && this.isSetTypes(); - boolean that_present_types = true && that.isSetTypes(); - if (this_present_types || that_present_types) { - if (!(this_present_types && that_present_types)) - return false; - if (!this.types.equals(that.types)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_types = true && (isSetTypes()); - builder.append(present_types); - if (present_types) - builder.append(types); - - return builder.toHashCode(); - } - - public int compareTo(TTypeDesc other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TTypeDesc typedOther = (TTypeDesc)other; - - lastComparison = Boolean.valueOf(isSetTypes()).compareTo(typedOther.isSetTypes()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTypes()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.types, typedOther.types); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TTypeDesc("); - boolean first = true; - - sb.append("types:"); - if (this.types == null) { - sb.append("null"); - } else { - sb.append(this.types); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetTypes()) { - throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'types' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TTypeDescStandardSchemeFactory implements SchemeFactory { - public TTypeDescStandardScheme getScheme() { - return new TTypeDescStandardScheme(); - } - } - - private static class TTypeDescStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TTypeDesc struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TYPES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list30 = iprot.readListBegin(); - struct.types = new ArrayList(_list30.size); - for (int _i31 = 0; _i31 < _list30.size; ++_i31) - { - TTypeEntry _elem32; // optional - _elem32 = new TTypeEntry(); - _elem32.read(iprot); - struct.types.add(_elem32); - } - iprot.readListEnd(); - } - struct.setTypesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TTypeDesc struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.types != null) { - oprot.writeFieldBegin(TYPES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.types.size())); - for (TTypeEntry _iter33 : struct.types) - { - _iter33.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TTypeDescTupleSchemeFactory implements SchemeFactory { - public TTypeDescTupleScheme getScheme() { - return new TTypeDescTupleScheme(); - } - } - - private static class TTypeDescTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TTypeDesc struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.types.size()); - for (TTypeEntry _iter34 : struct.types) - { - _iter34.write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TTypeDesc struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list35 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.types = 
new ArrayList(_list35.size); - for (int _i36 = 0; _i36 < _list35.size; ++_i36) - { - TTypeEntry _elem37; // optional - _elem37 = new TTypeEntry(); - _elem37.read(iprot); - struct.types.add(_elem37); - } - } - struct.setTypesIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeEntry.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeEntry.java deleted file mode 100644 index d0d70c1279572..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeEntry.java +++ /dev/null @@ -1,610 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TTypeEntry extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeEntry"); - private static final org.apache.thrift.protocol.TField PRIMITIVE_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("primitiveEntry", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField ARRAY_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("arrayEntry", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField MAP_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("mapEntry", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField STRUCT_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("structEntry", org.apache.thrift.protocol.TType.STRUCT, (short)4); - private static final org.apache.thrift.protocol.TField UNION_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("unionEntry", org.apache.thrift.protocol.TType.STRUCT, (short)5); - private static final org.apache.thrift.protocol.TField USER_DEFINED_TYPE_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("userDefinedTypeEntry", org.apache.thrift.protocol.TType.STRUCT, (short)6); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - PRIMITIVE_ENTRY((short)1, "primitiveEntry"), - ARRAY_ENTRY((short)2, "arrayEntry"), - MAP_ENTRY((short)3, "mapEntry"), - STRUCT_ENTRY((short)4, "structEntry"), - UNION_ENTRY((short)5, "unionEntry"), - USER_DEFINED_TYPE_ENTRY((short)6, "userDefinedTypeEntry"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // PRIMITIVE_ENTRY - return PRIMITIVE_ENTRY; - case 2: // ARRAY_ENTRY - return ARRAY_ENTRY; - case 3: // MAP_ENTRY - return MAP_ENTRY; - case 4: // STRUCT_ENTRY - return STRUCT_ENTRY; - case 5: // UNION_ENTRY - return UNION_ENTRY; - case 6: // USER_DEFINED_TYPE_ENTRY - return USER_DEFINED_TYPE_ENTRY; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.PRIMITIVE_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("primitiveEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPrimitiveTypeEntry.class))); - tmpMap.put(_Fields.ARRAY_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("arrayEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TArrayTypeEntry.class))); - tmpMap.put(_Fields.MAP_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("mapEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TMapTypeEntry.class))); - tmpMap.put(_Fields.STRUCT_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("structEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStructTypeEntry.class))); - tmpMap.put(_Fields.UNION_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("unionEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TUnionTypeEntry.class))); - tmpMap.put(_Fields.USER_DEFINED_TYPE_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("userDefinedTypeEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TUserDefinedTypeEntry.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeEntry.class, metaDataMap); - } - - public TTypeEntry() { - super(); - } - - public TTypeEntry(TTypeEntry._Fields setField, Object value) { - super(setField, value); - } - - public TTypeEntry(TTypeEntry other) { - super(other); - } - public TTypeEntry deepCopy() { - return new TTypeEntry(this); - } - - public static TTypeEntry primitiveEntry(TPrimitiveTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setPrimitiveEntry(value); - return x; - } - - public static TTypeEntry arrayEntry(TArrayTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setArrayEntry(value); - return x; - } - - public static TTypeEntry mapEntry(TMapTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setMapEntry(value); - return x; - } - - public static TTypeEntry structEntry(TStructTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setStructEntry(value); - return x; - } - - public static TTypeEntry unionEntry(TUnionTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setUnionEntry(value); - return x; - } - - public static TTypeEntry userDefinedTypeEntry(TUserDefinedTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setUserDefinedTypeEntry(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case PRIMITIVE_ENTRY: - if (value instanceof TPrimitiveTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TPrimitiveTypeEntry for field 'primitiveEntry', but got " + value.getClass().getSimpleName()); - case ARRAY_ENTRY: - if (value instanceof TArrayTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TArrayTypeEntry for field 'arrayEntry', but got " + value.getClass().getSimpleName()); - case MAP_ENTRY: - if (value instanceof TMapTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TMapTypeEntry for field 'mapEntry', but got " + value.getClass().getSimpleName()); - case STRUCT_ENTRY: - if (value instanceof TStructTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TStructTypeEntry for field 'structEntry', but got " + value.getClass().getSimpleName()); - case UNION_ENTRY: - if (value instanceof TUnionTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TUnionTypeEntry for field 'unionEntry', but got " + value.getClass().getSimpleName()); - case USER_DEFINED_TYPE_ENTRY: - if (value instanceof TUserDefinedTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TUserDefinedTypeEntry for field 'userDefinedTypeEntry', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case PRIMITIVE_ENTRY: - if (field.type == PRIMITIVE_ENTRY_FIELD_DESC.type) { - TPrimitiveTypeEntry primitiveEntry; - primitiveEntry = new TPrimitiveTypeEntry(); - primitiveEntry.read(iprot); - return primitiveEntry; - } else { - 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case ARRAY_ENTRY: - if (field.type == ARRAY_ENTRY_FIELD_DESC.type) { - TArrayTypeEntry arrayEntry; - arrayEntry = new TArrayTypeEntry(); - arrayEntry.read(iprot); - return arrayEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case MAP_ENTRY: - if (field.type == MAP_ENTRY_FIELD_DESC.type) { - TMapTypeEntry mapEntry; - mapEntry = new TMapTypeEntry(); - mapEntry.read(iprot); - return mapEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRUCT_ENTRY: - if (field.type == STRUCT_ENTRY_FIELD_DESC.type) { - TStructTypeEntry structEntry; - structEntry = new TStructTypeEntry(); - structEntry.read(iprot); - return structEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case UNION_ENTRY: - if (field.type == UNION_ENTRY_FIELD_DESC.type) { - TUnionTypeEntry unionEntry; - unionEntry = new TUnionTypeEntry(); - unionEntry.read(iprot); - return unionEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case USER_DEFINED_TYPE_ENTRY: - if (field.type == USER_DEFINED_TYPE_ENTRY_FIELD_DESC.type) { - TUserDefinedTypeEntry userDefinedTypeEntry; - userDefinedTypeEntry = new TUserDefinedTypeEntry(); - userDefinedTypeEntry.read(iprot); - return userDefinedTypeEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case PRIMITIVE_ENTRY: - TPrimitiveTypeEntry primitiveEntry = (TPrimitiveTypeEntry)value_; - primitiveEntry.write(oprot); - return; - case ARRAY_ENTRY: - TArrayTypeEntry arrayEntry = (TArrayTypeEntry)value_; - arrayEntry.write(oprot); - return; - case MAP_ENTRY: - TMapTypeEntry mapEntry = (TMapTypeEntry)value_; - mapEntry.write(oprot); - return; - case STRUCT_ENTRY: - TStructTypeEntry structEntry = (TStructTypeEntry)value_; - structEntry.write(oprot); - return; - case UNION_ENTRY: - TUnionTypeEntry unionEntry = (TUnionTypeEntry)value_; - unionEntry.write(oprot); - return; - case USER_DEFINED_TYPE_ENTRY: - TUserDefinedTypeEntry userDefinedTypeEntry = (TUserDefinedTypeEntry)value_; - userDefinedTypeEntry.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case PRIMITIVE_ENTRY: - TPrimitiveTypeEntry primitiveEntry; - primitiveEntry = new TPrimitiveTypeEntry(); - primitiveEntry.read(iprot); - return primitiveEntry; - case ARRAY_ENTRY: - TArrayTypeEntry arrayEntry; - arrayEntry = new TArrayTypeEntry(); - arrayEntry.read(iprot); - return arrayEntry; - case MAP_ENTRY: - TMapTypeEntry mapEntry; - mapEntry = new TMapTypeEntry(); - mapEntry.read(iprot); - return mapEntry; - case STRUCT_ENTRY: - TStructTypeEntry structEntry; - structEntry = new TStructTypeEntry(); - structEntry.read(iprot); - return structEntry; - case 
UNION_ENTRY: - TUnionTypeEntry unionEntry; - unionEntry = new TUnionTypeEntry(); - unionEntry.read(iprot); - return unionEntry; - case USER_DEFINED_TYPE_ENTRY: - TUserDefinedTypeEntry userDefinedTypeEntry; - userDefinedTypeEntry = new TUserDefinedTypeEntry(); - userDefinedTypeEntry.read(iprot); - return userDefinedTypeEntry; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case PRIMITIVE_ENTRY: - TPrimitiveTypeEntry primitiveEntry = (TPrimitiveTypeEntry)value_; - primitiveEntry.write(oprot); - return; - case ARRAY_ENTRY: - TArrayTypeEntry arrayEntry = (TArrayTypeEntry)value_; - arrayEntry.write(oprot); - return; - case MAP_ENTRY: - TMapTypeEntry mapEntry = (TMapTypeEntry)value_; - mapEntry.write(oprot); - return; - case STRUCT_ENTRY: - TStructTypeEntry structEntry = (TStructTypeEntry)value_; - structEntry.write(oprot); - return; - case UNION_ENTRY: - TUnionTypeEntry unionEntry = (TUnionTypeEntry)value_; - unionEntry.write(oprot); - return; - case USER_DEFINED_TYPE_ENTRY: - TUserDefinedTypeEntry userDefinedTypeEntry = (TUserDefinedTypeEntry)value_; - userDefinedTypeEntry.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case PRIMITIVE_ENTRY: - return PRIMITIVE_ENTRY_FIELD_DESC; - case ARRAY_ENTRY: - return ARRAY_ENTRY_FIELD_DESC; - case MAP_ENTRY: - return MAP_ENTRY_FIELD_DESC; - case STRUCT_ENTRY: - return STRUCT_ENTRY_FIELD_DESC; - case UNION_ENTRY: - return UNION_ENTRY_FIELD_DESC; - case USER_DEFINED_TYPE_ENTRY: - return USER_DEFINED_TYPE_ENTRY_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public TPrimitiveTypeEntry getPrimitiveEntry() { - if (getSetField() == _Fields.PRIMITIVE_ENTRY) { - return (TPrimitiveTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'primitiveEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setPrimitiveEntry(TPrimitiveTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.PRIMITIVE_ENTRY; - value_ = value; - } - - public TArrayTypeEntry getArrayEntry() { - if (getSetField() == _Fields.ARRAY_ENTRY) { - return (TArrayTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'arrayEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setArrayEntry(TArrayTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.ARRAY_ENTRY; - value_ = value; - } - - public TMapTypeEntry getMapEntry() { - if (getSetField() == _Fields.MAP_ENTRY) { - return (TMapTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'mapEntry' because union is 
currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setMapEntry(TMapTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.MAP_ENTRY; - value_ = value; - } - - public TStructTypeEntry getStructEntry() { - if (getSetField() == _Fields.STRUCT_ENTRY) { - return (TStructTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'structEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStructEntry(TStructTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRUCT_ENTRY; - value_ = value; - } - - public TUnionTypeEntry getUnionEntry() { - if (getSetField() == _Fields.UNION_ENTRY) { - return (TUnionTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'unionEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setUnionEntry(TUnionTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.UNION_ENTRY; - value_ = value; - } - - public TUserDefinedTypeEntry getUserDefinedTypeEntry() { - if (getSetField() == _Fields.USER_DEFINED_TYPE_ENTRY) { - return (TUserDefinedTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'userDefinedTypeEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setUserDefinedTypeEntry(TUserDefinedTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.USER_DEFINED_TYPE_ENTRY; - value_ = value; - } - - public boolean isSetPrimitiveEntry() { - return setField_ == _Fields.PRIMITIVE_ENTRY; - } - - - public boolean isSetArrayEntry() { - return setField_ == _Fields.ARRAY_ENTRY; - } - - - public boolean isSetMapEntry() { - return setField_ == _Fields.MAP_ENTRY; - } - - - public boolean isSetStructEntry() { - return setField_ == _Fields.STRUCT_ENTRY; - } - - - public boolean isSetUnionEntry() { - return setField_ == _Fields.UNION_ENTRY; - } - - - public boolean isSetUserDefinedTypeEntry() { - return setField_ == _Fields.USER_DEFINED_TYPE_ENTRY; - } - - - public boolean equals(Object other) { - if (other instanceof TTypeEntry) { - return equals((TTypeEntry)other); - } else { - return false; - } - } - - public boolean equals(TTypeEntry other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TTypeEntry other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - HashCodeBuilder hcb = new HashCodeBuilder(); - hcb.append(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - hcb.append(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - hcb.append(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - hcb.append(value); - } - } - return hcb.toHashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch 
(org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeId.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeId.java deleted file mode 100644 index 40f05894623c0..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeId.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TTypeId implements org.apache.thrift.TEnum { - BOOLEAN_TYPE(0), - TINYINT_TYPE(1), - SMALLINT_TYPE(2), - INT_TYPE(3), - BIGINT_TYPE(4), - FLOAT_TYPE(5), - DOUBLE_TYPE(6), - STRING_TYPE(7), - TIMESTAMP_TYPE(8), - BINARY_TYPE(9), - ARRAY_TYPE(10), - MAP_TYPE(11), - STRUCT_TYPE(12), - UNION_TYPE(13), - USER_DEFINED_TYPE(14), - DECIMAL_TYPE(15), - NULL_TYPE(16), - DATE_TYPE(17), - VARCHAR_TYPE(18), - CHAR_TYPE(19), - INTERVAL_YEAR_MONTH_TYPE(20), - INTERVAL_DAY_TIME_TYPE(21); - - private final int value; - - private TTypeId(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - public static TTypeId findByValue(int value) { - switch (value) { - case 0: - return BOOLEAN_TYPE; - case 1: - return TINYINT_TYPE; - case 2: - return SMALLINT_TYPE; - case 3: - return INT_TYPE; - case 4: - return BIGINT_TYPE; - case 5: - return FLOAT_TYPE; - case 6: - return DOUBLE_TYPE; - case 7: - return STRING_TYPE; - case 8: - return TIMESTAMP_TYPE; - case 9: - return BINARY_TYPE; - case 10: - return ARRAY_TYPE; - case 11: - return MAP_TYPE; - case 12: - return STRUCT_TYPE; - case 13: - return UNION_TYPE; - case 14: - return USER_DEFINED_TYPE; - case 15: - return DECIMAL_TYPE; - case 16: - return NULL_TYPE; - case 17: - return DATE_TYPE; - case 18: - return VARCHAR_TYPE; - case 19: - return CHAR_TYPE; - case 20: - return INTERVAL_YEAR_MONTH_TYPE; - case 21: - return INTERVAL_DAY_TIME_TYPE; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeQualifierValue.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeQualifierValue.java deleted file mode 100644 index a3e3829372276..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeQualifierValue.java +++ /dev/null @@ -1,361 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TTypeQualifierValue extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeQualifierValue"); - private static final org.apache.thrift.protocol.TField I32_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("i32Value", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField STRING_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("stringValue", org.apache.thrift.protocol.TType.STRING, (short)2); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - I32_VALUE((short)1, "i32Value"), - STRING_VALUE((short)2, "stringValue"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // I32_VALUE - return I32_VALUE; - case 2: // STRING_VALUE - return STRING_VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.I32_VALUE, new org.apache.thrift.meta_data.FieldMetaData("i32Value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.STRING_VALUE, new org.apache.thrift.meta_data.FieldMetaData("stringValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeQualifierValue.class, metaDataMap); - } - - public TTypeQualifierValue() { - super(); - } - - public TTypeQualifierValue(TTypeQualifierValue._Fields setField, Object value) { - super(setField, value); - } - - public TTypeQualifierValue(TTypeQualifierValue other) { - super(other); - } - public TTypeQualifierValue deepCopy() { - return new TTypeQualifierValue(this); - } - - public static TTypeQualifierValue i32Value(int value) { - TTypeQualifierValue x = new TTypeQualifierValue(); - x.setI32Value(value); - return x; - } - - public static TTypeQualifierValue stringValue(String value) { - TTypeQualifierValue x = new TTypeQualifierValue(); - x.setStringValue(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case I32_VALUE: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'i32Value', but got " + value.getClass().getSimpleName()); - case STRING_VALUE: - if (value instanceof String) { - break; - } - throw new ClassCastException("Was expecting value of type String for field 'stringValue', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case I32_VALUE: - if (field.type == I32_VALUE_FIELD_DESC.type) { - Integer i32Value; - i32Value = 
iprot.readI32(); - return i32Value; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRING_VALUE: - if (field.type == STRING_VALUE_FIELD_DESC.type) { - String stringValue; - stringValue = iprot.readString(); - return stringValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case I32_VALUE: - Integer i32Value = (Integer)value_; - oprot.writeI32(i32Value); - return; - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case I32_VALUE: - Integer i32Value; - i32Value = iprot.readI32(); - return i32Value; - case STRING_VALUE: - String stringValue; - stringValue = iprot.readString(); - return stringValue; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case I32_VALUE: - Integer i32Value = (Integer)value_; - oprot.writeI32(i32Value); - return; - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case I32_VALUE: - return I32_VALUE_FIELD_DESC; - case STRING_VALUE: - return STRING_VALUE_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public int getI32Value() { - if (getSetField() == _Fields.I32_VALUE) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i32Value' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI32Value(int value) { - setField_ = _Fields.I32_VALUE; - value_ = value; - } - - public String getStringValue() { - if (getSetField() == _Fields.STRING_VALUE) { - return (String)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'stringValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringValue(String value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRING_VALUE; - value_ = value; - } - - public boolean 
isSetI32Value() { - return setField_ == _Fields.I32_VALUE; - } - - - public boolean isSetStringValue() { - return setField_ == _Fields.STRING_VALUE; - } - - - public boolean equals(Object other) { - if (other instanceof TTypeQualifierValue) { - return equals((TTypeQualifierValue)other); - } else { - return false; - } - } - - public boolean equals(TTypeQualifierValue other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TTypeQualifierValue other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - HashCodeBuilder hcb = new HashCodeBuilder(); - hcb.append(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - hcb.append(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - hcb.append(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - hcb.append(value); - } - } - return hcb.toHashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeQualifiers.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeQualifiers.java deleted file mode 100644 index 39355551d3722..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TTypeQualifiers.java +++ /dev/null @@ -1,450 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TTypeQualifiers implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TTypeQualifiers"); - - private static final org.apache.thrift.protocol.TField QUALIFIERS_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifiers", org.apache.thrift.protocol.TType.MAP, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TTypeQualifiersStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TTypeQualifiersTupleSchemeFactory()); - } - - private Map qualifiers; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - QUALIFIERS((short)1, "qualifiers"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // QUALIFIERS - return QUALIFIERS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.QUALIFIERS, new org.apache.thrift.meta_data.FieldMetaData("qualifiers", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeQualifierValue.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeQualifiers.class, metaDataMap); - } - - public TTypeQualifiers() { - } - - public TTypeQualifiers( - Map qualifiers) - { - this(); - this.qualifiers = qualifiers; - } - - /** - * Performs a deep copy on other. 
- */ - public TTypeQualifiers(TTypeQualifiers other) { - if (other.isSetQualifiers()) { - Map __this__qualifiers = new HashMap(); - for (Map.Entry other_element : other.qualifiers.entrySet()) { - - String other_element_key = other_element.getKey(); - TTypeQualifierValue other_element_value = other_element.getValue(); - - String __this__qualifiers_copy_key = other_element_key; - - TTypeQualifierValue __this__qualifiers_copy_value = new TTypeQualifierValue(other_element_value); - - __this__qualifiers.put(__this__qualifiers_copy_key, __this__qualifiers_copy_value); - } - this.qualifiers = __this__qualifiers; - } - } - - public TTypeQualifiers deepCopy() { - return new TTypeQualifiers(this); - } - - @Override - public void clear() { - this.qualifiers = null; - } - - public int getQualifiersSize() { - return (this.qualifiers == null) ? 0 : this.qualifiers.size(); - } - - public void putToQualifiers(String key, TTypeQualifierValue val) { - if (this.qualifiers == null) { - this.qualifiers = new HashMap(); - } - this.qualifiers.put(key, val); - } - - public Map getQualifiers() { - return this.qualifiers; - } - - public void setQualifiers(Map qualifiers) { - this.qualifiers = qualifiers; - } - - public void unsetQualifiers() { - this.qualifiers = null; - } - - /** Returns true if field qualifiers is set (has been assigned a value) and false otherwise */ - public boolean isSetQualifiers() { - return this.qualifiers != null; - } - - public void setQualifiersIsSet(boolean value) { - if (!value) { - this.qualifiers = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case QUALIFIERS: - if (value == null) { - unsetQualifiers(); - } else { - setQualifiers((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case QUALIFIERS: - return getQualifiers(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case QUALIFIERS: - return isSetQualifiers(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TTypeQualifiers) - return this.equals((TTypeQualifiers)that); - return false; - } - - public boolean equals(TTypeQualifiers that) { - if (that == null) - return false; - - boolean this_present_qualifiers = true && this.isSetQualifiers(); - boolean that_present_qualifiers = true && that.isSetQualifiers(); - if (this_present_qualifiers || that_present_qualifiers) { - if (!(this_present_qualifiers && that_present_qualifiers)) - return false; - if (!this.qualifiers.equals(that.qualifiers)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_qualifiers = true && (isSetQualifiers()); - builder.append(present_qualifiers); - if (present_qualifiers) - builder.append(qualifiers); - - return builder.toHashCode(); - } - - public int compareTo(TTypeQualifiers other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TTypeQualifiers typedOther = (TTypeQualifiers)other; - - lastComparison = Boolean.valueOf(isSetQualifiers()).compareTo(typedOther.isSetQualifiers()); - if (lastComparison != 0) { - 
return lastComparison; - } - if (isSetQualifiers()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.qualifiers, typedOther.qualifiers); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TTypeQualifiers("); - boolean first = true; - - sb.append("qualifiers:"); - if (this.qualifiers == null) { - sb.append("null"); - } else { - sb.append(this.qualifiers); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetQualifiers()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifiers' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TTypeQualifiersStandardSchemeFactory implements SchemeFactory { - public TTypeQualifiersStandardScheme getScheme() { - return new TTypeQualifiersStandardScheme(); - } - } - - private static class TTypeQualifiersStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TTypeQualifiers struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // QUALIFIERS - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map0 = iprot.readMapBegin(); - struct.qualifiers = new HashMap(2*_map0.size); - for (int _i1 = 0; _i1 < _map0.size; ++_i1) - { - String _key2; // required - TTypeQualifierValue _val3; // required - _key2 = iprot.readString(); - _val3 = new TTypeQualifierValue(); - _val3.read(iprot); - struct.qualifiers.put(_key2, _val3); - } - iprot.readMapEnd(); - } - struct.setQualifiersIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TTypeQualifiers struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.qualifiers != null) 
{ - oprot.writeFieldBegin(QUALIFIERS_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.qualifiers.size())); - for (Map.Entry _iter4 : struct.qualifiers.entrySet()) - { - oprot.writeString(_iter4.getKey()); - _iter4.getValue().write(oprot); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TTypeQualifiersTupleSchemeFactory implements SchemeFactory { - public TTypeQualifiersTupleScheme getScheme() { - return new TTypeQualifiersTupleScheme(); - } - } - - private static class TTypeQualifiersTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TTypeQualifiers struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.qualifiers.size()); - for (Map.Entry _iter5 : struct.qualifiers.entrySet()) - { - oprot.writeString(_iter5.getKey()); - _iter5.getValue().write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TTypeQualifiers struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TMap _map6 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.qualifiers = new HashMap(2*_map6.size); - for (int _i7 = 0; _i7 < _map6.size; ++_i7) - { - String _key8; // required - TTypeQualifierValue _val9; // required - _key8 = iprot.readString(); - _val9 = new TTypeQualifierValue(); - _val9.read(iprot); - struct.qualifiers.put(_key8, _val9); - } - } - struct.setQualifiersIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TUnionTypeEntry.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TUnionTypeEntry.java deleted file mode 100644 index 73dd45d3dd01a..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TUnionTypeEntry.java +++ /dev/null @@ -1,448 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TUnionTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TUnionTypeEntry"); - - private static final org.apache.thrift.protocol.TField NAME_TO_TYPE_PTR_FIELD_DESC 
= new org.apache.thrift.protocol.TField("nameToTypePtr", org.apache.thrift.protocol.TType.MAP, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TUnionTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TUnionTypeEntryTupleSchemeFactory()); - } - - private Map nameToTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NAME_TO_TYPE_PTR((short)1, "nameToTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // NAME_TO_TYPE_PTR - return NAME_TO_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAME_TO_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("nameToTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr")))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TUnionTypeEntry.class, metaDataMap); - } - - public TUnionTypeEntry() { - } - - public TUnionTypeEntry( - Map nameToTypePtr) - { - this(); - this.nameToTypePtr = nameToTypePtr; - } - - /** - * Performs a deep copy on other. 
- */ - public TUnionTypeEntry(TUnionTypeEntry other) { - if (other.isSetNameToTypePtr()) { - Map __this__nameToTypePtr = new HashMap(); - for (Map.Entry other_element : other.nameToTypePtr.entrySet()) { - - String other_element_key = other_element.getKey(); - Integer other_element_value = other_element.getValue(); - - String __this__nameToTypePtr_copy_key = other_element_key; - - Integer __this__nameToTypePtr_copy_value = other_element_value; - - __this__nameToTypePtr.put(__this__nameToTypePtr_copy_key, __this__nameToTypePtr_copy_value); - } - this.nameToTypePtr = __this__nameToTypePtr; - } - } - - public TUnionTypeEntry deepCopy() { - return new TUnionTypeEntry(this); - } - - @Override - public void clear() { - this.nameToTypePtr = null; - } - - public int getNameToTypePtrSize() { - return (this.nameToTypePtr == null) ? 0 : this.nameToTypePtr.size(); - } - - public void putToNameToTypePtr(String key, int val) { - if (this.nameToTypePtr == null) { - this.nameToTypePtr = new HashMap(); - } - this.nameToTypePtr.put(key, val); - } - - public Map getNameToTypePtr() { - return this.nameToTypePtr; - } - - public void setNameToTypePtr(Map nameToTypePtr) { - this.nameToTypePtr = nameToTypePtr; - } - - public void unsetNameToTypePtr() { - this.nameToTypePtr = null; - } - - /** Returns true if field nameToTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetNameToTypePtr() { - return this.nameToTypePtr != null; - } - - public void setNameToTypePtrIsSet(boolean value) { - if (!value) { - this.nameToTypePtr = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case NAME_TO_TYPE_PTR: - if (value == null) { - unsetNameToTypePtr(); - } else { - setNameToTypePtr((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case NAME_TO_TYPE_PTR: - return getNameToTypePtr(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case NAME_TO_TYPE_PTR: - return isSetNameToTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TUnionTypeEntry) - return this.equals((TUnionTypeEntry)that); - return false; - } - - public boolean equals(TUnionTypeEntry that) { - if (that == null) - return false; - - boolean this_present_nameToTypePtr = true && this.isSetNameToTypePtr(); - boolean that_present_nameToTypePtr = true && that.isSetNameToTypePtr(); - if (this_present_nameToTypePtr || that_present_nameToTypePtr) { - if (!(this_present_nameToTypePtr && that_present_nameToTypePtr)) - return false; - if (!this.nameToTypePtr.equals(that.nameToTypePtr)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_nameToTypePtr = true && (isSetNameToTypePtr()); - builder.append(present_nameToTypePtr); - if (present_nameToTypePtr) - builder.append(nameToTypePtr); - - return builder.toHashCode(); - } - - public int compareTo(TUnionTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TUnionTypeEntry typedOther = (TUnionTypeEntry)other; - - lastComparison = 
Boolean.valueOf(isSetNameToTypePtr()).compareTo(typedOther.isSetNameToTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNameToTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nameToTypePtr, typedOther.nameToTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TUnionTypeEntry("); - boolean first = true; - - sb.append("nameToTypePtr:"); - if (this.nameToTypePtr == null) { - sb.append("null"); - } else { - sb.append(this.nameToTypePtr); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNameToTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nameToTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TUnionTypeEntryStandardSchemeFactory implements SchemeFactory { - public TUnionTypeEntryStandardScheme getScheme() { - return new TUnionTypeEntryStandardScheme(); - } - } - - private static class TUnionTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TUnionTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // NAME_TO_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map20 = iprot.readMapBegin(); - struct.nameToTypePtr = new HashMap(2*_map20.size); - for (int _i21 = 0; _i21 < _map20.size; ++_i21) - { - String _key22; // required - int _val23; // required - _key22 = iprot.readString(); - _val23 = iprot.readI32(); - struct.nameToTypePtr.put(_key22, _val23); - } - iprot.readMapEnd(); - } - struct.setNameToTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TUnionTypeEntry struct) throws 
org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.nameToTypePtr != null) { - oprot.writeFieldBegin(NAME_TO_TYPE_PTR_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.nameToTypePtr.size())); - for (Map.Entry _iter24 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter24.getKey()); - oprot.writeI32(_iter24.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TUnionTypeEntryTupleSchemeFactory implements SchemeFactory { - public TUnionTypeEntryTupleScheme getScheme() { - return new TUnionTypeEntryTupleScheme(); - } - } - - private static class TUnionTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TUnionTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.nameToTypePtr.size()); - for (Map.Entry _iter25 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter25.getKey()); - oprot.writeI32(_iter25.getValue()); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TUnionTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TMap _map26 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.nameToTypePtr = new HashMap(2*_map26.size); - for (int _i27 = 0; _i27 < _map26.size; ++_i27) - { - String _key28; // required - int _val29; // required - _key28 = iprot.readString(); - _val29 = iprot.readI32(); - struct.nameToTypePtr.put(_key28, _val29); - } - } - struct.setNameToTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TUserDefinedTypeEntry.java b/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TUserDefinedTypeEntry.java deleted file mode 100644 index 3a111a2c8c2c6..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/gen/java/org/apache/hive/service/cli/thrift/TUserDefinedTypeEntry.java +++ /dev/null @@ -1,385 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.0) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.cli.thrift; - -import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class TUserDefinedTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct 
STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TUserDefinedTypeEntry"); - - private static final org.apache.thrift.protocol.TField TYPE_CLASS_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("typeClassName", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TUserDefinedTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TUserDefinedTypeEntryTupleSchemeFactory()); - } - - private String typeClassName; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TYPE_CLASS_NAME((short)1, "typeClassName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // TYPE_CLASS_NAME - return TYPE_CLASS_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TYPE_CLASS_NAME, new org.apache.thrift.meta_data.FieldMetaData("typeClassName", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TUserDefinedTypeEntry.class, metaDataMap); - } - - public TUserDefinedTypeEntry() { - } - - public TUserDefinedTypeEntry( - String typeClassName) - { - this(); - this.typeClassName = typeClassName; - } - - /** - * Performs a deep copy on other. 
- */ - public TUserDefinedTypeEntry(TUserDefinedTypeEntry other) { - if (other.isSetTypeClassName()) { - this.typeClassName = other.typeClassName; - } - } - - public TUserDefinedTypeEntry deepCopy() { - return new TUserDefinedTypeEntry(this); - } - - @Override - public void clear() { - this.typeClassName = null; - } - - public String getTypeClassName() { - return this.typeClassName; - } - - public void setTypeClassName(String typeClassName) { - this.typeClassName = typeClassName; - } - - public void unsetTypeClassName() { - this.typeClassName = null; - } - - /** Returns true if field typeClassName is set (has been assigned a value) and false otherwise */ - public boolean isSetTypeClassName() { - return this.typeClassName != null; - } - - public void setTypeClassNameIsSet(boolean value) { - if (!value) { - this.typeClassName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case TYPE_CLASS_NAME: - if (value == null) { - unsetTypeClassName(); - } else { - setTypeClassName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case TYPE_CLASS_NAME: - return getTypeClassName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case TYPE_CLASS_NAME: - return isSetTypeClassName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TUserDefinedTypeEntry) - return this.equals((TUserDefinedTypeEntry)that); - return false; - } - - public boolean equals(TUserDefinedTypeEntry that) { - if (that == null) - return false; - - boolean this_present_typeClassName = true && this.isSetTypeClassName(); - boolean that_present_typeClassName = true && that.isSetTypeClassName(); - if (this_present_typeClassName || that_present_typeClassName) { - if (!(this_present_typeClassName && that_present_typeClassName)) - return false; - if (!this.typeClassName.equals(that.typeClassName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - HashCodeBuilder builder = new HashCodeBuilder(); - - boolean present_typeClassName = true && (isSetTypeClassName()); - builder.append(present_typeClassName); - if (present_typeClassName) - builder.append(typeClassName); - - return builder.toHashCode(); - } - - public int compareTo(TUserDefinedTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - TUserDefinedTypeEntry typedOther = (TUserDefinedTypeEntry)other; - - lastComparison = Boolean.valueOf(isSetTypeClassName()).compareTo(typedOther.isSetTypeClassName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTypeClassName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.typeClassName, typedOther.typeClassName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TUserDefinedTypeEntry("); - boolean first = true; - - sb.append("typeClassName:"); - if (this.typeClassName == null) { - sb.append("null"); - } else { - sb.append(this.typeClassName); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetTypeClassName()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'typeClassName' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TUserDefinedTypeEntryStandardSchemeFactory implements SchemeFactory { - public TUserDefinedTypeEntryStandardScheme getScheme() { - return new TUserDefinedTypeEntryStandardScheme(); - } - } - - private static class TUserDefinedTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TUserDefinedTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TYPE_CLASS_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.typeClassName = iprot.readString(); - struct.setTypeClassNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TUserDefinedTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.typeClassName != null) { - oprot.writeFieldBegin(TYPE_CLASS_NAME_FIELD_DESC); - oprot.writeString(struct.typeClassName); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TUserDefinedTypeEntryTupleSchemeFactory implements SchemeFactory { - public TUserDefinedTypeEntryTupleScheme getScheme() { - return new TUserDefinedTypeEntryTupleScheme(); - } - } - - private static class TUserDefinedTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TUserDefinedTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeString(struct.typeClassName); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TUserDefinedTypeEntry struct) throws 
org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.typeClassName = iprot.readString(); - struct.setTypeClassNameIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/AbstractService.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/AbstractService.java deleted file mode 100644 index 7e557aeccf5b0..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/AbstractService.java +++ /dev/null @@ -1,184 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; - -/** - * AbstractService. - * - */ -public abstract class AbstractService implements Service { - - private static final Log LOG = LogFactory.getLog(AbstractService.class); - - /** - * Service state: initially {@link STATE#NOTINITED}. - */ - private Service.STATE state = STATE.NOTINITED; - - /** - * Service name. - */ - private final String name; - /** - * Service start time. Will be zero until the service is started. - */ - private long startTime; - - /** - * The configuration. Will be null until the service is initialized. - */ - private HiveConf hiveConf; - - /** - * List of state change listeners; it is final to ensure - * that it will never be null. - */ - private final List listeners = - new ArrayList(); - - /** - * Construct the service. 
- * - * @param name - * service name - */ - public AbstractService(String name) { - this.name = name; - } - - @Override - public synchronized Service.STATE getServiceState() { - return state; - } - - /** - * {@inheritDoc} - * - * @throws IllegalStateException - * if the current service state does not permit - * this action - */ - @Override - public synchronized void init(HiveConf hiveConf) { - ensureCurrentState(STATE.NOTINITED); - this.hiveConf = hiveConf; - changeState(STATE.INITED); - LOG.info("Service:" + getName() + " is inited."); - } - - /** - * {@inheritDoc} - * - * @throws IllegalStateException - * if the current service state does not permit - * this action - */ - @Override - public synchronized void start() { - startTime = System.currentTimeMillis(); - ensureCurrentState(STATE.INITED); - changeState(STATE.STARTED); - LOG.info("Service:" + getName() + " is started."); - } - - /** - * {@inheritDoc} - * - * @throws IllegalStateException - * if the current service state does not permit - * this action - */ - @Override - public synchronized void stop() { - if (state == STATE.STOPPED || - state == STATE.INITED || - state == STATE.NOTINITED) { - // already stopped, or else it was never - // started (eg another service failing canceled startup) - return; - } - ensureCurrentState(STATE.STARTED); - changeState(STATE.STOPPED); - LOG.info("Service:" + getName() + " is stopped."); - } - - @Override - public synchronized void register(ServiceStateChangeListener l) { - listeners.add(l); - } - - @Override - public synchronized void unregister(ServiceStateChangeListener l) { - listeners.remove(l); - } - - @Override - public String getName() { - return name; - } - - @Override - public synchronized HiveConf getHiveConf() { - return hiveConf; - } - - @Override - public long getStartTime() { - return startTime; - } - - /** - * Verify that a service is in a given state. - * - * @param currentState - * the desired state - * @throws IllegalStateException - * if the service state is different from - * the desired state - */ - private void ensureCurrentState(Service.STATE currentState) { - ServiceOperations.ensureCurrentState(state, currentState); - } - - /** - * Change to a new state and notify all listeners. - * This is a private method that is only invoked from synchronized methods, - * which avoid having to clone the listener list. It does imply that - * the state change listener methods should be short lived, as they - * will delay the state transition. - * - * @param newState - * new service state - */ - private void changeState(Service.STATE newState) { - state = newState; - // notify listeners - for (ServiceStateChangeListener l : listeners) { - l.stateChanged(this); - } - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/CompositeService.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/CompositeService.java deleted file mode 100644 index 897911872b80f..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/CompositeService.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; - -/** - * CompositeService. - * - */ -public class CompositeService extends AbstractService { - - private static final Log LOG = LogFactory.getLog(CompositeService.class); - - private final List serviceList = new ArrayList(); - - public CompositeService(String name) { - super(name); - } - - public Collection getServices() { - return Collections.unmodifiableList(serviceList); - } - - protected synchronized void addService(Service service) { - serviceList.add(service); - } - - protected synchronized boolean removeService(Service service) { - return serviceList.remove(service); - } - - @Override - public synchronized void init(HiveConf hiveConf) { - for (Service service : serviceList) { - service.init(hiveConf); - } - super.init(hiveConf); - } - - @Override - public synchronized void start() { - int i = 0; - try { - for (int n = serviceList.size(); i < n; i++) { - Service service = serviceList.get(i); - service.start(); - } - super.start(); - } catch (Throwable e) { - LOG.error("Error starting services " + getName(), e); - // Note that the state of the failed service is still INITED and not - // STARTED. Even though the last service is not started completely, still - // call stop() on all services including failed service to make sure cleanup - // happens. - stop(i); - throw new ServiceException("Failed to Start " + getName(), e); - } - - } - - @Override - public synchronized void stop() { - if (this.getServiceState() == STATE.STOPPED) { - // The base composite-service is already stopped, don't do anything again. - return; - } - if (serviceList.size() > 0) { - stop(serviceList.size() - 1); - } - super.stop(); - } - - private synchronized void stop(int numOfServicesStarted) { - // stop in reserve order of start - for (int i = numOfServicesStarted; i >= 0; i--) { - Service service = serviceList.get(i); - try { - service.stop(); - } catch (Throwable t) { - LOG.info("Error stopping " + service.getName(), t); - } - } - } - - /** - * JVM Shutdown hook for CompositeService which will stop the given - * CompositeService gracefully in case of JVM shutdown. 
- */ - public static class CompositeServiceShutdownHook implements Runnable { - - private final CompositeService compositeService; - - public CompositeServiceShutdownHook(CompositeService compositeService) { - this.compositeService = compositeService; - } - - @Override - public void run() { - try { - // Stop the Composite Service - compositeService.stop(); - } catch (Throwable t) { - LOG.info("Error stopping " + compositeService.getName(), t); - } - } - } - - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/CookieSigner.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/CookieSigner.java deleted file mode 100644 index f2a80c9d5ffbc..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/CookieSigner.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service; - -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; - -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -/** - * The cookie signer generates a signature based on SHA digest - * and appends it to the cookie value generated at the - * server side. It uses SHA digest algorithm to sign and verify signatures. - */ -public class CookieSigner { - private static final String SIGNATURE = "&s="; - private static final String SHA_STRING = "SHA"; - private byte[] secretBytes; - private static final Log LOG = LogFactory.getLog(CookieSigner.class); - - /** - * Constructor - * @param secret Secret Bytes - */ - public CookieSigner(byte[] secret) { - if (secret == null) { - throw new IllegalArgumentException(" NULL Secret Bytes"); - } - this.secretBytes = secret.clone(); - } - - /** - * Sign the cookie given the string token as input. - * @param str Input token - * @return Signed token that can be used to create a cookie - */ - public String signCookie(String str) { - if (str == null || str.isEmpty()) { - throw new IllegalArgumentException("NULL or empty string to sign"); - } - String signature = getSignature(str); - - if (LOG.isDebugEnabled()) { - LOG.debug("Signature generated for " + str + " is " + signature); - } - return str + SIGNATURE + signature; - } - - /** - * Verify a signed string and extracts the original string. 
- * @param signedStr The already signed string - * @return Raw Value of the string without the signature - */ - public String verifyAndExtract(String signedStr) { - int index = signedStr.lastIndexOf(SIGNATURE); - if (index == -1) { - throw new IllegalArgumentException("Invalid input sign: " + signedStr); - } - String originalSignature = signedStr.substring(index + SIGNATURE.length()); - String rawValue = signedStr.substring(0, index); - String currentSignature = getSignature(rawValue); - - if (LOG.isDebugEnabled()) { - LOG.debug("Signature generated for " + rawValue + " inside verify is " + currentSignature); - } - if (!MessageDigest.isEqual(originalSignature.getBytes(), currentSignature.getBytes())) { - throw new IllegalArgumentException("Invalid sign, original = " + originalSignature + - " current = " + currentSignature); - } - return rawValue; - } - - /** - * Get the signature of the input string based on SHA digest algorithm. - * @param str Input token - * @return Signed String - */ - private String getSignature(String str) { - try { - MessageDigest md = MessageDigest.getInstance(SHA_STRING); - md.update(str.getBytes()); - md.update(secretBytes); - byte[] digest = md.digest(); - return new Base64(0).encodeToString(digest); - } catch (NoSuchAlgorithmException ex) { - throw new RuntimeException("Invalid SHA digest String: " + SHA_STRING + - " " + ex.getMessage(), ex); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/ServiceOperations.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/ServiceOperations.java deleted file mode 100644 index f16863c1b41aa..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/ServiceOperations.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; - -/** - * ServiceOperations. - * - */ -public final class ServiceOperations { - private static final Log LOG = LogFactory.getLog(ServiceOperations.class); - - private ServiceOperations() { - } - - /** - * Verify that a service is in a given state. - * @param state the actual state a service is in - * @param expectedState the desired state - * @throws IllegalStateException if the service state is different from - * the desired state - */ - public static void ensureCurrentState(Service.STATE state, - Service.STATE expectedState) { - if (state != expectedState) { - throw new IllegalStateException("For this operation, the " + - "current service state must be " - + expectedState - + " instead of " + state); - } - } - - /** - * Initialize a service. 
- * - * The service state is checked before the operation begins. - * This process is not thread safe. - * @param service a service that must be in the state - * {@link Service.STATE#NOTINITED} - * @param configuration the configuration to initialize the service with - * @throws RuntimeException on a state change failure - * @throws IllegalStateException if the service is in the wrong state - */ - - public static void init(Service service, HiveConf configuration) { - Service.STATE state = service.getServiceState(); - ensureCurrentState(state, Service.STATE.NOTINITED); - service.init(configuration); - } - - /** - * Start a service. - * - * The service state is checked before the operation begins. - * This process is not thread safe. - * @param service a service that must be in the state - * {@link Service.STATE#INITED} - * @throws RuntimeException on a state change failure - * @throws IllegalStateException if the service is in the wrong state - */ - - public static void start(Service service) { - Service.STATE state = service.getServiceState(); - ensureCurrentState(state, Service.STATE.INITED); - service.start(); - } - - /** - * Initialize then start a service. - * - * The service state is checked before the operation begins. - * This process is not thread safe. - * @param service a service that must be in the state - * {@link Service.STATE#NOTINITED} - * @param configuration the configuration to initialize the service with - * @throws RuntimeException on a state change failure - * @throws IllegalStateException if the service is in the wrong state - */ - public static void deploy(Service service, HiveConf configuration) { - init(service, configuration); - start(service); - } - - /** - * Stop a service. - * - * Do nothing if the service is null or not in a state in which it can be/needs to be stopped. - * - * The service state is checked before the operation begins. - * This process is not thread safe. - * @param service a service or null - */ - public static void stop(Service service) { - if (service != null) { - Service.STATE state = service.getServiceState(); - if (state == Service.STATE.STARTED) { - service.stop(); - } - } - } - - /** - * Stop a service; if it is null do nothing. Exceptions are caught and - * logged at warn level. (but not Throwables). This operation is intended to - * be used in cleanup operations - * - * @param service a service; may be null - * @return any exception that was caught; null if none was. - */ - public static Exception stopQuietly(Service service) { - try { - stop(service); - } catch (Exception e) { - LOG.warn("When stopping the service " + service.getName() - + " : " + e, - e); - return e; - } - return null; - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/ServiceUtils.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/ServiceUtils.java deleted file mode 100644 index edb5eff9615bf..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/ServiceUtils.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
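Stepping back from the hunk for a moment: the ServiceOperations helpers above enforce the NOTINITED → INITED → STARTED lifecycle. A hedged usage sketch against the removed API (the concrete Service implementation is deliberately left abstract here):

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.service.Service;
import org.apache.hive.service.ServiceOperations;

public class LifecycleDemo {
  // "svc" is any Service implementation; its concrete type is not important here.
  static void deployAndStop(Service svc, HiveConf conf) {
    // deploy() = init() + start(); each step verifies the current state first and
    // throws IllegalStateException if the service is not NOTINITED / INITED.
    ServiceOperations.deploy(svc, conf);
    try {
      // ... use the service ...
    } finally {
      // stopQuietly() logs and returns any exception instead of throwing,
      // which makes it safe to call from cleanup paths.
      Exception stopError = ServiceOperations.stopQuietly(svc);
      if (stopError != null) {
        System.err.println("stop failed: " + stopError);
      }
    }
  }
}
```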
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hive.service; - -public class ServiceUtils { - - /* - * Get the index separating the user name from domain name (the user's name up - * to the first '/' or '@'). - * - * @param userName full user name. - * @return index of domain match or -1 if not found - */ - public static int indexOfDomainMatch(String userName) { - if (userName == null) { - return -1; - } - - int idx = userName.indexOf('/'); - int idx2 = userName.indexOf('@'); - int endIdx = Math.min(idx, idx2); // Use the earlier match. - // Unless at least one of '/' or '@' was not found, in - // which case, user the latter match. - if (endIdx == -1) { - endIdx = Math.max(idx, idx2); - } - return endIdx; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java deleted file mode 100644 index 10000f12ab329..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/HiveAuthFactory.java +++ /dev/null @@ -1,419 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
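For reference, how the removed ServiceUtils.indexOfDomainMatch() behaves on typical Kerberos-style principals (the principal strings below are made-up examples):

```java
import org.apache.hive.service.ServiceUtils;

public class DomainMatchDemo {
  public static void main(String[] args) {
    String withInstance = "alice/host1.example.com@EXAMPLE.COM";
    String withoutInstance = "alice@EXAMPLE.COM";

    // Index of the first '/' or '@' (the earlier of the two when both exist).
    int i1 = ServiceUtils.indexOfDomainMatch(withInstance);    // 5
    int i2 = ServiceUtils.indexOfDomainMatch(withoutInstance); // 5

    // Everything before that index is the short user name.
    System.out.println(withInstance.substring(0, i1));    // alice
    System.out.println(withoutInstance.substring(0, i2)); // alice
  }
}
```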
- */ -package org.apache.hive.service.auth; - -import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; - -import javax.net.ssl.SSLServerSocket; -import javax.security.auth.login.LoginException; -import javax.security.sasl.Sasl; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.shims.HadoopShims.KerberosNameShim; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.hive.thrift.DBTokenStore; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.thrift.ThriftCLIService; -import org.apache.thrift.TProcessorFactory; -import org.apache.thrift.transport.TSSLTransportFactory; -import org.apache.thrift.transport.TServerSocket; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import org.apache.thrift.transport.TTransportFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class helps in some aspects of authentication. It creates the proper Thrift classes for the - * given configuration as well as helps with authenticating requests. 
- */ -public class HiveAuthFactory { - private static final Logger LOG = LoggerFactory.getLogger(HiveAuthFactory.class); - - - public enum AuthTypes { - NOSASL("NOSASL"), - NONE("NONE"), - LDAP("LDAP"), - KERBEROS("KERBEROS"), - CUSTOM("CUSTOM"), - PAM("PAM"); - - private final String authType; - - AuthTypes(String authType) { - this.authType = authType; - } - - public String getAuthName() { - return authType; - } - - } - - private HadoopThriftAuthBridge.Server saslServer; - private String authTypeStr; - private final String transportMode; - private final HiveConf conf; - - public static final String HS2_PROXY_USER = "hive.server2.proxy.user"; - public static final String HS2_CLIENT_TOKEN = "hiveserver2ClientToken"; - - private static Field keytabFile = null; - private static Method getKeytab = null; - static { - Class clz = UserGroupInformation.class; - try { - keytabFile = clz.getDeclaredField("keytabFile"); - keytabFile.setAccessible(true); - } catch (NoSuchFieldException nfe) { - LOG.debug("Cannot find private field \"keytabFile\" in class: " + - UserGroupInformation.class.getCanonicalName(), nfe); - keytabFile = null; - } - - try { - getKeytab = clz.getDeclaredMethod("getKeytab"); - getKeytab.setAccessible(true); - } catch(NoSuchMethodException nme) { - LOG.debug("Cannot find private method \"getKeytab\" in class:" + - UserGroupInformation.class.getCanonicalName(), nme); - getKeytab = null; - } - } - - public HiveAuthFactory(HiveConf conf) throws TTransportException, IOException { - this.conf = conf; - transportMode = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE); - authTypeStr = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION); - - // In http mode we use NOSASL as the default auth type - if ("http".equalsIgnoreCase(transportMode)) { - if (authTypeStr == null) { - authTypeStr = AuthTypes.NOSASL.getAuthName(); - } - } else { - if (authTypeStr == null) { - authTypeStr = AuthTypes.NONE.getAuthName(); - } - if (authTypeStr.equalsIgnoreCase(AuthTypes.KERBEROS.getAuthName())) { - String principal = conf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL); - String keytab = conf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB); - if (needUgiLogin(UserGroupInformation.getCurrentUser(), - SecurityUtil.getServerPrincipal(principal, "0.0.0.0"), keytab)) { - saslServer = ShimLoader.getHadoopThriftAuthBridge().createServer(principal, keytab); - } else { - // Using the default constructor to avoid unnecessary UGI login. 
- saslServer = new HadoopThriftAuthBridge.Server(); - } - - // start delegation token manager - try { - // rawStore is only necessary for DBTokenStore - Object rawStore = null; - String tokenStoreClass = conf.getVar(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS); - - if (tokenStoreClass.equals(DBTokenStore.class.getName())) { - HMSHandler baseHandler = new HiveMetaStore.HMSHandler( - "new db based metaserver", conf, true); - rawStore = baseHandler.getMS(); - } - - saslServer.startDelegationTokenSecretManager(conf, rawStore, ServerMode.HIVESERVER2); - } - catch (MetaException|IOException e) { - throw new TTransportException("Failed to start token manager", e); - } - } - } - } - - public Map getSaslProperties() { - Map saslProps = new HashMap(); - SaslQOP saslQOP = SaslQOP.fromString(conf.getVar(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP)); - saslProps.put(Sasl.QOP, saslQOP.toString()); - saslProps.put(Sasl.SERVER_AUTH, "true"); - return saslProps; - } - - public TTransportFactory getAuthTransFactory() throws LoginException { - TTransportFactory transportFactory; - if (authTypeStr.equalsIgnoreCase(AuthTypes.KERBEROS.getAuthName())) { - try { - transportFactory = saslServer.createTransportFactory(getSaslProperties()); - } catch (TTransportException e) { - throw new LoginException(e.getMessage()); - } - } else if (authTypeStr.equalsIgnoreCase(AuthTypes.NONE.getAuthName())) { - transportFactory = PlainSaslHelper.getPlainTransportFactory(authTypeStr); - } else if (authTypeStr.equalsIgnoreCase(AuthTypes.LDAP.getAuthName())) { - transportFactory = PlainSaslHelper.getPlainTransportFactory(authTypeStr); - } else if (authTypeStr.equalsIgnoreCase(AuthTypes.PAM.getAuthName())) { - transportFactory = PlainSaslHelper.getPlainTransportFactory(authTypeStr); - } else if (authTypeStr.equalsIgnoreCase(AuthTypes.NOSASL.getAuthName())) { - transportFactory = new TTransportFactory(); - } else if (authTypeStr.equalsIgnoreCase(AuthTypes.CUSTOM.getAuthName())) { - transportFactory = PlainSaslHelper.getPlainTransportFactory(authTypeStr); - } else { - throw new LoginException("Unsupported authentication type " + authTypeStr); - } - return transportFactory; - } - - /** - * Returns the thrift processor factory for HiveServer2 running in binary mode - * @param service - * @return - * @throws LoginException - */ - public TProcessorFactory getAuthProcFactory(ThriftCLIService service) throws LoginException { - if (authTypeStr.equalsIgnoreCase(AuthTypes.KERBEROS.getAuthName())) { - return KerberosSaslHelper.getKerberosProcessorFactory(saslServer, service); - } else { - return PlainSaslHelper.getPlainProcessorFactory(service); - } - } - - public String getRemoteUser() { - return saslServer == null ? 
null : saslServer.getRemoteUser(); - } - - public String getIpAddress() { - if (saslServer == null || saslServer.getRemoteAddress() == null) { - return null; - } else { - return saslServer.getRemoteAddress().getHostAddress(); - } - } - - // Perform kerberos login using the hadoop shim API if the configuration is available - public static void loginFromKeytab(HiveConf hiveConf) throws IOException { - String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL); - String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB); - if (principal.isEmpty() || keyTabFile.isEmpty()) { - throw new IOException("HiveServer2 Kerberos principal or keytab is not correctly configured"); - } else { - UserGroupInformation.loginUserFromKeytab(SecurityUtil.getServerPrincipal(principal, "0.0.0.0"), keyTabFile); - } - } - - // Perform SPNEGO login using the hadoop shim API if the configuration is available - public static UserGroupInformation loginFromSpnegoKeytabAndReturnUGI(HiveConf hiveConf) - throws IOException { - String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL); - String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB); - if (principal.isEmpty() || keyTabFile.isEmpty()) { - throw new IOException("HiveServer2 SPNEGO principal or keytab is not correctly configured"); - } else { - return UserGroupInformation.loginUserFromKeytabAndReturnUGI(SecurityUtil.getServerPrincipal(principal, "0.0.0.0"), keyTabFile); - } - } - - public static TTransport getSocketTransport(String host, int port, int loginTimeout) { - return new TSocket(host, port, loginTimeout); - } - - public static TTransport getSSLSocket(String host, int port, int loginTimeout) - throws TTransportException { - return TSSLTransportFactory.getClientSocket(host, port, loginTimeout); - } - - public static TTransport getSSLSocket(String host, int port, int loginTimeout, - String trustStorePath, String trustStorePassWord) throws TTransportException { - TSSLTransportFactory.TSSLTransportParameters params = - new TSSLTransportFactory.TSSLTransportParameters(); - params.setTrustStore(trustStorePath, trustStorePassWord); - params.requireClientAuth(true); - return TSSLTransportFactory.getClientSocket(host, port, loginTimeout, params); - } - - public static TServerSocket getServerSocket(String hiveHost, int portNum) - throws TTransportException { - InetSocketAddress serverAddress; - if (hiveHost == null || hiveHost.isEmpty()) { - // Wildcard bind - serverAddress = new InetSocketAddress(portNum); - } else { - serverAddress = new InetSocketAddress(hiveHost, portNum); - } - return new TServerSocket(serverAddress); - } - - public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, String keyStorePath, - String keyStorePassWord, List sslVersionBlacklist) throws TTransportException, - UnknownHostException { - TSSLTransportFactory.TSSLTransportParameters params = - new TSSLTransportFactory.TSSLTransportParameters(); - params.setKeyStore(keyStorePath, keyStorePassWord); - InetSocketAddress serverAddress; - if (hiveHost == null || hiveHost.isEmpty()) { - // Wildcard bind - serverAddress = new InetSocketAddress(portNum); - } else { - serverAddress = new InetSocketAddress(hiveHost, portNum); - } - TServerSocket thriftServerSocket = - TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress.getAddress(), params); - if (thriftServerSocket.getServerSocket() instanceof SSLServerSocket) { - List sslVersionBlacklistLocal = new ArrayList(); - for (String sslVersion : 
sslVersionBlacklist) { - sslVersionBlacklistLocal.add(sslVersion.trim().toLowerCase(Locale.ROOT)); - } - SSLServerSocket sslServerSocket = (SSLServerSocket) thriftServerSocket.getServerSocket(); - List enabledProtocols = new ArrayList(); - for (String protocol : sslServerSocket.getEnabledProtocols()) { - if (sslVersionBlacklistLocal.contains(protocol.toLowerCase(Locale.ROOT))) { - LOG.debug("Disabling SSL Protocol: " + protocol); - } else { - enabledProtocols.add(protocol); - } - } - sslServerSocket.setEnabledProtocols(enabledProtocols.toArray(new String[0])); - LOG.info("SSL Server Socket Enabled Protocols: " - + Arrays.toString(sslServerSocket.getEnabledProtocols())); - } - return thriftServerSocket; - } - - // retrieve delegation token for the given user - public String getDelegationToken(String owner, String renewer) throws HiveSQLException { - if (saslServer == null) { - throw new HiveSQLException( - "Delegation token only supported over kerberos authentication", "08S01"); - } - - try { - String tokenStr = saslServer.getDelegationTokenWithService(owner, renewer, HS2_CLIENT_TOKEN); - if (tokenStr == null || tokenStr.isEmpty()) { - throw new HiveSQLException( - "Received empty retrieving delegation token for user " + owner, "08S01"); - } - return tokenStr; - } catch (IOException e) { - throw new HiveSQLException( - "Error retrieving delegation token for user " + owner, "08S01", e); - } catch (InterruptedException e) { - throw new HiveSQLException("delegation token retrieval interrupted", "08S01", e); - } - } - - // cancel given delegation token - public void cancelDelegationToken(String delegationToken) throws HiveSQLException { - if (saslServer == null) { - throw new HiveSQLException( - "Delegation token only supported over kerberos authentication", "08S01"); - } - try { - saslServer.cancelDelegationToken(delegationToken); - } catch (IOException e) { - throw new HiveSQLException( - "Error canceling delegation token " + delegationToken, "08S01", e); - } - } - - public void renewDelegationToken(String delegationToken) throws HiveSQLException { - if (saslServer == null) { - throw new HiveSQLException( - "Delegation token only supported over kerberos authentication", "08S01"); - } - try { - saslServer.renewDelegationToken(delegationToken); - } catch (IOException e) { - throw new HiveSQLException( - "Error renewing delegation token " + delegationToken, "08S01", e); - } - } - - public String getUserFromToken(String delegationToken) throws HiveSQLException { - if (saslServer == null) { - throw new HiveSQLException( - "Delegation token only supported over kerberos authentication", "08S01"); - } - try { - return saslServer.getUserFromToken(delegationToken); - } catch (IOException e) { - throw new HiveSQLException( - "Error extracting user from delegation token " + delegationToken, "08S01", e); - } - } - - public static void verifyProxyAccess(String realUser, String proxyUser, String ipAddress, - HiveConf hiveConf) throws HiveSQLException { - try { - UserGroupInformation sessionUgi; - if (UserGroupInformation.isSecurityEnabled()) { - KerberosNameShim kerbName = ShimLoader.getHadoopShims().getKerberosNameShim(realUser); - sessionUgi = UserGroupInformation.createProxyUser( - kerbName.getServiceName(), UserGroupInformation.getLoginUser()); - } else { - sessionUgi = UserGroupInformation.createRemoteUser(realUser); - } - if (!proxyUser.equalsIgnoreCase(realUser)) { - ProxyUsers.refreshSuperUserGroupsConfiguration(hiveConf); - ProxyUsers.authorize(UserGroupInformation.createProxyUser(proxyUser, 
sessionUgi), - ipAddress, hiveConf); - } - } catch (IOException e) { - throw new HiveSQLException( - "Failed to validate proxy privilege of " + realUser + " for " + proxyUser, "08S01", e); - } - } - - public static boolean needUgiLogin(UserGroupInformation ugi, String principal, String keytab) { - return null == ugi || !ugi.hasKerberosCredentials() || !ugi.getUserName().equals(principal) || - !Objects.equals(keytab, getKeytabFromUgi()); - } - - private static String getKeytabFromUgi() { - synchronized (UserGroupInformation.class) { - try { - if (keytabFile != null) { - return (String) keytabFile.get(null); - } else if (getKeytab != null) { - return (String) getKeytab.invoke(UserGroupInformation.getCurrentUser()); - } else { - return null; - } - } catch (Exception e) { - LOG.debug("Fail to get keytabFile path via reflection", e); - return null; - } - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java deleted file mode 100644 index f7375ee707830..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/HttpAuthUtils.java +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.auth; - -import java.security.AccessControlContext; -import java.security.AccessController; -import java.security.PrivilegedExceptionAction; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.StringTokenizer; - -import javax.security.auth.Subject; - -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.http.protocol.BasicHttpContext; -import org.apache.http.protocol.HttpContext; -import org.ietf.jgss.GSSContext; -import org.ietf.jgss.GSSManager; -import org.ietf.jgss.GSSName; -import org.ietf.jgss.Oid; - -/** - * Utility functions for HTTP mode authentication. 
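The getServerSSLSocket logic a little further up boils down to filtering an SSLServerSocket's enabled protocols against a configured deny list. A standalone sketch of that filtering using only JDK classes (the deny list below is illustrative; the removed code read it from HiveConf):

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import javax.net.ssl.SSLServerSocket;
import javax.net.ssl.SSLServerSocketFactory;

public class SslProtocolFilterDemo {
  public static void main(String[] args) throws IOException {
    // Illustrative deny list; the real code took it from configuration.
    List<String> denied = Arrays.asList("SSLv2", "SSLv3");

    SSLServerSocket server =
        (SSLServerSocket) SSLServerSocketFactory.getDefault().createServerSocket(0);

    List<String> enabled = new ArrayList<>();
    for (String protocol : server.getEnabledProtocols()) {
      // Compare case-insensitively, as the removed code did via toLowerCase.
      boolean blocked = denied.stream()
          .anyMatch(d -> d.toLowerCase(Locale.ROOT).equals(protocol.toLowerCase(Locale.ROOT)));
      if (!blocked) {
        enabled.add(protocol);
      }
    }
    server.setEnabledProtocols(enabled.toArray(new String[0]));
    System.out.println("Enabled protocols: " + Arrays.toString(server.getEnabledProtocols()));
    server.close();
  }
}
```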
- */ -public final class HttpAuthUtils { - public static final String WWW_AUTHENTICATE = "WWW-Authenticate"; - public static final String AUTHORIZATION = "Authorization"; - public static final String BASIC = "Basic"; - public static final String NEGOTIATE = "Negotiate"; - private static final Log LOG = LogFactory.getLog(HttpAuthUtils.class); - private static final String COOKIE_ATTR_SEPARATOR = "&"; - private static final String COOKIE_CLIENT_USER_NAME = "cu"; - private static final String COOKIE_CLIENT_RAND_NUMBER = "rn"; - private static final String COOKIE_KEY_VALUE_SEPARATOR = "="; - private static final Set COOKIE_ATTRIBUTES = - new HashSet(Arrays.asList(COOKIE_CLIENT_USER_NAME, COOKIE_CLIENT_RAND_NUMBER)); - - /** - * @return Stringified Base64 encoded kerberosAuthHeader on success - * @throws Exception - */ - public static String getKerberosServiceTicket(String principal, String host, - String serverHttpUrl, boolean assumeSubject) throws Exception { - String serverPrincipal = - ShimLoader.getHadoopThriftAuthBridge().getServerPrincipal(principal, host); - if (assumeSubject) { - // With this option, we're assuming that the external application, - // using the JDBC driver has done a JAAS kerberos login already - AccessControlContext context = AccessController.getContext(); - Subject subject = Subject.getSubject(context); - if (subject == null) { - throw new Exception("The Subject is not set"); - } - return Subject.doAs(subject, new HttpKerberosClientAction(serverPrincipal, serverHttpUrl)); - } else { - // JAAS login from ticket cache to setup the client UserGroupInformation - UserGroupInformation clientUGI = - ShimLoader.getHadoopThriftAuthBridge().getCurrentUGIWithConf("kerberos"); - return clientUGI.doAs(new HttpKerberosClientAction(serverPrincipal, serverHttpUrl)); - } - } - - /** - * Creates and returns a HS2 cookie token. - * @param clientUserName Client User name. - * @return An unsigned cookie token generated from input parameters. - * The final cookie generated is of the following format : - * {@code cu=&rn=&s=} - */ - public static String createCookieToken(String clientUserName) { - StringBuffer sb = new StringBuffer(); - sb.append(COOKIE_CLIENT_USER_NAME).append(COOKIE_KEY_VALUE_SEPARATOR).append(clientUserName) - .append(COOKIE_ATTR_SEPARATOR); - sb.append(COOKIE_CLIENT_RAND_NUMBER).append(COOKIE_KEY_VALUE_SEPARATOR) - .append((new Random(System.currentTimeMillis())).nextLong()); - return sb.toString(); - } - - /** - * Parses a cookie token to retrieve client user name. - * @param tokenStr Token String. - * @return A valid user name if input is of valid format, else returns null. - */ - public static String getUserNameFromCookieToken(String tokenStr) { - Map map = splitCookieToken(tokenStr); - - if (!map.keySet().equals(COOKIE_ATTRIBUTES)) { - LOG.error("Invalid token with missing attributes " + tokenStr); - return null; - } - return map.get(COOKIE_CLIENT_USER_NAME); - } - - /** - * Splits the cookie token into attributes pairs. - * @param str input token. - * @return a map with the attribute pairs of the token if the input is valid. - * Else, returns null. 
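The cookie token built by createCookieToken above is simply "cu=<user>&rn=<random>", and getUserNameFromCookieToken reverses it. A usage sketch against the removed class (the user name is illustrative, and this assumes the class is still available on the classpath):

```java
import org.apache.hive.service.auth.HttpAuthUtils;

public class CookieTokenDemo {
  public static void main(String[] args) {
    // Produces something like "cu=alice&rn=-1234567890123456789".
    String token = HttpAuthUtils.createCookieToken("alice");

    // Splits on '&', checks that exactly the cu/rn attributes are present,
    // and returns the cu value (or null for malformed input).
    String user = HttpAuthUtils.getUserNameFromCookieToken(token);
    System.out.println(user); // alice
  }
}
```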
- */ - private static Map splitCookieToken(String tokenStr) { - Map map = new HashMap(); - StringTokenizer st = new StringTokenizer(tokenStr, COOKIE_ATTR_SEPARATOR); - - while (st.hasMoreTokens()) { - String part = st.nextToken(); - int separator = part.indexOf(COOKIE_KEY_VALUE_SEPARATOR); - if (separator == -1) { - LOG.error("Invalid token string " + tokenStr); - return null; - } - String key = part.substring(0, separator); - String value = part.substring(separator + 1); - map.put(key, value); - } - return map; - } - - - private HttpAuthUtils() { - throw new UnsupportedOperationException("Can't initialize class"); - } - - /** - * We'll create an instance of this class within a doAs block so that the client's TGT credentials - * can be read from the Subject - */ - public static class HttpKerberosClientAction implements PrivilegedExceptionAction { - public static final String HTTP_RESPONSE = "HTTP_RESPONSE"; - public static final String SERVER_HTTP_URL = "SERVER_HTTP_URL"; - private final String serverPrincipal; - private final String serverHttpUrl; - private final Base64 base64codec; - private final HttpContext httpContext; - - public HttpKerberosClientAction(String serverPrincipal, String serverHttpUrl) { - this.serverPrincipal = serverPrincipal; - this.serverHttpUrl = serverHttpUrl; - base64codec = new Base64(0); - httpContext = new BasicHttpContext(); - httpContext.setAttribute(SERVER_HTTP_URL, serverHttpUrl); - } - - @Override - public String run() throws Exception { - // This Oid for Kerberos GSS-API mechanism. - Oid mechOid = new Oid("1.2.840.113554.1.2.2"); - // Oid for kerberos principal name - Oid krb5PrincipalOid = new Oid("1.2.840.113554.1.2.2.1"); - GSSManager manager = GSSManager.getInstance(); - // GSS name for server - GSSName serverName = manager.createName(serverPrincipal, krb5PrincipalOid); - // Create a GSSContext for authentication with the service. - // We're passing client credentials as null since we want them to be read from the Subject. - GSSContext gssContext = - manager.createContext(serverName, mechOid, null, GSSContext.DEFAULT_LIFETIME); - gssContext.requestMutualAuth(false); - // Establish context - byte[] inToken = new byte[0]; - byte[] outToken = gssContext.initSecContext(inToken, 0, inToken.length); - gssContext.dispose(); - // Base64 encoded and stringified token for server - return new String(base64codec.encode(outToken)); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/KerberosSaslHelper.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/KerberosSaslHelper.java deleted file mode 100644 index 52eb752f1e026..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/KerberosSaslHelper.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hive.service.auth; - -import java.io.IOException; -import java.util.Map; -import javax.security.sasl.SaslException; - -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; -import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server; -import org.apache.hive.service.cli.thrift.TCLIService; -import org.apache.hive.service.cli.thrift.TCLIService.Iface; -import org.apache.hive.service.cli.thrift.ThriftCLIService; -import org.apache.thrift.TProcessor; -import org.apache.thrift.TProcessorFactory; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TTransport; - -public final class KerberosSaslHelper { - - public static TProcessorFactory getKerberosProcessorFactory(Server saslServer, - ThriftCLIService service) { - return new CLIServiceProcessorFactory(saslServer, service); - } - - public static TTransport getKerberosTransport(String principal, String host, - TTransport underlyingTransport, Map saslProps, boolean assumeSubject) - throws SaslException { - try { - String[] names = principal.split("[/@]"); - if (names.length != 3) { - throw new IllegalArgumentException("Kerberos principal should have 3 parts: " + principal); - } - - if (assumeSubject) { - return createSubjectAssumedTransport(principal, underlyingTransport, saslProps); - } else { - HadoopThriftAuthBridge.Client authBridge = - ShimLoader.getHadoopThriftAuthBridge().createClientWithConf("kerberos"); - return authBridge.createClientTransport(principal, host, "KERBEROS", null, - underlyingTransport, saslProps); - } - } catch (IOException e) { - throw new SaslException("Failed to open client transport", e); - } - } - - public static TTransport createSubjectAssumedTransport(String principal, - TTransport underlyingTransport, Map saslProps) throws IOException { - String[] names = principal.split("[/@]"); - try { - TTransport saslTransport = - new TSaslClientTransport("GSSAPI", null, names[0], names[1], saslProps, null, - underlyingTransport); - return new TSubjectAssumingTransport(saslTransport); - } catch (SaslException se) { - throw new IOException("Could not instantiate SASL transport", se); - } - } - - public static TTransport getTokenTransport(String tokenStr, String host, - TTransport underlyingTransport, Map saslProps) throws SaslException { - HadoopThriftAuthBridge.Client authBridge = - ShimLoader.getHadoopThriftAuthBridge().createClientWithConf("kerberos"); - - try { - return authBridge.createClientTransport(null, host, "DIGEST", tokenStr, underlyingTransport, - saslProps); - } catch (IOException e) { - throw new SaslException("Failed to open client transport", e); - } - } - - private KerberosSaslHelper() { - throw new UnsupportedOperationException("Can't initialize class"); - } - - private static class CLIServiceProcessorFactory extends TProcessorFactory { - - private final ThriftCLIService service; - private final Server saslServer; - - CLIServiceProcessorFactory(Server saslServer, ThriftCLIService service) { - super(null); - this.service = service; - this.saslServer = saslServer; - } - - @Override - public TProcessor getProcessor(TTransport trans) { - TProcessor sqlProcessor = new TCLIService.Processor(service); - return saslServer.wrapNonAssumingProcessor(sqlProcessor); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java 
b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java deleted file mode 100644 index afc144199f1e8..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/PlainSaslHelper.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hive.service.auth; - -import java.io.IOException; -import java.security.Security; -import java.util.HashMap; -import javax.security.auth.callback.Callback; -import javax.security.auth.callback.CallbackHandler; -import javax.security.auth.callback.NameCallback; -import javax.security.auth.callback.PasswordCallback; -import javax.security.auth.callback.UnsupportedCallbackException; -import javax.security.auth.login.LoginException; -import javax.security.sasl.AuthenticationException; -import javax.security.sasl.AuthorizeCallback; -import javax.security.sasl.SaslException; - -import org.apache.hive.service.auth.AuthenticationProviderFactory.AuthMethods; -import org.apache.hive.service.auth.PlainSaslServer.SaslPlainProvider; -import org.apache.hive.service.cli.thrift.TCLIService.Iface; -import org.apache.hive.service.cli.thrift.ThriftCLIService; -import org.apache.thrift.TProcessor; -import org.apache.thrift.TProcessorFactory; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TSaslServerTransport; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportFactory; - -public final class PlainSaslHelper { - - public static TProcessorFactory getPlainProcessorFactory(ThriftCLIService service) { - return new SQLPlainProcessorFactory(service); - } - - // Register Plain SASL server provider - static { - Security.addProvider(new SaslPlainProvider()); - } - - public static TTransportFactory getPlainTransportFactory(String authTypeStr) - throws LoginException { - TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory(); - try { - saslFactory.addServerDefinition("PLAIN", authTypeStr, null, new HashMap(), - new PlainServerCallbackHandler(authTypeStr)); - } catch (AuthenticationException e) { - throw new LoginException("Error setting callback handler" + e); - } - return saslFactory; - } - - public static TTransport getPlainTransport(String username, String password, - TTransport underlyingTransport) throws SaslException { - return new TSaslClientTransport("PLAIN", null, null, null, new HashMap(), - new PlainCallbackHandler(username, password), underlyingTransport); - } - - private PlainSaslHelper() { - throw new UnsupportedOperationException("Can't initialize class"); - } - - private static final class PlainServerCallbackHandler implements CallbackHandler { - - private final AuthMethods authMethod; - - 
PlainServerCallbackHandler(String authMethodStr) throws AuthenticationException { - authMethod = AuthMethods.getValidAuthMethod(authMethodStr); - } - - @Override - public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { - String username = null; - String password = null; - AuthorizeCallback ac = null; - - for (Callback callback : callbacks) { - if (callback instanceof NameCallback) { - NameCallback nc = (NameCallback) callback; - username = nc.getName(); - } else if (callback instanceof PasswordCallback) { - PasswordCallback pc = (PasswordCallback) callback; - password = new String(pc.getPassword()); - } else if (callback instanceof AuthorizeCallback) { - ac = (AuthorizeCallback) callback; - } else { - throw new UnsupportedCallbackException(callback); - } - } - PasswdAuthenticationProvider provider = - AuthenticationProviderFactory.getAuthenticationProvider(authMethod); - provider.Authenticate(username, password); - if (ac != null) { - ac.setAuthorized(true); - } - } - } - - public static class PlainCallbackHandler implements CallbackHandler { - - private final String username; - private final String password; - - public PlainCallbackHandler(String username, String password) { - this.username = username; - this.password = password; - } - - @Override - public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { - for (Callback callback : callbacks) { - if (callback instanceof NameCallback) { - NameCallback nameCallback = (NameCallback) callback; - nameCallback.setName(username); - } else if (callback instanceof PasswordCallback) { - PasswordCallback passCallback = (PasswordCallback) callback; - passCallback.setPassword(password.toCharArray()); - } else { - throw new UnsupportedCallbackException(callback); - } - } - } - } - - private static final class SQLPlainProcessorFactory extends TProcessorFactory { - - private final ThriftCLIService service; - - SQLPlainProcessorFactory(ThriftCLIService service) { - super(null); - this.service = service; - } - - @Override - public TProcessor getProcessor(TTransport trans) { - return new TSetIpAddressProcessor(service); - } - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java deleted file mode 100644 index 9a61ad49942c8..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
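On the client side, PlainCallbackHandler above simply answers NameCallback/PasswordCallback; the same handler shape works with the JDK's own SASL entry point. A hedged, JDK-only sketch (whether the PLAIN client mechanism is available depends on the JDK's SASL providers, and the protocol name, host, and credentials below are made up; the removed code wraps this negotiation in a Thrift TSaslClientTransport instead):

```java
import java.util.HashMap;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;

public class PlainSaslClientDemo {
  public static void main(String[] args) throws Exception {
    CallbackHandler handler = callbacks -> {
      for (Callback cb : callbacks) {
        if (cb instanceof NameCallback) {
          ((NameCallback) cb).setName("alice");                       // made-up user
        } else if (cb instanceof PasswordCallback) {
          ((PasswordCallback) cb).setPassword("secret".toCharArray()); // made-up password
        } else {
          throw new UnsupportedCallbackException(cb);
        }
      }
    };

    SaslClient client = Sasl.createSaslClient(
        new String[] {"PLAIN"}, null, "hive", "example-host", new HashMap<>(), handler);
    if (client == null) {
      System.err.println("No provider for the PLAIN mechanism");
      return;
    }
    // For PLAIN, the initial response carries authzid NUL authcid NUL password.
    byte[] initialResponse = client.evaluateChallenge(new byte[0]);
    System.out.println("initial response bytes: " + initialResponse.length);
    client.dispose();
  }
}
```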
- */ - -package org.apache.hive.service.auth; - -import org.apache.hive.service.cli.thrift.TCLIService; -import org.apache.hive.service.cli.thrift.TCLIService.Iface; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSaslClientTransport; -import org.apache.thrift.transport.TSaslServerTransport; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class is responsible for setting the ipAddress for operations executed via HiveServer2. - * - * - IP address is only set for operations that calls listeners with hookContext - * - IP address is only set if the underlying transport mechanism is socket - * - * @see org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext - */ -public class TSetIpAddressProcessor extends TCLIService.Processor { - - private static final Logger LOGGER = LoggerFactory.getLogger(TSetIpAddressProcessor.class.getName()); - - public TSetIpAddressProcessor(Iface iface) { - super(iface); - } - - @Override - public boolean process(final TProtocol in, final TProtocol out) throws TException { - setIpAddress(in); - setUserName(in); - try { - return super.process(in, out); - } finally { - THREAD_LOCAL_USER_NAME.remove(); - THREAD_LOCAL_IP_ADDRESS.remove(); - } - } - - private void setUserName(final TProtocol in) { - TTransport transport = in.getTransport(); - if (transport instanceof TSaslServerTransport) { - String userName = ((TSaslServerTransport) transport).getSaslServer().getAuthorizationID(); - THREAD_LOCAL_USER_NAME.set(userName); - } - } - - protected void setIpAddress(final TProtocol in) { - TTransport transport = in.getTransport(); - TSocket tSocket = getUnderlyingSocketFromTransport(transport); - if (tSocket == null) { - LOGGER.warn("Unknown Transport, cannot determine ipAddress"); - } else { - THREAD_LOCAL_IP_ADDRESS.set(tSocket.getSocket().getInetAddress().getHostAddress()); - } - } - - private TSocket getUnderlyingSocketFromTransport(TTransport transport) { - while (transport != null) { - if (transport instanceof TSaslServerTransport) { - transport = ((TSaslServerTransport) transport).getUnderlyingTransport(); - } - if (transport instanceof TSaslClientTransport) { - transport = ((TSaslClientTransport) transport).getUnderlyingTransport(); - } - if (transport instanceof TSocket) { - return (TSocket) transport; - } - } - return null; - } - - private static final ThreadLocal THREAD_LOCAL_IP_ADDRESS = new ThreadLocal() { - @Override - protected synchronized String initialValue() { - return null; - } - }; - - private static final ThreadLocal THREAD_LOCAL_USER_NAME = new ThreadLocal() { - @Override - protected synchronized String initialValue() { - return null; - } - }; - - public static String getUserIpAddress() { - return THREAD_LOCAL_IP_ADDRESS.get(); - } - - public static String getUserName() { - return THREAD_LOCAL_USER_NAME.get(); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/CLIService.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/CLIService.java deleted file mode 100644 index 791ddcbd2c5b6..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/CLIService.java +++ /dev/null @@ -1,507 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import javax.security.auth.login.LoginException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.ql.exec.FunctionRegistry; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hive.service.CompositeService; -import org.apache.hive.service.ServiceException; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.operation.Operation; -import org.apache.hive.service.cli.session.SessionManager; -import org.apache.hive.service.cli.thrift.TProtocolVersion; -import org.apache.hive.service.server.HiveServer2; - -/** - * CLIService. 
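The per-request bookkeeping in TSetIpAddressProcessor above follows a common set-then-remove ThreadLocal pattern: populate the context before delegating, and always clear it in a finally block so pooled threads do not leak stale values. A stripped-down, self-contained sketch:

```java
public class RequestContextDemo {
  private static final ThreadLocal<String> CURRENT_USER = new ThreadLocal<>();

  public static String currentUser() {
    return CURRENT_USER.get();
  }

  // "handler" stands in for super.process(in, out) in the real processor.
  public static boolean process(String userFromTransport, Runnable handler) {
    CURRENT_USER.set(userFromTransport);
    try {
      handler.run();
      return true;
    } finally {
      CURRENT_USER.remove();
    }
  }

  public static void main(String[] args) {
    process("alice", () -> System.out.println("user = " + currentUser()));
    System.out.println("after request: " + currentUser()); // null
  }
}
```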
- * - */ -public class CLIService extends CompositeService implements ICLIService { - - public static final TProtocolVersion SERVER_VERSION; - - static { - TProtocolVersion[] protocols = TProtocolVersion.values(); - SERVER_VERSION = protocols[protocols.length - 1]; - } - - private final Log LOG = LogFactory.getLog(CLIService.class.getName()); - - private HiveConf hiveConf; - private SessionManager sessionManager; - private UserGroupInformation serviceUGI; - private UserGroupInformation httpUGI; - // The HiveServer2 instance running this service - private final HiveServer2 hiveServer2; - - public CLIService(HiveServer2 hiveServer2) { - super(CLIService.class.getSimpleName()); - this.hiveServer2 = hiveServer2; - } - - @Override - public synchronized void init(HiveConf hiveConf) { - this.hiveConf = hiveConf; - sessionManager = new SessionManager(hiveServer2); - addService(sessionManager); - // If the hadoop cluster is secure, do a kerberos login for the service from the keytab - if (UserGroupInformation.isSecurityEnabled()) { - try { - HiveAuthFactory.loginFromKeytab(hiveConf); - this.serviceUGI = Utils.getUGI(); - } catch (IOException e) { - throw new ServiceException("Unable to login to kerberos with given principal/keytab", e); - } catch (LoginException e) { - throw new ServiceException("Unable to login to kerberos with given principal/keytab", e); - } - - // Also try creating a UGI object for the SPNego principal - String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL); - String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB); - if (principal.isEmpty() || keyTabFile.isEmpty()) { - LOG.info("SPNego httpUGI not created, spNegoPrincipal: " + principal + - ", ketabFile: " + keyTabFile); - } else { - try { - this.httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf); - LOG.info("SPNego httpUGI successfully created."); - } catch (IOException e) { - LOG.warn("SPNego httpUGI creation failed: ", e); - } - } - } - // creates connection to HMS and thus *must* occur after kerberos login above - try { - applyAuthorizationConfigPolicy(hiveConf); - } catch (Exception e) { - throw new RuntimeException("Error applying authorization policy on hive configuration: " - + e.getMessage(), e); - } - setupBlockedUdfs(); - super.init(hiveConf); - } - - private void applyAuthorizationConfigPolicy(HiveConf newHiveConf) throws HiveException, - MetaException { - // authorization setup using SessionState should be revisited eventually, as - // authorization and authentication are not session specific settings - SessionState ss = new SessionState(newHiveConf); - ss.setIsHiveServerQuery(true); - SessionState.start(ss); - ss.applyAuthorizationPolicy(); - } - - private void setupBlockedUdfs() { - FunctionRegistry.setupPermissionsForBuiltinUDFs( - hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_WHITELIST), - hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_BLACKLIST)); - } - - public UserGroupInformation getServiceUGI() { - return this.serviceUGI; - } - - public UserGroupInformation getHttpUGI() { - return this.httpUGI; - } - - @Override - public synchronized void start() { - super.start(); - // Initialize and test a connection to the metastore - IMetaStoreClient metastoreClient = null; - try { - metastoreClient = new HiveMetaStoreClient(hiveConf); - metastoreClient.getDatabases("default"); - } catch (Exception e) { - throw new ServiceException("Unable to connect to MetaStore!", e); - } - finally { - if (metastoreClient != null) { - metastoreClient.close(); - } - } - 
} - - @Override - public synchronized void stop() { - super.stop(); - } - - /** - * @deprecated Use {@link #openSession(TProtocolVersion, String, String, String, Map)} - */ - @Deprecated - public SessionHandle openSession(TProtocolVersion protocol, String username, String password, - Map configuration) throws HiveSQLException { - SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, null, configuration, false, null); - LOG.debug(sessionHandle + ": openSession()"); - return sessionHandle; - } - - /** - * @deprecated Use {@link #openSessionWithImpersonation(TProtocolVersion, String, String, String, Map, String)} - */ - @Deprecated - public SessionHandle openSessionWithImpersonation(TProtocolVersion protocol, String username, - String password, Map configuration, String delegationToken) - throws HiveSQLException { - SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, null, configuration, - true, delegationToken); - LOG.debug(sessionHandle + ": openSessionWithImpersonation()"); - return sessionHandle; - } - - public SessionHandle openSession(TProtocolVersion protocol, String username, String password, String ipAddress, - Map configuration) throws HiveSQLException { - SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, ipAddress, configuration, false, null); - LOG.debug(sessionHandle + ": openSession()"); - return sessionHandle; - } - - public SessionHandle openSessionWithImpersonation(TProtocolVersion protocol, String username, - String password, String ipAddress, Map configuration, String delegationToken) - throws HiveSQLException { - SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, ipAddress, configuration, - true, delegationToken); - LOG.debug(sessionHandle + ": openSession()"); - return sessionHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#openSession(java.lang.String, java.lang.String, java.util.Map) - */ - @Override - public SessionHandle openSession(String username, String password, Map configuration) - throws HiveSQLException { - SessionHandle sessionHandle = sessionManager.openSession(SERVER_VERSION, username, password, null, configuration, false, null); - LOG.debug(sessionHandle + ": openSession()"); - return sessionHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#openSession(java.lang.String, java.lang.String, java.util.Map) - */ - @Override - public SessionHandle openSessionWithImpersonation(String username, String password, Map configuration, - String delegationToken) throws HiveSQLException { - SessionHandle sessionHandle = sessionManager.openSession(SERVER_VERSION, username, password, null, configuration, - true, delegationToken); - LOG.debug(sessionHandle + ": openSession()"); - return sessionHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#closeSession(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public void closeSession(SessionHandle sessionHandle) - throws HiveSQLException { - sessionManager.closeSession(sessionHandle); - LOG.debug(sessionHandle + ": closeSession()"); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getInfo(org.apache.hive.service.cli.SessionHandle, java.util.List) - */ - @Override - public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType getInfoType) - throws HiveSQLException { - GetInfoValue infoValue = sessionManager.getSession(sessionHandle) - 
.getInfo(getInfoType); - LOG.debug(sessionHandle + ": getInfo()"); - return infoValue; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#executeStatement(org.apache.hive.service.cli.SessionHandle, - * java.lang.String, java.util.Map) - */ - @Override - public OperationHandle executeStatement(SessionHandle sessionHandle, String statement, - Map confOverlay) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .executeStatement(statement, confOverlay); - LOG.debug(sessionHandle + ": executeStatement()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#executeStatementAsync(org.apache.hive.service.cli.SessionHandle, - * java.lang.String, java.util.Map) - */ - @Override - public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement, - Map confOverlay) throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .executeStatementAsync(statement, confOverlay); - LOG.debug(sessionHandle + ": executeStatementAsync()"); - return opHandle; - } - - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getTypeInfo(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getTypeInfo(SessionHandle sessionHandle) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .getTypeInfo(); - LOG.debug(sessionHandle + ": getTypeInfo()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getCatalogs(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getCatalogs(SessionHandle sessionHandle) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .getCatalogs(); - LOG.debug(sessionHandle + ": getCatalogs()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getSchemas(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String) - */ - @Override - public OperationHandle getSchemas(SessionHandle sessionHandle, - String catalogName, String schemaName) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .getSchemas(catalogName, schemaName); - LOG.debug(sessionHandle + ": getSchemas()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getTables(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.util.List) - */ - @Override - public OperationHandle getTables(SessionHandle sessionHandle, - String catalogName, String schemaName, String tableName, List tableTypes) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .getTables(catalogName, schemaName, tableName, tableTypes); - LOG.debug(sessionHandle + ": getTables()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getTableTypes(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getTableTypes(SessionHandle sessionHandle) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .getTableTypes(); - LOG.debug(sessionHandle + ": getTableTypes()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getColumns(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public 
OperationHandle getColumns(SessionHandle sessionHandle, - String catalogName, String schemaName, String tableName, String columnName) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .getColumns(catalogName, schemaName, tableName, columnName); - LOG.debug(sessionHandle + ": getColumns()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getFunctions(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getFunctions(SessionHandle sessionHandle, - String catalogName, String schemaName, String functionName) - throws HiveSQLException { - OperationHandle opHandle = sessionManager.getSession(sessionHandle) - .getFunctions(catalogName, schemaName, functionName); - LOG.debug(sessionHandle + ": getFunctions()"); - return opHandle; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getOperationStatus(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public OperationStatus getOperationStatus(OperationHandle opHandle) - throws HiveSQLException { - Operation operation = sessionManager.getOperationManager().getOperation(opHandle); - /** - * If this is a background operation run asynchronously, - * we block for a configured duration, before we return - * (duration: HIVE_SERVER2_LONG_POLLING_TIMEOUT). - * However, if the background operation is complete, we return immediately. - */ - if (operation.shouldRunAsync()) { - HiveConf conf = operation.getParentSession().getHiveConf(); - long timeout = HiveConf.getTimeVar(conf, - HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT, TimeUnit.MILLISECONDS); - try { - operation.getBackgroundHandle().get(timeout, TimeUnit.MILLISECONDS); - } catch (TimeoutException e) { - // No Op, return to the caller since long polling timeout has expired - LOG.trace(opHandle + ": Long polling timed out"); - } catch (CancellationException e) { - // The background operation thread was cancelled - LOG.trace(opHandle + ": The background operation was cancelled", e); - } catch (ExecutionException e) { - // The background operation thread was aborted - LOG.warn(opHandle + ": The background operation was aborted", e); - } catch (InterruptedException e) { - // No op, this thread was interrupted - // In this case, the call might return sooner than long polling timeout - } - } - OperationStatus opStatus = operation.getStatus(); - LOG.debug(opHandle + ": getOperationStatus()"); - return opStatus; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#cancelOperation(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public void cancelOperation(OperationHandle opHandle) - throws HiveSQLException { - sessionManager.getOperationManager().getOperation(opHandle) - .getParentSession().cancelOperation(opHandle); - LOG.debug(opHandle + ": cancelOperation()"); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#closeOperation(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public void closeOperation(OperationHandle opHandle) - throws HiveSQLException { - sessionManager.getOperationManager().getOperation(opHandle) - .getParentSession().closeOperation(opHandle); - LOG.debug(opHandle + ": closeOperation"); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getResultSetMetadata(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public TableSchema getResultSetMetadata(OperationHandle opHandle) - throws HiveSQLException { - TableSchema 
tableSchema = sessionManager.getOperationManager() - .getOperation(opHandle).getParentSession().getResultSetMetadata(opHandle); - LOG.debug(opHandle + ": getResultSetMetadata()"); - return tableSchema; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public RowSet fetchResults(OperationHandle opHandle) - throws HiveSQLException { - return fetchResults(opHandle, Operation.DEFAULT_FETCH_ORIENTATION, - Operation.DEFAULT_FETCH_MAX_ROWS, FetchType.QUERY_OUTPUT); - } - - @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, - long maxRows, FetchType fetchType) throws HiveSQLException { - RowSet rowSet = sessionManager.getOperationManager().getOperation(opHandle) - .getParentSession().fetchResults(opHandle, orientation, maxRows, fetchType); - LOG.debug(opHandle + ": fetchResults()"); - return rowSet; - } - - // obtain delegation token for the give user from metastore - public synchronized String getDelegationTokenFromMetaStore(String owner) - throws HiveSQLException, UnsupportedOperationException, LoginException, IOException { - if (!hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL) || - !hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) { - throw new UnsupportedOperationException( - "delegation token is can only be obtained for a secure remote metastore"); - } - - try { - Hive.closeCurrent(); - return Hive.get(hiveConf).getDelegationToken(owner, owner); - } catch (HiveException e) { - if (e.getCause() instanceof UnsupportedOperationException) { - throw (UnsupportedOperationException)e.getCause(); - } else { - throw new HiveSQLException("Error connect metastore to setup impersonation", e); - } - } - } - - @Override - public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String owner, String renewer) throws HiveSQLException { - String delegationToken = sessionManager.getSession(sessionHandle) - .getDelegationToken(authFactory, owner, renewer); - LOG.info(sessionHandle + ": getDelegationToken()"); - return delegationToken; - } - - @Override - public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException { - sessionManager.getSession(sessionHandle).cancelDelegationToken(authFactory, tokenStr); - LOG.info(sessionHandle + ": cancelDelegationToken()"); - } - - @Override - public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException { - sessionManager.getSession(sessionHandle).renewDelegationToken(authFactory, tokenStr); - LOG.info(sessionHandle + ": renewDelegationToken()"); - } - - public SessionManager getSessionManager() { - return sessionManager; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Column.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Column.java deleted file mode 100644 index 26d0f718f383a..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Column.java +++ /dev/null @@ -1,423 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.nio.ByteBuffer; -import java.util.AbstractList; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.List; - -import com.google.common.primitives.Booleans; -import com.google.common.primitives.Bytes; -import com.google.common.primitives.Doubles; -import com.google.common.primitives.Ints; -import com.google.common.primitives.Longs; -import com.google.common.primitives.Shorts; -import org.apache.hive.service.cli.thrift.TBinaryColumn; -import org.apache.hive.service.cli.thrift.TBoolColumn; -import org.apache.hive.service.cli.thrift.TByteColumn; -import org.apache.hive.service.cli.thrift.TColumn; -import org.apache.hive.service.cli.thrift.TDoubleColumn; -import org.apache.hive.service.cli.thrift.TI16Column; -import org.apache.hive.service.cli.thrift.TI32Column; -import org.apache.hive.service.cli.thrift.TI64Column; -import org.apache.hive.service.cli.thrift.TStringColumn; - -/** - * Column. - */ -public class Column extends AbstractList { - - private static final int DEFAULT_SIZE = 100; - - private final Type type; - - private BitSet nulls; - - private int size; - private boolean[] boolVars; - private byte[] byteVars; - private short[] shortVars; - private int[] intVars; - private long[] longVars; - private double[] doubleVars; - private List stringVars; - private List binaryVars; - - public Column(Type type, BitSet nulls, Object values) { - this.type = type; - this.nulls = nulls; - if (type == Type.BOOLEAN_TYPE) { - boolVars = (boolean[]) values; - size = boolVars.length; - } else if (type == Type.TINYINT_TYPE) { - byteVars = (byte[]) values; - size = byteVars.length; - } else if (type == Type.SMALLINT_TYPE) { - shortVars = (short[]) values; - size = shortVars.length; - } else if (type == Type.INT_TYPE) { - intVars = (int[]) values; - size = intVars.length; - } else if (type == Type.BIGINT_TYPE) { - longVars = (long[]) values; - size = longVars.length; - } else if (type == Type.DOUBLE_TYPE) { - doubleVars = (double[]) values; - size = doubleVars.length; - } else if (type == Type.BINARY_TYPE) { - binaryVars = (List) values; - size = binaryVars.size(); - } else if (type == Type.STRING_TYPE) { - stringVars = (List) values; - size = stringVars.size(); - } else { - throw new IllegalStateException("invalid union object"); - } - } - - public Column(Type type) { - nulls = new BitSet(); - switch (type) { - case BOOLEAN_TYPE: - boolVars = new boolean[DEFAULT_SIZE]; - break; - case TINYINT_TYPE: - byteVars = new byte[DEFAULT_SIZE]; - break; - case SMALLINT_TYPE: - shortVars = new short[DEFAULT_SIZE]; - break; - case INT_TYPE: - intVars = new int[DEFAULT_SIZE]; - break; - case BIGINT_TYPE: - longVars = new long[DEFAULT_SIZE]; - break; - case FLOAT_TYPE: - case DOUBLE_TYPE: - type = Type.DOUBLE_TYPE; - doubleVars = new double[DEFAULT_SIZE]; - break; - case BINARY_TYPE: - binaryVars = new ArrayList(); - break; - default: - 
type = Type.STRING_TYPE; - stringVars = new ArrayList(); - } - this.type = type; - } - - public Column(TColumn colValues) { - if (colValues.isSetBoolVal()) { - type = Type.BOOLEAN_TYPE; - nulls = toBitset(colValues.getBoolVal().getNulls()); - boolVars = Booleans.toArray(colValues.getBoolVal().getValues()); - size = boolVars.length; - } else if (colValues.isSetByteVal()) { - type = Type.TINYINT_TYPE; - nulls = toBitset(colValues.getByteVal().getNulls()); - byteVars = Bytes.toArray(colValues.getByteVal().getValues()); - size = byteVars.length; - } else if (colValues.isSetI16Val()) { - type = Type.SMALLINT_TYPE; - nulls = toBitset(colValues.getI16Val().getNulls()); - shortVars = Shorts.toArray(colValues.getI16Val().getValues()); - size = shortVars.length; - } else if (colValues.isSetI32Val()) { - type = Type.INT_TYPE; - nulls = toBitset(colValues.getI32Val().getNulls()); - intVars = Ints.toArray(colValues.getI32Val().getValues()); - size = intVars.length; - } else if (colValues.isSetI64Val()) { - type = Type.BIGINT_TYPE; - nulls = toBitset(colValues.getI64Val().getNulls()); - longVars = Longs.toArray(colValues.getI64Val().getValues()); - size = longVars.length; - } else if (colValues.isSetDoubleVal()) { - type = Type.DOUBLE_TYPE; - nulls = toBitset(colValues.getDoubleVal().getNulls()); - doubleVars = Doubles.toArray(colValues.getDoubleVal().getValues()); - size = doubleVars.length; - } else if (colValues.isSetBinaryVal()) { - type = Type.BINARY_TYPE; - nulls = toBitset(colValues.getBinaryVal().getNulls()); - binaryVars = colValues.getBinaryVal().getValues(); - size = binaryVars.size(); - } else if (colValues.isSetStringVal()) { - type = Type.STRING_TYPE; - nulls = toBitset(colValues.getStringVal().getNulls()); - stringVars = colValues.getStringVal().getValues(); - size = stringVars.size(); - } else { - throw new IllegalStateException("invalid union object"); - } - } - - public Column extractSubset(int start, int end) { - BitSet subNulls = nulls.get(start, end); - if (type == Type.BOOLEAN_TYPE) { - Column subset = new Column(type, subNulls, Arrays.copyOfRange(boolVars, start, end)); - boolVars = Arrays.copyOfRange(boolVars, end, size); - nulls = nulls.get(start, size); - size = boolVars.length; - return subset; - } - if (type == Type.TINYINT_TYPE) { - Column subset = new Column(type, subNulls, Arrays.copyOfRange(byteVars, start, end)); - byteVars = Arrays.copyOfRange(byteVars, end, size); - nulls = nulls.get(start, size); - size = byteVars.length; - return subset; - } - if (type == Type.SMALLINT_TYPE) { - Column subset = new Column(type, subNulls, Arrays.copyOfRange(shortVars, start, end)); - shortVars = Arrays.copyOfRange(shortVars, end, size); - nulls = nulls.get(start, size); - size = shortVars.length; - return subset; - } - if (type == Type.INT_TYPE) { - Column subset = new Column(type, subNulls, Arrays.copyOfRange(intVars, start, end)); - intVars = Arrays.copyOfRange(intVars, end, size); - nulls = nulls.get(start, size); - size = intVars.length; - return subset; - } - if (type == Type.BIGINT_TYPE) { - Column subset = new Column(type, subNulls, Arrays.copyOfRange(longVars, start, end)); - longVars = Arrays.copyOfRange(longVars, end, size); - nulls = nulls.get(start, size); - size = longVars.length; - return subset; - } - if (type == Type.DOUBLE_TYPE) { - Column subset = new Column(type, subNulls, Arrays.copyOfRange(doubleVars, start, end)); - doubleVars = Arrays.copyOfRange(doubleVars, end, size); - nulls = nulls.get(start, size); - size = doubleVars.length; - return subset; - } - if (type 
== Type.BINARY_TYPE) { - Column subset = new Column(type, subNulls, binaryVars.subList(start, end)); - binaryVars = binaryVars.subList(end, binaryVars.size()); - nulls = nulls.get(start, size); - size = binaryVars.size(); - return subset; - } - if (type == Type.STRING_TYPE) { - Column subset = new Column(type, subNulls, stringVars.subList(start, end)); - stringVars = stringVars.subList(end, stringVars.size()); - nulls = nulls.get(start, size); - size = stringVars.size(); - return subset; - } - throw new IllegalStateException("invalid union object"); - } - - private static final byte[] MASKS = new byte[] { - 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, (byte)0x80 - }; - - private static BitSet toBitset(byte[] nulls) { - BitSet bitset = new BitSet(); - int bits = nulls.length * 8; - for (int i = 0; i < bits; i++) { - bitset.set(i, (nulls[i / 8] & MASKS[i % 8]) != 0); - } - return bitset; - } - - private static byte[] toBinary(BitSet bitset) { - byte[] nulls = new byte[1 + (bitset.length() / 8)]; - for (int i = 0; i < bitset.length(); i++) { - nulls[i / 8] |= bitset.get(i) ? MASKS[i % 8] : 0; - } - return nulls; - } - - public Type getType() { - return type; - } - - @Override - public Object get(int index) { - if (nulls.get(index)) { - return null; - } - switch (type) { - case BOOLEAN_TYPE: - return boolVars[index]; - case TINYINT_TYPE: - return byteVars[index]; - case SMALLINT_TYPE: - return shortVars[index]; - case INT_TYPE: - return intVars[index]; - case BIGINT_TYPE: - return longVars[index]; - case DOUBLE_TYPE: - return doubleVars[index]; - case STRING_TYPE: - return stringVars.get(index); - case BINARY_TYPE: - return binaryVars.get(index).array(); - } - return null; - } - - @Override - public int size() { - return size; - } - - public TColumn toTColumn() { - TColumn value = new TColumn(); - ByteBuffer nullMasks = ByteBuffer.wrap(toBinary(nulls)); - switch (type) { - case BOOLEAN_TYPE: - value.setBoolVal(new TBoolColumn(Booleans.asList(Arrays.copyOfRange(boolVars, 0, size)), nullMasks)); - break; - case TINYINT_TYPE: - value.setByteVal(new TByteColumn(Bytes.asList(Arrays.copyOfRange(byteVars, 0, size)), nullMasks)); - break; - case SMALLINT_TYPE: - value.setI16Val(new TI16Column(Shorts.asList(Arrays.copyOfRange(shortVars, 0, size)), nullMasks)); - break; - case INT_TYPE: - value.setI32Val(new TI32Column(Ints.asList(Arrays.copyOfRange(intVars, 0, size)), nullMasks)); - break; - case BIGINT_TYPE: - value.setI64Val(new TI64Column(Longs.asList(Arrays.copyOfRange(longVars, 0, size)), nullMasks)); - break; - case DOUBLE_TYPE: - value.setDoubleVal(new TDoubleColumn(Doubles.asList(Arrays.copyOfRange(doubleVars, 0, size)), nullMasks)); - break; - case STRING_TYPE: - value.setStringVal(new TStringColumn(stringVars, nullMasks)); - break; - case BINARY_TYPE: - value.setBinaryVal(new TBinaryColumn(binaryVars, nullMasks)); - break; - } - return value; - } - - private static final ByteBuffer EMPTY_BINARY = ByteBuffer.allocate(0); - private static final String EMPTY_STRING = ""; - - public void addValue(Type type, Object field) { - switch (type) { - case BOOLEAN_TYPE: - nulls.set(size, field == null); - boolVars()[size] = field == null ? true : (Boolean)field; - break; - case TINYINT_TYPE: - nulls.set(size, field == null); - byteVars()[size] = field == null ? 0 : (Byte) field; - break; - case SMALLINT_TYPE: - nulls.set(size, field == null); - shortVars()[size] = field == null ? 0 : (Short)field; - break; - case INT_TYPE: - nulls.set(size, field == null); - intVars()[size] = field == null ? 
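[editor's note] The MASKS / toBitset / toBinary helpers in the deleted Column class above pack the per-row null flags one bit per row, least-significant bit first, so a column of N rows carries roughly N/8 bytes of null metadata over Thrift. A self-contained sketch that mirrors those helpers (only the wrapper class name is invented):

    import java.util.BitSet;

    class NullMaskSketch {
      private static final byte[] MASKS = {
          0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, (byte) 0x80
      };

      // Unpack a wire-format null mask into a BitSet (bit i = row i is null).
      static BitSet toBitset(byte[] nulls) {
        BitSet bitset = new BitSet();
        int bits = nulls.length * 8;
        for (int i = 0; i < bits; i++) {
          bitset.set(i, (nulls[i / 8] & MASKS[i % 8]) != 0);
        }
        return bitset;
      }

      // Pack a BitSet of null flags back into the byte-per-8-rows wire format.
      static byte[] toBinary(BitSet bitset) {
        byte[] nulls = new byte[1 + (bitset.length() / 8)];
        for (int i = 0; i < bitset.length(); i++) {
          nulls[i / 8] |= bitset.get(i) ? MASKS[i % 8] : 0;
        }
        return nulls;
      }

      public static void main(String[] args) {
        BitSet nulls = new BitSet();
        nulls.set(0);                          // row 0 is null
        nulls.set(9);                          // row 9 is null
        byte[] packed = toBinary(nulls);       // bytes 0x01, 0x02
        System.out.println(toBitset(packed));  // {0, 9}
      }
    }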
0 : (Integer)field; - break; - case BIGINT_TYPE: - nulls.set(size, field == null); - longVars()[size] = field == null ? 0 : (Long)field; - break; - case FLOAT_TYPE: - nulls.set(size, field == null); - doubleVars()[size] = field == null ? 0 : Double.valueOf(field.toString()); - break; - case DOUBLE_TYPE: - nulls.set(size, field == null); - doubleVars()[size] = field == null ? 0 : (Double)field; - break; - case BINARY_TYPE: - nulls.set(binaryVars.size(), field == null); - binaryVars.add(field == null ? EMPTY_BINARY : ByteBuffer.wrap((byte[])field)); - break; - default: - nulls.set(stringVars.size(), field == null); - stringVars.add(field == null ? EMPTY_STRING : String.valueOf(field)); - break; - } - size++; - } - - private boolean[] boolVars() { - if (boolVars.length == size) { - boolean[] newVars = new boolean[size << 1]; - System.arraycopy(boolVars, 0, newVars, 0, size); - return boolVars = newVars; - } - return boolVars; - } - - private byte[] byteVars() { - if (byteVars.length == size) { - byte[] newVars = new byte[size << 1]; - System.arraycopy(byteVars, 0, newVars, 0, size); - return byteVars = newVars; - } - return byteVars; - } - - private short[] shortVars() { - if (shortVars.length == size) { - short[] newVars = new short[size << 1]; - System.arraycopy(shortVars, 0, newVars, 0, size); - return shortVars = newVars; - } - return shortVars; - } - - private int[] intVars() { - if (intVars.length == size) { - int[] newVars = new int[size << 1]; - System.arraycopy(intVars, 0, newVars, 0, size); - return intVars = newVars; - } - return intVars; - } - - private long[] longVars() { - if (longVars.length == size) { - long[] newVars = new long[size << 1]; - System.arraycopy(longVars, 0, newVars, 0, size); - return longVars = newVars; - } - return longVars; - } - - private double[] doubleVars() { - if (doubleVars.length == size) { - double[] newVars = new double[size << 1]; - System.arraycopy(doubleVars, 0, newVars, 0, size); - return doubleVars = newVars; - } - return doubleVars; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java deleted file mode 100644 index 47a582e2223e4..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnBasedSet.java +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -import org.apache.hive.service.cli.thrift.TColumn; -import org.apache.hive.service.cli.thrift.TRow; -import org.apache.hive.service.cli.thrift.TRowSet; - -/** - * ColumnBasedSet. 
- */ -public class ColumnBasedSet implements RowSet { - - private long startOffset; - - private final Type[] types; // non-null only for writing (server-side) - private final List columns; - - public ColumnBasedSet(TableSchema schema) { - types = schema.toTypes(); - columns = new ArrayList(); - for (ColumnDescriptor colDesc : schema.getColumnDescriptors()) { - columns.add(new Column(colDesc.getType())); - } - } - - public ColumnBasedSet(TRowSet tRowSet) { - types = null; - columns = new ArrayList(); - for (TColumn tvalue : tRowSet.getColumns()) { - columns.add(new Column(tvalue)); - } - startOffset = tRowSet.getStartRowOffset(); - } - - private ColumnBasedSet(Type[] types, List columns, long startOffset) { - this.types = types; - this.columns = columns; - this.startOffset = startOffset; - } - - @Override - public ColumnBasedSet addRow(Object[] fields) { - for (int i = 0; i < fields.length; i++) { - columns.get(i).addValue(types[i], fields[i]); - } - return this; - } - - public List getColumns() { - return columns; - } - - @Override - public int numColumns() { - return columns.size(); - } - - @Override - public int numRows() { - return columns.isEmpty() ? 0 : columns.get(0).size(); - } - - @Override - public ColumnBasedSet extractSubset(int maxRows) { - int numRows = Math.min(numRows(), maxRows); - - List subset = new ArrayList(); - for (int i = 0; i < columns.size(); i++) { - subset.add(columns.get(i).extractSubset(0, numRows)); - } - ColumnBasedSet result = new ColumnBasedSet(types, subset, startOffset); - startOffset += numRows; - return result; - } - - @Override - public long getStartOffset() { - return startOffset; - } - - @Override - public void setStartOffset(long startOffset) { - this.startOffset = startOffset; - } - - public TRowSet toTRowSet() { - TRowSet tRowSet = new TRowSet(startOffset, new ArrayList()); - for (int i = 0; i < columns.size(); i++) { - tRowSet.addToColumns(columns.get(i).toTColumn()); - } - return tRowSet; - } - - @Override - public Iterator iterator() { - return new Iterator() { - - private int index; - private final Object[] convey = new Object[numColumns()]; - - @Override - public boolean hasNext() { - return index < numRows(); - } - - @Override - public Object[] next() { - for (int i = 0; i < columns.size(); i++) { - convey[i] = columns.get(i).get(index); - } - index++; - return convey; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("remove"); - } - }; - } - - public Object[] fill(int index, Object[] convey) { - for (int i = 0; i < columns.size(); i++) { - convey[i] = columns.get(i).get(index); - } - return convey; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java deleted file mode 100644 index f0bbf14693160..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnDescriptor.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
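[editor's note] The deleted ColumnBasedSet.iterator() above never builds a row object per row: it keeps one reusable Object[] ("convey") and fills it from the column lists on each next() call. An illustrative standalone sketch of that shape (this is not the Hive class, just the same iteration pattern over generic column lists):

    import java.util.*;

    class ColumnarRows implements Iterable<Object[]> {
      private final List<List<Object>> columns;

      ColumnarRows(List<List<Object>> columns) {
        this.columns = columns;
      }

      public Iterator<Object[]> iterator() {
        return new Iterator<Object[]>() {
          private int row;
          private final Object[] convey = new Object[columns.size()];

          public boolean hasNext() {
            return !columns.isEmpty() && row < columns.get(0).size();
          }

          public Object[] next() {
            for (int i = 0; i < columns.size(); i++) {
              convey[i] = columns.get(i).get(row);
            }
            row++;
            return convey;   // reused buffer: copy it if you need to keep the row
          }
        };
      }

      public static void main(String[] args) {
        ColumnarRows rows = new ColumnarRows(Arrays.asList(
            Arrays.asList((Object) 1, 2, 3),
            Arrays.asList((Object) "a", "b", "c")));
        for (Object[] r : rows) {
          System.out.println(Arrays.toString(r)); // [1, a] then [2, b] then [3, c]
        }
      }
    }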
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hive.service.cli.thrift.TColumnDesc; - - -/** - * ColumnDescriptor. - * - */ -public class ColumnDescriptor { - private final String name; - private final String comment; - private final TypeDescriptor type; - // ordinal position of this column in the schema - private final int position; - - public ColumnDescriptor(String name, String comment, TypeDescriptor type, int position) { - this.name = name; - this.comment = comment; - this.type = type; - this.position = position; - } - - public ColumnDescriptor(TColumnDesc tColumnDesc) { - name = tColumnDesc.getColumnName(); - comment = tColumnDesc.getComment(); - type = new TypeDescriptor(tColumnDesc.getTypeDesc()); - position = tColumnDesc.getPosition(); - } - - public ColumnDescriptor(FieldSchema column, int position) { - name = column.getName(); - comment = column.getComment(); - type = new TypeDescriptor(column.getType()); - this.position = position; - } - - public static ColumnDescriptor newPrimitiveColumnDescriptor(String name, String comment, Type type, int position) { - // Current usage looks like it's only for metadata columns, but if that changes then - // this method may need to require a type qualifiers aruments. - return new ColumnDescriptor(name, comment, new TypeDescriptor(type), position); - } - - public String getName() { - return name; - } - - public String getComment() { - return comment; - } - - public TypeDescriptor getTypeDescriptor() { - return type; - } - - public int getOrdinalPosition() { - return position; - } - - public TColumnDesc toTColumnDesc() { - TColumnDesc tColumnDesc = new TColumnDesc(); - tColumnDesc.setColumnName(name); - tColumnDesc.setComment(comment); - tColumnDesc.setTypeDesc(type.toTTypeDesc()); - tColumnDesc.setPosition(position); - return tColumnDesc; - } - - public Type getType() { - return type.getType(); - } - - public boolean isPrimitive() { - return type.getType().isPrimitiveType(); - } - - public String getTypeName() { - return type.getTypeName(); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnValue.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnValue.java deleted file mode 100644 index 462b93a0f09fe..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ColumnValue.java +++ /dev/null @@ -1,288 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.math.BigDecimal; -import java.sql.Date; -import java.sql.Timestamp; - -import org.apache.hadoop.hive.common.type.HiveChar; -import org.apache.hadoop.hive.common.type.HiveIntervalDayTime; -import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth; -import org.apache.hadoop.hive.common.type.HiveVarchar; -import org.apache.hive.service.cli.thrift.TBoolValue; -import org.apache.hive.service.cli.thrift.TByteValue; -import org.apache.hive.service.cli.thrift.TColumnValue; -import org.apache.hive.service.cli.thrift.TDoubleValue; -import org.apache.hive.service.cli.thrift.TI16Value; -import org.apache.hive.service.cli.thrift.TI32Value; -import org.apache.hive.service.cli.thrift.TI64Value; -import org.apache.hive.service.cli.thrift.TStringValue; - -import org.apache.spark.unsafe.types.UTF8String; - -/** - * Protocols before HIVE_CLI_SERVICE_PROTOCOL_V6 (used by RowBasedSet) - * - */ -public class ColumnValue { - - private static TColumnValue booleanValue(Boolean value) { - TBoolValue tBoolValue = new TBoolValue(); - if (value != null) { - tBoolValue.setValue(value); - } - return TColumnValue.boolVal(tBoolValue); - } - - private static TColumnValue byteValue(Byte value) { - TByteValue tByteValue = new TByteValue(); - if (value != null) { - tByteValue.setValue(value); - } - return TColumnValue.byteVal(tByteValue); - } - - private static TColumnValue shortValue(Short value) { - TI16Value tI16Value = new TI16Value(); - if (value != null) { - tI16Value.setValue(value); - } - return TColumnValue.i16Val(tI16Value); - } - - private static TColumnValue intValue(Integer value) { - TI32Value tI32Value = new TI32Value(); - if (value != null) { - tI32Value.setValue(value); - } - return TColumnValue.i32Val(tI32Value); - } - - private static TColumnValue longValue(Long value) { - TI64Value tI64Value = new TI64Value(); - if (value != null) { - tI64Value.setValue(value); - } - return TColumnValue.i64Val(tI64Value); - } - - private static TColumnValue floatValue(Float value) { - TDoubleValue tDoubleValue = new TDoubleValue(); - if (value != null) { - tDoubleValue.setValue(value); - } - return TColumnValue.doubleVal(tDoubleValue); - } - - private static TColumnValue doubleValue(Double value) { - TDoubleValue tDoubleValue = new TDoubleValue(); - if (value != null) { - tDoubleValue.setValue(value); - } - return TColumnValue.doubleVal(tDoubleValue); - } - - private static TColumnValue stringValue(String value) { - TStringValue tStringValue = new TStringValue(); - if (value != null) { - tStringValue.setValue(value); - } - return TColumnValue.stringVal(tStringValue); - } - - private static TColumnValue stringValue(HiveChar value) { - TStringValue tStringValue = new TStringValue(); - if (value != null) { - tStringValue.setValue(value.toString()); - } - return TColumnValue.stringVal(tStringValue); - } - - private static TColumnValue stringValue(HiveVarchar value) { - TStringValue tStringValue = new TStringValue(); - if (value != null) { - tStringValue.setValue(value.toString()); - } - return TColumnValue.stringVal(tStringValue); - } - - private 
static TColumnValue stringValue(HiveIntervalYearMonth value) { - TStringValue tStrValue = new TStringValue(); - if (value != null) { - tStrValue.setValue(value.toString()); - } - return TColumnValue.stringVal(tStrValue); - } - - private static TColumnValue stringValue(HiveIntervalDayTime value) { - TStringValue tStrValue = new TStringValue(); - if (value != null) { - tStrValue.setValue(value.toString()); - } - return TColumnValue.stringVal(tStrValue); - } - - public static TColumnValue toTColumnValue(Type type, Object value) { - switch (type) { - case BOOLEAN_TYPE: - return booleanValue((Boolean)value); - case TINYINT_TYPE: - return byteValue((Byte)value); - case SMALLINT_TYPE: - return shortValue((Short)value); - case INT_TYPE: - return intValue((Integer)value); - case BIGINT_TYPE: - return longValue((Long)value); - case FLOAT_TYPE: - return floatValue((Float)value); - case DOUBLE_TYPE: - return doubleValue((Double)value); - case STRING_TYPE: - return stringValue((String)value); - case CHAR_TYPE: - return stringValue((HiveChar)value); - case VARCHAR_TYPE: - return stringValue((HiveVarchar)value); - case DATE_TYPE: - case TIMESTAMP_TYPE: - // SPARK-31859, SPARK-31861: converted to string already in SparkExecuteStatementOperation - return stringValue((String)value); - case INTERVAL_YEAR_MONTH_TYPE: - return stringValue((HiveIntervalYearMonth) value); - case INTERVAL_DAY_TIME_TYPE: - return stringValue((HiveIntervalDayTime) value); - case DECIMAL_TYPE: - String plainStr = value == null ? null : ((BigDecimal)value).toPlainString(); - return stringValue(plainStr); - case BINARY_TYPE: - String strVal = value == null ? null : UTF8String.fromBytes((byte[])value).toString(); - return stringValue(strVal); - case ARRAY_TYPE: - case MAP_TYPE: - case STRUCT_TYPE: - case UNION_TYPE: - case USER_DEFINED_TYPE: - return stringValue((String)value); - case NULL_TYPE: - return stringValue((String)value); - default: - return null; - } - } - - private static Boolean getBooleanValue(TBoolValue tBoolValue) { - if (tBoolValue.isSetValue()) { - return tBoolValue.isValue(); - } - return null; - } - - private static Byte getByteValue(TByteValue tByteValue) { - if (tByteValue.isSetValue()) { - return tByteValue.getValue(); - } - return null; - } - - private static Short getShortValue(TI16Value tI16Value) { - if (tI16Value.isSetValue()) { - return tI16Value.getValue(); - } - return null; - } - - private static Integer getIntegerValue(TI32Value tI32Value) { - if (tI32Value.isSetValue()) { - return tI32Value.getValue(); - } - return null; - } - - private static Long getLongValue(TI64Value tI64Value) { - if (tI64Value.isSetValue()) { - return tI64Value.getValue(); - } - return null; - } - - private static Double getDoubleValue(TDoubleValue tDoubleValue) { - if (tDoubleValue.isSetValue()) { - return tDoubleValue.getValue(); - } - return null; - } - - private static String getStringValue(TStringValue tStringValue) { - if (tStringValue.isSetValue()) { - return tStringValue.getValue(); - } - return null; - } - - private static Date getDateValue(TStringValue tStringValue) { - if (tStringValue.isSetValue()) { - return Date.valueOf(tStringValue.getValue()); - } - return null; - } - - private static Timestamp getTimestampValue(TStringValue tStringValue) { - if (tStringValue.isSetValue()) { - return Timestamp.valueOf(tStringValue.getValue()); - } - return null; - } - - private static byte[] getBinaryValue(TStringValue tString) { - if (tString.isSetValue()) { - return tString.getValue().getBytes(); - } - return null; - } - - 
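[editor's note] The DECIMAL_TYPE branch above renders values through BigDecimal.toPlainString() rather than toString(); the distinction matters because toString() may emit scientific notation while toPlainString() always writes out the digits. A short illustration:

    import java.math.BigDecimal;

    class DecimalRendering {
      public static void main(String[] args) {
        BigDecimal d = new BigDecimal("1.23E+5");
        System.out.println(d.toString());       // 1.23E+5
        System.out.println(d.toPlainString());  // 123000
      }
    }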
private static BigDecimal getBigDecimalValue(TStringValue tStringValue) { - if (tStringValue.isSetValue()) { - return new BigDecimal(tStringValue.getValue()); - } - return null; - } - - public static Object toColumnValue(TColumnValue value) { - TColumnValue._Fields field = value.getSetField(); - switch (field) { - case BOOL_VAL: - return getBooleanValue(value.getBoolVal()); - case BYTE_VAL: - return getByteValue(value.getByteVal()); - case I16_VAL: - return getShortValue(value.getI16Val()); - case I32_VAL: - return getIntegerValue(value.getI32Val()); - case I64_VAL: - return getLongValue(value.getI64Val()); - case DOUBLE_VAL: - return getDoubleValue(value.getDoubleVal()); - case STRING_VAL: - return getStringValue(value.getStringVal()); - } - throw new IllegalArgumentException("never"); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java deleted file mode 100644 index 9cad5be198c06..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java +++ /dev/null @@ -1,208 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.util.List; -import java.util.Map; - -import org.apache.hive.service.auth.HiveAuthFactory; - - -/** - * EmbeddedCLIServiceClient. 
- * - */ -public class EmbeddedCLIServiceClient extends CLIServiceClient { - private final ICLIService cliService; - - public EmbeddedCLIServiceClient(ICLIService cliService) { - this.cliService = cliService; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#openSession(java.lang.String, java.lang.String, java.util.Map) - */ - @Override - public SessionHandle openSession(String username, String password, - Map configuration) throws HiveSQLException { - return cliService.openSession(username, password, configuration); - } - - @Override - public SessionHandle openSessionWithImpersonation(String username, String password, - Map configuration, String delegationToken) throws HiveSQLException { - throw new HiveSQLException("Impersonated session is not supported in the embedded mode"); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#closeSession(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public void closeSession(SessionHandle sessionHandle) throws HiveSQLException { - cliService.closeSession(sessionHandle); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getInfo(org.apache.hive.service.cli.SessionHandle, java.util.List) - */ - @Override - public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType getInfoType) - throws HiveSQLException { - return cliService.getInfo(sessionHandle, getInfoType); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#executeStatement(org.apache.hive.service.cli.SessionHandle, - * java.lang.String, java.util.Map) - */ - @Override - public OperationHandle executeStatement(SessionHandle sessionHandle, String statement, - Map confOverlay) throws HiveSQLException { - return cliService.executeStatement(sessionHandle, statement, confOverlay); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#executeStatementAsync(org.apache.hive.service.cli.SessionHandle, - * java.lang.String, java.util.Map) - */ - @Override - public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement, - Map confOverlay) throws HiveSQLException { - return cliService.executeStatementAsync(sessionHandle, statement, confOverlay); - } - - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getTypeInfo(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException { - return cliService.getTypeInfo(sessionHandle); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getCatalogs(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException { - return cliService.getCatalogs(sessionHandle); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getSchemas(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String) - */ - @Override - public OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, - String schemaName) throws HiveSQLException { - return cliService.getSchemas(sessionHandle, catalogName, schemaName); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getTables(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.util.List) - */ - @Override - public OperationHandle getTables(SessionHandle sessionHandle, String catalogName, - String schemaName, String 
tableName, List tableTypes) throws HiveSQLException { - return cliService.getTables(sessionHandle, catalogName, schemaName, tableName, tableTypes); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getTableTypes(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException { - return cliService.getTableTypes(sessionHandle); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getColumns(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.lang.String) - */ - @Override - public OperationHandle getColumns(SessionHandle sessionHandle, String catalogName, - String schemaName, String tableName, String columnName) throws HiveSQLException { - return cliService.getColumns(sessionHandle, catalogName, schemaName, tableName, columnName); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getFunctions(org.apache.hive.service.cli.SessionHandle, java.lang.String) - */ - @Override - public OperationHandle getFunctions(SessionHandle sessionHandle, - String catalogName, String schemaName, String functionName) - throws HiveSQLException { - return cliService.getFunctions(sessionHandle, catalogName, schemaName, functionName); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getOperationStatus(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException { - return cliService.getOperationStatus(opHandle); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#cancelOperation(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { - cliService.cancelOperation(opHandle); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#closeOperation(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public void closeOperation(OperationHandle opHandle) throws HiveSQLException { - cliService.closeOperation(opHandle); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.CLIServiceClient#getResultSetMetadata(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException { - return cliService.getResultSetMetadata(opHandle); - } - - @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, - long maxRows, FetchType fetchType) throws HiveSQLException { - return cliService.fetchResults(opHandle, orientation, maxRows, fetchType); - } - - - @Override - public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String owner, String renewer) throws HiveSQLException { - return cliService.getDelegationToken(sessionHandle, authFactory, owner, renewer); - } - - @Override - public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException { - cliService.cancelDelegationToken(sessionHandle, authFactory, tokenStr); - } - - @Override - public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException { - cliService.renewDelegationToken(sessionHandle, authFactory, tokenStr); - } -} diff --git 
a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/FetchOrientation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/FetchOrientation.java deleted file mode 100644 index ffa6f2e1f3743..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/FetchOrientation.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TFetchOrientation; - -/** - * FetchOrientation. - * - */ -public enum FetchOrientation { - FETCH_NEXT(TFetchOrientation.FETCH_NEXT), - FETCH_PRIOR(TFetchOrientation.FETCH_PRIOR), - FETCH_RELATIVE(TFetchOrientation.FETCH_RELATIVE), - FETCH_ABSOLUTE(TFetchOrientation.FETCH_ABSOLUTE), - FETCH_FIRST(TFetchOrientation.FETCH_FIRST), - FETCH_LAST(TFetchOrientation.FETCH_LAST); - - private TFetchOrientation tFetchOrientation; - - FetchOrientation(TFetchOrientation tFetchOrientation) { - this.tFetchOrientation = tFetchOrientation; - } - - public static FetchOrientation getFetchOrientation(TFetchOrientation tFetchOrientation) { - for (FetchOrientation fetchOrientation : values()) { - if (tFetchOrientation.equals(fetchOrientation.toTFetchOrientation())) { - return fetchOrientation; - } - } - // TODO: Should this really default to FETCH_NEXT? - return FETCH_NEXT; - } - - public TFetchOrientation toTFetchOrientation() { - return tFetchOrientation; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/GetInfoType.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/GetInfoType.java deleted file mode 100644 index 8dd33a88fdeb2..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/GetInfoType.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TGetInfoType; - -/** - * GetInfoType. 
- * - */ -public enum GetInfoType { - CLI_MAX_DRIVER_CONNECTIONS(TGetInfoType.CLI_MAX_DRIVER_CONNECTIONS), - CLI_MAX_CONCURRENT_ACTIVITIES(TGetInfoType.CLI_MAX_CONCURRENT_ACTIVITIES), - CLI_DATA_SOURCE_NAME(TGetInfoType.CLI_DATA_SOURCE_NAME), - CLI_FETCH_DIRECTION(TGetInfoType.CLI_FETCH_DIRECTION), - CLI_SERVER_NAME(TGetInfoType.CLI_SERVER_NAME), - CLI_SEARCH_PATTERN_ESCAPE(TGetInfoType.CLI_SEARCH_PATTERN_ESCAPE), - CLI_DBMS_NAME(TGetInfoType.CLI_DBMS_NAME), - CLI_DBMS_VER(TGetInfoType.CLI_DBMS_VER), - CLI_ACCESSIBLE_TABLES(TGetInfoType.CLI_ACCESSIBLE_TABLES), - CLI_ACCESSIBLE_PROCEDURES(TGetInfoType.CLI_ACCESSIBLE_PROCEDURES), - CLI_CURSOR_COMMIT_BEHAVIOR(TGetInfoType.CLI_CURSOR_COMMIT_BEHAVIOR), - CLI_DATA_SOURCE_READ_ONLY(TGetInfoType.CLI_DATA_SOURCE_READ_ONLY), - CLI_DEFAULT_TXN_ISOLATION(TGetInfoType.CLI_DEFAULT_TXN_ISOLATION), - CLI_IDENTIFIER_CASE(TGetInfoType.CLI_IDENTIFIER_CASE), - CLI_IDENTIFIER_QUOTE_CHAR(TGetInfoType.CLI_IDENTIFIER_QUOTE_CHAR), - CLI_MAX_COLUMN_NAME_LEN(TGetInfoType.CLI_MAX_COLUMN_NAME_LEN), - CLI_MAX_CURSOR_NAME_LEN(TGetInfoType.CLI_MAX_CURSOR_NAME_LEN), - CLI_MAX_SCHEMA_NAME_LEN(TGetInfoType.CLI_MAX_SCHEMA_NAME_LEN), - CLI_MAX_CATALOG_NAME_LEN(TGetInfoType.CLI_MAX_CATALOG_NAME_LEN), - CLI_MAX_TABLE_NAME_LEN(TGetInfoType.CLI_MAX_TABLE_NAME_LEN), - CLI_SCROLL_CONCURRENCY(TGetInfoType.CLI_SCROLL_CONCURRENCY), - CLI_TXN_CAPABLE(TGetInfoType.CLI_TXN_CAPABLE), - CLI_USER_NAME(TGetInfoType.CLI_USER_NAME), - CLI_TXN_ISOLATION_OPTION(TGetInfoType.CLI_TXN_ISOLATION_OPTION), - CLI_INTEGRITY(TGetInfoType.CLI_INTEGRITY), - CLI_GETDATA_EXTENSIONS(TGetInfoType.CLI_GETDATA_EXTENSIONS), - CLI_NULL_COLLATION(TGetInfoType.CLI_NULL_COLLATION), - CLI_ALTER_TABLE(TGetInfoType.CLI_ALTER_TABLE), - CLI_ORDER_BY_COLUMNS_IN_SELECT(TGetInfoType.CLI_ORDER_BY_COLUMNS_IN_SELECT), - CLI_SPECIAL_CHARACTERS(TGetInfoType.CLI_SPECIAL_CHARACTERS), - CLI_MAX_COLUMNS_IN_GROUP_BY(TGetInfoType.CLI_MAX_COLUMNS_IN_GROUP_BY), - CLI_MAX_COLUMNS_IN_INDEX(TGetInfoType.CLI_MAX_COLUMNS_IN_INDEX), - CLI_MAX_COLUMNS_IN_ORDER_BY(TGetInfoType.CLI_MAX_COLUMNS_IN_ORDER_BY), - CLI_MAX_COLUMNS_IN_SELECT(TGetInfoType.CLI_MAX_COLUMNS_IN_SELECT), - CLI_MAX_COLUMNS_IN_TABLE(TGetInfoType.CLI_MAX_COLUMNS_IN_TABLE), - CLI_MAX_INDEX_SIZE(TGetInfoType.CLI_MAX_INDEX_SIZE), - CLI_MAX_ROW_SIZE(TGetInfoType.CLI_MAX_ROW_SIZE), - CLI_MAX_STATEMENT_LEN(TGetInfoType.CLI_MAX_STATEMENT_LEN), - CLI_MAX_TABLES_IN_SELECT(TGetInfoType.CLI_MAX_TABLES_IN_SELECT), - CLI_MAX_USER_NAME_LEN(TGetInfoType.CLI_MAX_USER_NAME_LEN), - CLI_OJ_CAPABILITIES(TGetInfoType.CLI_OJ_CAPABILITIES), - - CLI_XOPEN_CLI_YEAR(TGetInfoType.CLI_XOPEN_CLI_YEAR), - CLI_CURSOR_SENSITIVITY(TGetInfoType.CLI_CURSOR_SENSITIVITY), - CLI_DESCRIBE_PARAMETER(TGetInfoType.CLI_DESCRIBE_PARAMETER), - CLI_CATALOG_NAME(TGetInfoType.CLI_CATALOG_NAME), - CLI_COLLATION_SEQ(TGetInfoType.CLI_COLLATION_SEQ), - CLI_MAX_IDENTIFIER_LEN(TGetInfoType.CLI_MAX_IDENTIFIER_LEN); - - private final TGetInfoType tInfoType; - - GetInfoType(TGetInfoType tInfoType) { - this.tInfoType = tInfoType; - } - - public static GetInfoType getGetInfoType(TGetInfoType tGetInfoType) { - for (GetInfoType infoType : values()) { - if (tGetInfoType.equals(infoType.tInfoType)) { - return infoType; - } - } - throw new IllegalArgumentException("Unrecognized Thrift TGetInfoType value: " + tGetInfoType); - } - - public TGetInfoType toTGetInfoType() { - return tInfoType; - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/GetInfoValue.java 
b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/GetInfoValue.java deleted file mode 100644 index ba92ff4ab5c11..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/GetInfoValue.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TGetInfoValue; - -/** - * GetInfoValue. - * - */ -public class GetInfoValue { - private String stringValue = null; - private short shortValue; - private int intValue; - private long longValue; - - public GetInfoValue(String stringValue) { - this.stringValue = stringValue; - } - - public GetInfoValue(short shortValue) { - this.shortValue = shortValue; - } - - public GetInfoValue(int intValue) { - this.intValue = intValue; - } - - public GetInfoValue(long longValue) { - this.longValue = longValue; - } - - public GetInfoValue(TGetInfoValue tGetInfoValue) { - switch (tGetInfoValue.getSetField()) { - case STRING_VALUE: - stringValue = tGetInfoValue.getStringValue(); - break; - default: - throw new IllegalArgumentException("Unreconigzed TGetInfoValue"); - } - } - - public TGetInfoValue toTGetInfoValue() { - TGetInfoValue tInfoValue = new TGetInfoValue(); - if (stringValue != null) { - tInfoValue.setStringValue(stringValue); - } - return tInfoValue; - } - - public String getStringValue() { - return stringValue; - } - - public short getShortValue() { - return shortValue; - } - - public int getIntValue() { - return intValue; - } - - public long getLongValue() { - return longValue; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Handle.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Handle.java deleted file mode 100644 index cf3427ae20f3c..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Handle.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.THandleIdentifier; - - - - -public abstract class Handle { - - private final HandleIdentifier handleId; - - public Handle() { - handleId = new HandleIdentifier(); - } - - public Handle(HandleIdentifier handleId) { - this.handleId = handleId; - } - - public Handle(THandleIdentifier tHandleIdentifier) { - this.handleId = new HandleIdentifier(tHandleIdentifier); - } - - public HandleIdentifier getHandleIdentifier() { - return handleId; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((handleId == null) ? 0 : handleId.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (!(obj instanceof Handle)) { - return false; - } - Handle other = (Handle) obj; - if (handleId == null) { - if (other.handleId != null) { - return false; - } - } else if (!handleId.equals(other.handleId)) { - return false; - } - return true; - } - - @Override - public abstract String toString(); - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/HandleIdentifier.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/HandleIdentifier.java deleted file mode 100644 index 4dc80da8dc500..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/HandleIdentifier.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.nio.ByteBuffer; -import java.util.UUID; - -import org.apache.hive.service.cli.thrift.THandleIdentifier; - -/** - * HandleIdentifier. 
- * - */ -public class HandleIdentifier { - private final UUID publicId; - private final UUID secretId; - - public HandleIdentifier() { - publicId = UUID.randomUUID(); - secretId = UUID.randomUUID(); - } - - public HandleIdentifier(UUID publicId, UUID secretId) { - this.publicId = publicId; - this.secretId = secretId; - } - - public HandleIdentifier(THandleIdentifier tHandleId) { - ByteBuffer bb = ByteBuffer.wrap(tHandleId.getGuid()); - this.publicId = new UUID(bb.getLong(), bb.getLong()); - bb = ByteBuffer.wrap(tHandleId.getSecret()); - this.secretId = new UUID(bb.getLong(), bb.getLong()); - } - - public UUID getPublicId() { - return publicId; - } - - public UUID getSecretId() { - return secretId; - } - - public THandleIdentifier toTHandleIdentifier() { - byte[] guid = new byte[16]; - byte[] secret = new byte[16]; - ByteBuffer guidBB = ByteBuffer.wrap(guid); - ByteBuffer secretBB = ByteBuffer.wrap(secret); - guidBB.putLong(publicId.getMostSignificantBits()); - guidBB.putLong(publicId.getLeastSignificantBits()); - secretBB.putLong(secretId.getMostSignificantBits()); - secretBB.putLong(secretId.getLeastSignificantBits()); - return new THandleIdentifier(ByteBuffer.wrap(guid), ByteBuffer.wrap(secret)); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((publicId == null) ? 0 : publicId.hashCode()); - result = prime * result + ((secretId == null) ? 0 : secretId.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (!(obj instanceof HandleIdentifier)) { - return false; - } - HandleIdentifier other = (HandleIdentifier) obj; - if (publicId == null) { - if (other.publicId != null) { - return false; - } - } else if (!publicId.equals(other.publicId)) { - return false; - } - if (secretId == null) { - if (other.secretId != null) { - return false; - } - } else if (!secretId.equals(other.secretId)) { - return false; - } - return true; - } - - @Override - public String toString() { - return publicId.toString(); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/HiveSQLException.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/HiveSQLException.java deleted file mode 100644 index 86e57fbf31fe0..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/HiveSQLException.java +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
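[editor's note] The deleted HandleIdentifier above round-trips each UUID through Thrift as 16 raw bytes: the two longs (most- then least-significant bits) are written big-endian and read back in the same order. A minimal sketch mirroring that conversion (only the class name is invented):

    import java.nio.ByteBuffer;
    import java.util.UUID;

    class UuidBytesSketch {
      // Serialize a UUID as 16 bytes: MSB long followed by LSB long.
      static byte[] toBytes(UUID id) {
        ByteBuffer bb = ByteBuffer.allocate(16);
        bb.putLong(id.getMostSignificantBits());
        bb.putLong(id.getLeastSignificantBits());
        return bb.array();
      }

      // Rebuild the UUID by reading the two longs back in the same order.
      static UUID fromBytes(byte[] bytes) {
        ByteBuffer bb = ByteBuffer.wrap(bytes);
        return new UUID(bb.getLong(), bb.getLong());
      }

      public static void main(String[] args) {
        UUID id = UUID.randomUUID();
        System.out.println(id.equals(fromBytes(toBytes(id)))); // true
      }
    }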
- */ - -package org.apache.hive.service.cli; - -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hive.service.cli.thrift.TStatus; -import org.apache.hive.service.cli.thrift.TStatusCode; - -/** - * HiveSQLException. - * - */ -public class HiveSQLException extends SQLException { - - /** - * - */ - private static final long serialVersionUID = -6095254671958748094L; - - /** - * - */ - public HiveSQLException() { - super(); - } - - /** - * @param reason - */ - public HiveSQLException(String reason) { - super(reason); - } - - /** - * @param cause - */ - public HiveSQLException(Throwable cause) { - super(cause); - } - - /** - * @param reason - * @param sqlState - */ - public HiveSQLException(String reason, String sqlState) { - super(reason, sqlState); - } - - /** - * @param reason - * @param cause - */ - public HiveSQLException(String reason, Throwable cause) { - super(reason, cause); - } - - /** - * @param reason - * @param sqlState - * @param vendorCode - */ - public HiveSQLException(String reason, String sqlState, int vendorCode) { - super(reason, sqlState, vendorCode); - } - - /** - * @param reason - * @param sqlState - * @param cause - */ - public HiveSQLException(String reason, String sqlState, Throwable cause) { - super(reason, sqlState, cause); - } - - /** - * @param reason - * @param sqlState - * @param vendorCode - * @param cause - */ - public HiveSQLException(String reason, String sqlState, int vendorCode, Throwable cause) { - super(reason, sqlState, vendorCode, cause); - } - - public HiveSQLException(TStatus status) { - // TODO: set correct vendorCode field - super(status.getErrorMessage(), status.getSqlState(), status.getErrorCode()); - if (status.getInfoMessages() != null) { - initCause(toCause(status.getInfoMessages())); - } - } - - /** - * Converts current object to a {@link TStatus} object - * @return a {@link TStatus} object - */ - public TStatus toTStatus() { - // TODO: convert sqlState, etc. - TStatus tStatus = new TStatus(TStatusCode.ERROR_STATUS); - tStatus.setSqlState(getSQLState()); - tStatus.setErrorCode(getErrorCode()); - tStatus.setErrorMessage(getMessage()); - tStatus.setInfoMessages(toString(this)); - return tStatus; - } - - /** - * Converts the specified {@link Exception} object into a {@link TStatus} object - * @param e a {@link Exception} object - * @return a {@link TStatus} object - */ - public static TStatus toTStatus(Exception e) { - if (e instanceof HiveSQLException) { - return ((HiveSQLException)e).toTStatus(); - } - TStatus tStatus = new TStatus(TStatusCode.ERROR_STATUS); - tStatus.setErrorMessage(e.getMessage()); - tStatus.setInfoMessages(toString(e)); - return tStatus; - } - - /** - * Converts a {@link Throwable} object into a flattened list of texts including its stack trace - * and the stack traces of the nested causes. - * @param ex a {@link Throwable} object - * @return a flattened list of texts including the {@link Throwable} object's stack trace - * and the stack traces of the nested causes. 
- */ - public static List toString(Throwable ex) { - return toString(ex, null); - } - - private static List toString(Throwable cause, StackTraceElement[] parent) { - StackTraceElement[] trace = cause.getStackTrace(); - int m = trace.length - 1; - if (parent != null) { - int n = parent.length - 1; - while (m >= 0 && n >= 0 && trace[m].equals(parent[n])) { - m--; - n--; - } - } - List detail = enroll(cause, trace, m); - cause = cause.getCause(); - if (cause != null) { - detail.addAll(toString(cause, trace)); - } - return detail; - } - - private static List enroll(Throwable ex, StackTraceElement[] trace, int max) { - List details = new ArrayList(); - StringBuilder builder = new StringBuilder(); - builder.append('*').append(ex.getClass().getName()).append(':'); - builder.append(ex.getMessage()).append(':'); - builder.append(trace.length).append(':').append(max); - details.add(builder.toString()); - for (int i = 0; i <= max; i++) { - builder.setLength(0); - builder.append(trace[i].getClassName()).append(':'); - builder.append(trace[i].getMethodName()).append(':'); - String fileName = trace[i].getFileName(); - builder.append(fileName == null ? "" : fileName).append(':'); - builder.append(trace[i].getLineNumber()); - details.add(builder.toString()); - } - return details; - } - - /** - * Converts a flattened list of texts including the stack trace and the stack - * traces of the nested causes into a {@link Throwable} object. - * @param details a flattened list of texts including the stack trace and the stack - * traces of the nested causes - * @return a {@link Throwable} object - */ - public static Throwable toCause(List details) { - return toStackTrace(details, null, 0); - } - - private static Throwable toStackTrace(List details, StackTraceElement[] parent, int index) { - String detail = details.get(index++); - if (!detail.startsWith("*")) { - return null; // should not be happened. 
ignore remaining - } - int i1 = detail.indexOf(':'); - int i3 = detail.lastIndexOf(':'); - int i2 = detail.substring(0, i3).lastIndexOf(':'); - String exceptionClass = detail.substring(1, i1); - String exceptionMessage = detail.substring(i1 + 1, i2); - Throwable ex = newInstance(exceptionClass, exceptionMessage); - - Integer length = Integer.valueOf(detail.substring(i2 + 1, i3)); - Integer unique = Integer.valueOf(detail.substring(i3 + 1)); - - int i = 0; - StackTraceElement[] trace = new StackTraceElement[length]; - for (; i <= unique; i++) { - detail = details.get(index++); - int j1 = detail.indexOf(':'); - int j3 = detail.lastIndexOf(':'); - int j2 = detail.substring(0, j3).lastIndexOf(':'); - String className = detail.substring(0, j1); - String methodName = detail.substring(j1 + 1, j2); - String fileName = detail.substring(j2 + 1, j3); - if (fileName.isEmpty()) { - fileName = null; - } - int lineNumber = Integer.valueOf(detail.substring(j3 + 1)); - trace[i] = new StackTraceElement(className, methodName, fileName, lineNumber); - } - int common = trace.length - i; - if (common > 0) { - System.arraycopy(parent, parent.length - common, trace, trace.length - common, common); - } - if (details.size() > index) { - ex.initCause(toStackTrace(details, trace, index)); - } - ex.setStackTrace(trace); - return ex; - } - - private static Throwable newInstance(String className, String message) { - try { - return (Throwable)Class.forName(className).getConstructor(String.class).newInstance(message); - } catch (Exception e) { - return new RuntimeException(className + ":" + message); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ICLIService.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ICLIService.java deleted file mode 100644 index c9cc1f4da56f1..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/ICLIService.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
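Editor's note: the HiveSQLException helpers above flatten a Throwable into a list of strings, a header of the form *className:message:traceLength:uniqueFrames followed by one className:methodName:fileName:lineNumber entry per frame, with frames shared with the enclosing trace elided. A hedged usage sketch, assuming these thriftserver classes are on the classpath:

import java.util.List;
import org.apache.hive.service.cli.HiveSQLException;

// Sketch: round-trip an exception through the flattened text form shown above.
public class TraceRoundTrip {
  public static void main(String[] args) {
    Exception original = new IllegalStateException("boom",
        new RuntimeException("root cause"));
    List flattened = HiveSQLException.toString(original);    // header + frame lines
    Throwable rebuilt = HiveSQLException.toCause(flattened);  // reconstructed cause chain
    System.out.println(rebuilt.getMessage());                 // boom
    System.out.println(rebuilt.getCause().getMessage());      // root cause
  }
}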
- */ -package org.apache.hive.service.cli; - -import java.util.List; -import java.util.Map; - - - - -import org.apache.hive.service.auth.HiveAuthFactory; - -public interface ICLIService { - - SessionHandle openSession(String username, String password, - Map configuration) - throws HiveSQLException; - - SessionHandle openSessionWithImpersonation(String username, String password, - Map configuration, String delegationToken) - throws HiveSQLException; - - void closeSession(SessionHandle sessionHandle) - throws HiveSQLException; - - GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType) - throws HiveSQLException; - - OperationHandle executeStatement(SessionHandle sessionHandle, String statement, - Map confOverlay) - throws HiveSQLException; - - OperationHandle executeStatementAsync(SessionHandle sessionHandle, - String statement, Map confOverlay) - throws HiveSQLException; - - OperationHandle getTypeInfo(SessionHandle sessionHandle) - throws HiveSQLException; - - OperationHandle getCatalogs(SessionHandle sessionHandle) - throws HiveSQLException; - - OperationHandle getSchemas(SessionHandle sessionHandle, - String catalogName, String schemaName) - throws HiveSQLException; - - OperationHandle getTables(SessionHandle sessionHandle, - String catalogName, String schemaName, String tableName, List tableTypes) - throws HiveSQLException; - - OperationHandle getTableTypes(SessionHandle sessionHandle) - throws HiveSQLException; - - OperationHandle getColumns(SessionHandle sessionHandle, - String catalogName, String schemaName, String tableName, String columnName) - throws HiveSQLException; - - OperationHandle getFunctions(SessionHandle sessionHandle, - String catalogName, String schemaName, String functionName) - throws HiveSQLException; - - OperationStatus getOperationStatus(OperationHandle opHandle) - throws HiveSQLException; - - void cancelOperation(OperationHandle opHandle) - throws HiveSQLException; - - void closeOperation(OperationHandle opHandle) - throws HiveSQLException; - - TableSchema getResultSetMetadata(OperationHandle opHandle) - throws HiveSQLException; - - RowSet fetchResults(OperationHandle opHandle) - throws HiveSQLException; - - RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, - long maxRows, FetchType fetchType) throws HiveSQLException; - - String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String owner, String renewer) throws HiveSQLException; - - void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException; - - void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException; - - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationHandle.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationHandle.java deleted file mode 100644 index 5426e28471239..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationHandle.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
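Editor's note: ICLIService above is the full CLI surface; a typical client interaction opens a session, executes a statement, fetches results, then closes the operation and the session. A sketch against the interface only; the service instance is assumed to come from whatever implementation is wired in (for example the embedded CLIService):

import java.util.Collections;
import org.apache.hive.service.cli.*;

// Sketch of the usual ICLIService call sequence.
public class CliServiceFlow {
  static void runQuery(ICLIService service) throws HiveSQLException {
    SessionHandle session = service.openSession("anonymous", "", Collections.emptyMap());
    try {
      OperationHandle op =
          service.executeStatement(session, "SELECT 1", Collections.emptyMap());
      RowSet rows = service.fetchResults(op);  // default orientation and fetch size
      System.out.println(rows.numRows() + " row(s) in the first batch");
      service.closeOperation(op);
    } finally {
      service.closeSession(session);
    }
  }
}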
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TOperationHandle; -import org.apache.hive.service.cli.thrift.TProtocolVersion; - -public class OperationHandle extends Handle { - - private final OperationType opType; - private final TProtocolVersion protocol; - private boolean hasResultSet = false; - - public OperationHandle(OperationType opType, TProtocolVersion protocol) { - super(); - this.opType = opType; - this.protocol = protocol; - } - - // dummy handle for ThriftCLIService - public OperationHandle(TOperationHandle tOperationHandle) { - this(tOperationHandle, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1); - } - - public OperationHandle(TOperationHandle tOperationHandle, TProtocolVersion protocol) { - super(tOperationHandle.getOperationId()); - this.opType = OperationType.getOperationType(tOperationHandle.getOperationType()); - this.hasResultSet = tOperationHandle.isHasResultSet(); - this.protocol = protocol; - } - - public OperationType getOperationType() { - return opType; - } - - public void setHasResultSet(boolean hasResultSet) { - this.hasResultSet = hasResultSet; - } - - public boolean hasResultSet() { - return hasResultSet; - } - - public TOperationHandle toTOperationHandle() { - TOperationHandle tOperationHandle = new TOperationHandle(); - tOperationHandle.setOperationId(getHandleIdentifier().toTHandleIdentifier()); - tOperationHandle.setOperationType(opType.toTOperationType()); - tOperationHandle.setHasResultSet(hasResultSet); - return tOperationHandle; - } - - public TProtocolVersion getProtocolVersion() { - return protocol; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = super.hashCode(); - result = prime * result + ((opType == null) ? 0 : opType.hashCode()); - return result; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (!super.equals(obj)) { - return false; - } - if (!(obj instanceof OperationHandle)) { - return false; - } - OperationHandle other = (OperationHandle) obj; - if (opType != other.opType) { - return false; - } - return true; - } - - @Override - public String toString() { - return "OperationHandle [opType=" + opType + ", getHandleIdentifier()=" + getHandleIdentifier() - + "]"; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationState.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationState.java deleted file mode 100644 index 1165180118413..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationState.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TOperationState; - -/** - * OperationState. - * - */ -public enum OperationState { - INITIALIZED(TOperationState.INITIALIZED_STATE, false), - RUNNING(TOperationState.RUNNING_STATE, false), - FINISHED(TOperationState.FINISHED_STATE, true), - CANCELED(TOperationState.CANCELED_STATE, true), - CLOSED(TOperationState.CLOSED_STATE, true), - ERROR(TOperationState.ERROR_STATE, true), - UNKNOWN(TOperationState.UKNOWN_STATE, false), - PENDING(TOperationState.PENDING_STATE, false); - - private final TOperationState tOperationState; - private final boolean terminal; - - OperationState(TOperationState tOperationState, boolean terminal) { - this.tOperationState = tOperationState; - this.terminal = terminal; - } - - // must be sync with TOperationState in order - public static OperationState getOperationState(TOperationState tOperationState) { - return OperationState.values()[tOperationState.getValue()]; - } - - public static void validateTransition(OperationState oldState, - OperationState newState) - throws HiveSQLException { - switch (oldState) { - case INITIALIZED: - switch (newState) { - case PENDING: - case RUNNING: - case CANCELED: - case CLOSED: - return; - } - break; - case PENDING: - switch (newState) { - case RUNNING: - case FINISHED: - case CANCELED: - case ERROR: - case CLOSED: - return; - } - break; - case RUNNING: - switch (newState) { - case FINISHED: - case CANCELED: - case ERROR: - case CLOSED: - return; - } - break; - case FINISHED: - case CANCELED: - case ERROR: - if (OperationState.CLOSED.equals(newState)) { - return; - } - break; - default: - // fall-through - } - throw new HiveSQLException("Illegal Operation state transition " + - "from " + oldState + " to " + newState); - } - - public void validateTransition(OperationState newState) - throws HiveSQLException { - validateTransition(this, newState); - } - - public TOperationState toTOperationState() { - return tOperationState; - } - - public boolean isTerminal() { - return terminal; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationType.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationType.java deleted file mode 100644 index 429d9a4c25688..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/OperationType.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
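Editor's note: OperationState.validateTransition above enforces the lifecycle INITIALIZED/PENDING -> RUNNING -> FINISHED/CANCELED/ERROR -> CLOSED. A small sketch of allowed and rejected moves:

import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.OperationState;

// Sketch of the state machine enforced by validateTransition above.
public class StateTransitions {
  public static void main(String[] args) {
    try {
      OperationState.INITIALIZED.validateTransition(OperationState.RUNNING); // allowed
      OperationState.RUNNING.validateTransition(OperationState.FINISHED);    // allowed
      OperationState.FINISHED.validateTransition(OperationState.RUNNING);    // rejected
    } catch (HiveSQLException e) {
      // "Illegal Operation state transition from FINISHED to RUNNING"
      System.out.println(e.getMessage());
    }
  }
}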
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TOperationType; - -/** - * OperationType. - * - */ -public enum OperationType { - - UNKNOWN_OPERATION(TOperationType.UNKNOWN), - EXECUTE_STATEMENT(TOperationType.EXECUTE_STATEMENT), - GET_TYPE_INFO(TOperationType.GET_TYPE_INFO), - GET_CATALOGS(TOperationType.GET_CATALOGS), - GET_SCHEMAS(TOperationType.GET_SCHEMAS), - GET_TABLES(TOperationType.GET_TABLES), - GET_TABLE_TYPES(TOperationType.GET_TABLE_TYPES), - GET_COLUMNS(TOperationType.GET_COLUMNS), - GET_FUNCTIONS(TOperationType.GET_FUNCTIONS); - - private TOperationType tOperationType; - - OperationType(TOperationType tOpType) { - this.tOperationType = tOpType; - } - - public static OperationType getOperationType(TOperationType tOperationType) { - // TODO: replace this with a Map? - for (OperationType opType : values()) { - if (tOperationType.equals(opType.tOperationType)) { - return opType; - } - } - return OperationType.UNKNOWN_OPERATION; - } - - public TOperationType toTOperationType() { - return tOperationType; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/PatternOrIdentifier.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/PatternOrIdentifier.java deleted file mode 100644 index 6e4d43fd5df63..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/PatternOrIdentifier.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -/** - * PatternOrIdentifier. 
- * - */ -public class PatternOrIdentifier { - - boolean isPattern = false; - String text; - - public PatternOrIdentifier(String tpoi) { - text = tpoi; - isPattern = false; - } - - public boolean isPattern() { - return isPattern; - } - - public boolean isIdentifier() { - return !isPattern; - } - - @Override - public String toString() { - return text; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowBasedSet.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowBasedSet.java deleted file mode 100644 index 7452137f077db..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowBasedSet.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; - -import org.apache.hive.service.cli.thrift.TColumnValue; -import org.apache.hive.service.cli.thrift.TRow; -import org.apache.hive.service.cli.thrift.TRowSet; - -/** - * RowBasedSet - */ -public class RowBasedSet implements RowSet { - - private long startOffset; - - private final Type[] types; // non-null only for writing (server-side) - private final RemovableList rows; - - public RowBasedSet(TableSchema schema) { - types = schema.toTypes(); - rows = new RemovableList(); - } - - public RowBasedSet(TRowSet tRowSet) { - types = null; - rows = new RemovableList(tRowSet.getRows()); - startOffset = tRowSet.getStartRowOffset(); - } - - private RowBasedSet(Type[] types, List rows, long startOffset) { - this.types = types; - this.rows = new RemovableList(rows); - this.startOffset = startOffset; - } - - @Override - public RowBasedSet addRow(Object[] fields) { - TRow tRow = new TRow(); - for (int i = 0; i < fields.length; i++) { - tRow.addToColVals(ColumnValue.toTColumnValue(types[i], fields[i])); - } - rows.add(tRow); - return this; - } - - @Override - public int numColumns() { - return rows.isEmpty() ? 
0 : rows.get(0).getColVals().size(); - } - - @Override - public int numRows() { - return rows.size(); - } - - public RowBasedSet extractSubset(int maxRows) { - int numRows = Math.min(numRows(), maxRows); - RowBasedSet result = new RowBasedSet(types, rows.subList(0, numRows), startOffset); - rows.removeRange(0, numRows); - startOffset += numRows; - return result; - } - - public long getStartOffset() { - return startOffset; - } - - public void setStartOffset(long startOffset) { - this.startOffset = startOffset; - } - - public int getSize() { - return rows.size(); - } - - public TRowSet toTRowSet() { - TRowSet tRowSet = new TRowSet(); - tRowSet.setStartRowOffset(startOffset); - tRowSet.setRows(new ArrayList(rows)); - return tRowSet; - } - - @Override - public Iterator iterator() { - return new Iterator() { - - final Iterator iterator = rows.iterator(); - final Object[] convey = new Object[numColumns()]; - - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public Object[] next() { - TRow row = iterator.next(); - List values = row.getColVals(); - for (int i = 0; i < values.size(); i++) { - convey[i] = ColumnValue.toColumnValue(values.get(i)); - } - return convey; - } - - @Override - public void remove() { - throw new UnsupportedOperationException("remove"); - } - }; - } - - private static class RemovableList extends ArrayList { - RemovableList() { super(); } - RemovableList(List rows) { super(rows); } - @Override - public void removeRange(int fromIndex, int toIndex) { - super.removeRange(fromIndex, toIndex); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowSet.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowSet.java deleted file mode 100644 index ab0787e1d389e..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowSet.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TRowSet; - -public interface RowSet extends Iterable { - - RowSet addRow(Object[] fields); - - RowSet extractSubset(int maxRows); - - int numColumns(); - - int numRows(); - - long getStartOffset(); - - void setStartOffset(long startOffset); - - TRowSet toTRowSet(); -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowSetFactory.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowSetFactory.java deleted file mode 100644 index e8f68eaaf9063..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/RowSetFactory.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import org.apache.hive.service.cli.thrift.TProtocolVersion; -import org.apache.hive.service.cli.thrift.TRowSet; - -import static org.apache.hive.service.cli.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6; - -public class RowSetFactory { - - public static RowSet create(TableSchema schema, TProtocolVersion version) { - if (version.getValue() >= HIVE_CLI_SERVICE_PROTOCOL_V6.getValue()) { - return new ColumnBasedSet(schema); - } - return new RowBasedSet(schema); - } - - public static RowSet create(TRowSet results, TProtocolVersion version) { - if (version.getValue() >= HIVE_CLI_SERVICE_PROTOCOL_V6.getValue()) { - return new ColumnBasedSet(results); - } - return new RowBasedSet(results); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/SessionHandle.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/SessionHandle.java deleted file mode 100644 index 52e0ad4834d8b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/SessionHandle.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
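Editor's note: RowSetFactory above switches the wire encoding on the protocol version, V6 and later clients get ColumnBasedSet while older ones get RowBasedSet. A hedged sketch, assuming the generated Thrift classes are available on the classpath:

import org.apache.hive.service.cli.RowSet;
import org.apache.hive.service.cli.RowSetFactory;
import org.apache.hive.service.cli.TableSchema;
import org.apache.hive.service.cli.Type;
import org.apache.hive.service.cli.thrift.TProtocolVersion;

// Sketch: protocol V6 and later get the columnar encoding, older clients the row-based one.
public class RowSetDemo {
  public static void main(String[] args) {
    TableSchema schema = new TableSchema()
        .addStringColumn("name", "demo column")
        .addPrimitiveColumn("age", Type.INT_TYPE, "demo column");

    RowSet columnar = RowSetFactory.create(schema, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V6);
    RowSet rowBased = RowSetFactory.create(schema, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1);

    rowBased.addRow(new Object[] {"alice", 30});
    System.out.println(rowBased.numRows());                   // 1
    System.out.println(columnar.getClass().getSimpleName());  // ColumnBasedSet
  }
}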
- */ - -package org.apache.hive.service.cli; - -import java.util.UUID; - -import org.apache.hive.service.cli.thrift.TProtocolVersion; -import org.apache.hive.service.cli.thrift.TSessionHandle; - - -/** - * SessionHandle. - * - */ -public class SessionHandle extends Handle { - - private final TProtocolVersion protocol; - - public SessionHandle(TProtocolVersion protocol) { - this.protocol = protocol; - } - - // dummy handle for ThriftCLIService - public SessionHandle(TSessionHandle tSessionHandle) { - this(tSessionHandle, TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V1); - } - - public SessionHandle(TSessionHandle tSessionHandle, TProtocolVersion protocol) { - super(tSessionHandle.getSessionId()); - this.protocol = protocol; - } - - public UUID getSessionId() { - return getHandleIdentifier().getPublicId(); - } - - public TSessionHandle toTSessionHandle() { - TSessionHandle tSessionHandle = new TSessionHandle(); - tSessionHandle.setSessionId(getHandleIdentifier().toTHandleIdentifier()); - return tSessionHandle; - } - - public TProtocolVersion getProtocolVersion() { - return protocol; - } - - @Override - public String toString() { - return "SessionHandle [" + getHandleIdentifier() + "]"; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TableSchema.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TableSchema.java deleted file mode 100644 index ee019bc737101..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TableSchema.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hive.service.cli.thrift.TColumnDesc; -import org.apache.hive.service.cli.thrift.TTableSchema; - -/** - * TableSchema. 
- * - */ -public class TableSchema { - private final List columns = new ArrayList(); - - public TableSchema() { - } - - public TableSchema(int numColumns) { - // TODO: remove this constructor - } - - public TableSchema(TTableSchema tTableSchema) { - for (TColumnDesc tColumnDesc : tTableSchema.getColumns()) { - columns.add(new ColumnDescriptor(tColumnDesc)); - } - } - - public TableSchema(List fieldSchemas) { - int pos = 1; - for (FieldSchema field : fieldSchemas) { - columns.add(new ColumnDescriptor(field, pos++)); - } - } - - public TableSchema(Schema schema) { - this(schema.getFieldSchemas()); - } - - public List getColumnDescriptors() { - return new ArrayList(columns); - } - - public ColumnDescriptor getColumnDescriptorAt(int pos) { - return columns.get(pos); - } - - public int getSize() { - return columns.size(); - } - - public void clear() { - columns.clear(); - } - - - public TTableSchema toTTableSchema() { - TTableSchema tTableSchema = new TTableSchema(); - for (ColumnDescriptor col : columns) { - tTableSchema.addToColumns(col.toTColumnDesc()); - } - return tTableSchema; - } - - public Type[] toTypes() { - Type[] types = new Type[columns.size()]; - for (int i = 0; i < types.length; i++) { - types[i] = columns.get(i).getType(); - } - return types; - } - - public TableSchema addPrimitiveColumn(String columnName, Type columnType, String columnComment) { - columns.add(ColumnDescriptor.newPrimitiveColumnDescriptor(columnName, columnComment, columnType, columns.size() + 1)); - return this; - } - - public TableSchema addStringColumn(String columnName, String columnComment) { - columns.add(ColumnDescriptor.newPrimitiveColumnDescriptor(columnName, columnComment, Type.STRING_TYPE, columns.size() + 1)); - return this; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Type.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Type.java deleted file mode 100644 index 7752ec03a29b7..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/Type.java +++ /dev/null @@ -1,349 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.sql.DatabaseMetaData; -import java.util.Locale; - -import org.apache.hadoop.hive.common.type.HiveDecimal; -import org.apache.hive.service.cli.thrift.TTypeId; - -/** - * Type. 
- * - */ -public enum Type { - NULL_TYPE("VOID", - java.sql.Types.NULL, - TTypeId.NULL_TYPE), - BOOLEAN_TYPE("BOOLEAN", - java.sql.Types.BOOLEAN, - TTypeId.BOOLEAN_TYPE), - TINYINT_TYPE("TINYINT", - java.sql.Types.TINYINT, - TTypeId.TINYINT_TYPE), - SMALLINT_TYPE("SMALLINT", - java.sql.Types.SMALLINT, - TTypeId.SMALLINT_TYPE), - INT_TYPE("INT", - java.sql.Types.INTEGER, - TTypeId.INT_TYPE), - BIGINT_TYPE("BIGINT", - java.sql.Types.BIGINT, - TTypeId.BIGINT_TYPE), - FLOAT_TYPE("FLOAT", - java.sql.Types.FLOAT, - TTypeId.FLOAT_TYPE), - DOUBLE_TYPE("DOUBLE", - java.sql.Types.DOUBLE, - TTypeId.DOUBLE_TYPE), - STRING_TYPE("STRING", - java.sql.Types.VARCHAR, - TTypeId.STRING_TYPE), - CHAR_TYPE("CHAR", - java.sql.Types.CHAR, - TTypeId.CHAR_TYPE, - true, false, false), - VARCHAR_TYPE("VARCHAR", - java.sql.Types.VARCHAR, - TTypeId.VARCHAR_TYPE, - true, false, false), - DATE_TYPE("DATE", - java.sql.Types.DATE, - TTypeId.DATE_TYPE), - TIMESTAMP_TYPE("TIMESTAMP", - java.sql.Types.TIMESTAMP, - TTypeId.TIMESTAMP_TYPE), - INTERVAL_YEAR_MONTH_TYPE("INTERVAL_YEAR_MONTH", - java.sql.Types.OTHER, - TTypeId.INTERVAL_YEAR_MONTH_TYPE), - INTERVAL_DAY_TIME_TYPE("INTERVAL_DAY_TIME", - java.sql.Types.OTHER, - TTypeId.INTERVAL_DAY_TIME_TYPE), - BINARY_TYPE("BINARY", - java.sql.Types.BINARY, - TTypeId.BINARY_TYPE), - DECIMAL_TYPE("DECIMAL", - java.sql.Types.DECIMAL, - TTypeId.DECIMAL_TYPE, - true, false, false), - ARRAY_TYPE("ARRAY", - java.sql.Types.ARRAY, - TTypeId.ARRAY_TYPE, - true, true), - MAP_TYPE("MAP", - java.sql.Types.JAVA_OBJECT, - TTypeId.MAP_TYPE, - true, true), - STRUCT_TYPE("STRUCT", - java.sql.Types.STRUCT, - TTypeId.STRUCT_TYPE, - true, false), - UNION_TYPE("UNIONTYPE", - java.sql.Types.OTHER, - TTypeId.UNION_TYPE, - true, false), - USER_DEFINED_TYPE("USER_DEFINED", - java.sql.Types.OTHER, - TTypeId.USER_DEFINED_TYPE, - true, false); - - private final String name; - private final TTypeId tType; - private final int javaSQLType; - private final boolean isQualified; - private final boolean isComplex; - private final boolean isCollection; - - Type(String name, int javaSQLType, TTypeId tType, boolean isQualified, boolean isComplex, boolean isCollection) { - this.name = name; - this.javaSQLType = javaSQLType; - this.tType = tType; - this.isQualified = isQualified; - this.isComplex = isComplex; - this.isCollection = isCollection; - } - - Type(String name, int javaSQLType, TTypeId tType, boolean isComplex, boolean isCollection) { - this(name, javaSQLType, tType, false, isComplex, isCollection); - } - - Type(String name, int javaSqlType, TTypeId tType) { - this(name, javaSqlType, tType, false, false, false); - } - - public boolean isPrimitiveType() { - return !isComplex; - } - - public boolean isQualifiedType() { - return isQualified; - } - - public boolean isComplexType() { - return isComplex; - } - - public boolean isCollectionType() { - return isCollection; - } - - public static Type getType(TTypeId tType) { - for (Type type : values()) { - if (tType.equals(type.tType)) { - return type; - } - } - throw new IllegalArgumentException("Unregonized Thrift TTypeId value: " + tType); - } - - public static Type getType(String name) { - if (name == null) { - throw new IllegalArgumentException("Invalid type name: null"); - } - for (Type type : values()) { - if (name.equalsIgnoreCase(type.name)) { - return type; - } else if (type.isQualifiedType() || type.isComplexType()) { - if (name.toUpperCase(Locale.ROOT).startsWith(type.name)) { - return type; - } - } - } - throw new IllegalArgumentException("Unrecognized type 
name: " + name); - } - - /** - * Radix for this type (typically either 2 or 10) - * Null is returned for data types where this is not applicable. - */ - public Integer getNumPrecRadix() { - if (this.isNumericType()) { - return 10; - } - return null; - } - - /** - * Maximum precision for numeric types. - * Returns null for non-numeric types. - * @return - */ - public Integer getMaxPrecision() { - switch (this) { - case TINYINT_TYPE: - return 3; - case SMALLINT_TYPE: - return 5; - case INT_TYPE: - return 10; - case BIGINT_TYPE: - return 19; - case FLOAT_TYPE: - return 7; - case DOUBLE_TYPE: - return 15; - case DECIMAL_TYPE: - return HiveDecimal.MAX_PRECISION; - default: - return null; - } - } - - public boolean isNumericType() { - switch (this) { - case TINYINT_TYPE: - case SMALLINT_TYPE: - case INT_TYPE: - case BIGINT_TYPE: - case FLOAT_TYPE: - case DOUBLE_TYPE: - case DECIMAL_TYPE: - return true; - default: - return false; - } - } - - /** - * Prefix used to quote a literal of this type (may be null) - */ - public String getLiteralPrefix() { - return null; - } - - /** - * Suffix used to quote a literal of this type (may be null) - * @return - */ - public String getLiteralSuffix() { - return null; - } - - /** - * Can you use NULL for this type? - * @return - * DatabaseMetaData.typeNoNulls - does not allow NULL values - * DatabaseMetaData.typeNullable - allows NULL values - * DatabaseMetaData.typeNullableUnknown - nullability unknown - */ - public Short getNullable() { - // All Hive types are nullable - return DatabaseMetaData.typeNullable; - } - - /** - * Is the type case sensitive? - * @return - */ - public Boolean isCaseSensitive() { - switch (this) { - case STRING_TYPE: - return true; - default: - return false; - } - } - - /** - * Parameters used in creating the type (may be null) - * @return - */ - public String getCreateParams() { - return null; - } - - /** - * Can you use WHERE based on this type? - * @return - * DatabaseMetaData.typePredNone - No support - * DatabaseMetaData.typePredChar - Only support with WHERE .. LIKE - * DatabaseMetaData.typePredBasic - Supported except for WHERE .. LIKE - * DatabaseMetaData.typeSearchable - Supported for all WHERE .. - */ - public Short getSearchable() { - if (isPrimitiveType()) { - return DatabaseMetaData.typeSearchable; - } - return DatabaseMetaData.typePredNone; - } - - /** - * Is this type unsigned? - * @return - */ - public Boolean isUnsignedAttribute() { - if (isNumericType()) { - return false; - } - return true; - } - - /** - * Can this type represent money? - * @return - */ - public Boolean isFixedPrecScale() { - return false; - } - - /** - * Can this type be used for an auto-increment value? - * @return - */ - public Boolean isAutoIncrement() { - return false; - } - - /** - * Localized version of type name (may be null). 
- * @return - */ - public String getLocalizedName() { - return null; - } - - /** - * Minimum scale supported for this type - * @return - */ - public Short getMinimumScale() { - return 0; - } - - /** - * Maximum scale supported for this type - * @return - */ - public Short getMaximumScale() { - return 0; - } - - public TTypeId toTType() { - return tType; - } - - public int toJavaSQLType() { - return javaSQLType; - } - - public String getName() { - return name; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java deleted file mode 100644 index b80fd67884add..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TypeDescriptor.java +++ /dev/null @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.util.List; - -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hive.service.cli.thrift.TPrimitiveTypeEntry; -import org.apache.hive.service.cli.thrift.TTypeDesc; -import org.apache.hive.service.cli.thrift.TTypeEntry; - -/** - * TypeDescriptor. 
- * - */ -public class TypeDescriptor { - - private final Type type; - private String typeName = null; - private TypeQualifiers typeQualifiers = null; - - public TypeDescriptor(Type type) { - this.type = type; - } - - public TypeDescriptor(TTypeDesc tTypeDesc) { - List tTypeEntries = tTypeDesc.getTypes(); - TPrimitiveTypeEntry top = tTypeEntries.get(0).getPrimitiveEntry(); - this.type = Type.getType(top.getType()); - if (top.isSetTypeQualifiers()) { - setTypeQualifiers(TypeQualifiers.fromTTypeQualifiers(top.getTypeQualifiers())); - } - } - - public TypeDescriptor(String typeName) { - this.type = Type.getType(typeName); - if (this.type.isComplexType()) { - this.typeName = typeName; - } else if (this.type.isQualifiedType()) { - PrimitiveTypeInfo pti = TypeInfoFactory.getPrimitiveTypeInfo(typeName); - setTypeQualifiers(TypeQualifiers.fromTypeInfo(pti)); - } - } - - public Type getType() { - return type; - } - - public TTypeDesc toTTypeDesc() { - TPrimitiveTypeEntry primitiveEntry = new TPrimitiveTypeEntry(type.toTType()); - if (getTypeQualifiers() != null) { - primitiveEntry.setTypeQualifiers(getTypeQualifiers().toTTypeQualifiers()); - } - TTypeEntry entry = TTypeEntry.primitiveEntry(primitiveEntry); - - TTypeDesc desc = new TTypeDesc(); - desc.addToTypes(entry); - return desc; - } - - public String getTypeName() { - if (typeName != null) { - return typeName; - } else { - return type.getName(); - } - } - - public TypeQualifiers getTypeQualifiers() { - return typeQualifiers; - } - - public void setTypeQualifiers(TypeQualifiers typeQualifiers) { - this.typeQualifiers = typeQualifiers; - } - - /** - * The column size for this type. - * For numeric data this is the maximum precision. - * For character data this is the length in characters. - * For datetime types this is the length in characters of the String representation - * (assuming the maximum allowed precision of the fractional seconds component). - * For binary data this is the length in bytes. - * Null is returned for data types where the column size is not applicable. - */ - public Integer getColumnSize() { - if (type.isNumericType()) { - return getPrecision(); - } - switch (type) { - case STRING_TYPE: - case BINARY_TYPE: - return Integer.MAX_VALUE; - case CHAR_TYPE: - case VARCHAR_TYPE: - return typeQualifiers.getCharacterMaximumLength(); - case DATE_TYPE: - return 10; - case TIMESTAMP_TYPE: - return 29; - default: - return null; - } - } - - /** - * Maximum precision for numeric types. - * Returns null for non-numeric types. - * @return - */ - public Integer getPrecision() { - if (this.type == Type.DECIMAL_TYPE) { - return typeQualifiers.getPrecision(); - } - return this.type.getMaxPrecision(); - } - - /** - * The number of fractional digits for this type. - * Null is returned for data types where this is not applicable. 
- */ - public Integer getDecimalDigits() { - switch (this.type) { - case BOOLEAN_TYPE: - case TINYINT_TYPE: - case SMALLINT_TYPE: - case INT_TYPE: - case BIGINT_TYPE: - return 0; - case FLOAT_TYPE: - return 7; - case DOUBLE_TYPE: - return 15; - case DECIMAL_TYPE: - return typeQualifiers.getScale(); - case TIMESTAMP_TYPE: - return 9; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java deleted file mode 100644 index c6da52c15a2b5..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/TypeQualifiers.java +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; -import org.apache.hive.service.cli.thrift.TCLIServiceConstants; -import org.apache.hive.service.cli.thrift.TTypeQualifierValue; -import org.apache.hive.service.cli.thrift.TTypeQualifiers; - -/** - * This class holds type qualifier information for a primitive type, - * such as char/varchar length or decimal precision/scale. 
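Editor's note: TypeDescriptor above derives the JDBC-style metadata (column size, precision, scale) from the qualifiers parsed out of the type string. A hedged sketch, assuming hive-serde's TypeInfoFactory is on the classpath so the qualified names parse:

import org.apache.hive.service.cli.TypeDescriptor;

// Sketch: qualifiers parsed from the type string drive the JDBC-style metadata.
public class DescriptorDemo {
  public static void main(String[] args) {
    TypeDescriptor dec = new TypeDescriptor("decimal(10,2)");
    System.out.println(dec.getPrecision());      // 10
    System.out.println(dec.getDecimalDigits());  // 2

    TypeDescriptor vc = new TypeDescriptor("varchar(64)");
    System.out.println(vc.getColumnSize());      // 64
  }
}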
- */ -public class TypeQualifiers { - private Integer characterMaximumLength; - private Integer precision; - private Integer scale; - - public TypeQualifiers() {} - - public Integer getCharacterMaximumLength() { - return characterMaximumLength; - } - public void setCharacterMaximumLength(int characterMaximumLength) { - this.characterMaximumLength = characterMaximumLength; - } - - public TTypeQualifiers toTTypeQualifiers() { - TTypeQualifiers ret = null; - - Map qMap = new HashMap(); - if (getCharacterMaximumLength() != null) { - TTypeQualifierValue val = new TTypeQualifierValue(); - val.setI32Value(getCharacterMaximumLength().intValue()); - qMap.put(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH, val); - } - - if (precision != null) { - TTypeQualifierValue val = new TTypeQualifierValue(); - val.setI32Value(precision.intValue()); - qMap.put(TCLIServiceConstants.PRECISION, val); - } - - if (scale != null) { - TTypeQualifierValue val = new TTypeQualifierValue(); - val.setI32Value(scale.intValue()); - qMap.put(TCLIServiceConstants.SCALE, val); - } - - if (qMap.size() > 0) { - ret = new TTypeQualifiers(qMap); - } - - return ret; - } - - public static TypeQualifiers fromTTypeQualifiers(TTypeQualifiers ttq) { - TypeQualifiers ret = null; - if (ttq != null) { - ret = new TypeQualifiers(); - Map tqMap = ttq.getQualifiers(); - - if (tqMap.containsKey(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH)) { - ret.setCharacterMaximumLength( - tqMap.get(TCLIServiceConstants.CHARACTER_MAXIMUM_LENGTH).getI32Value()); - } - - if (tqMap.containsKey(TCLIServiceConstants.PRECISION)) { - ret.setPrecision(tqMap.get(TCLIServiceConstants.PRECISION).getI32Value()); - } - - if (tqMap.containsKey(TCLIServiceConstants.SCALE)) { - ret.setScale(tqMap.get(TCLIServiceConstants.SCALE).getI32Value()); - } - } - return ret; - } - - public static TypeQualifiers fromTypeInfo(PrimitiveTypeInfo pti) { - TypeQualifiers result = null; - if (pti instanceof VarcharTypeInfo) { - result = new TypeQualifiers(); - result.setCharacterMaximumLength(((VarcharTypeInfo)pti).getLength()); - } else if (pti instanceof CharTypeInfo) { - result = new TypeQualifiers(); - result.setCharacterMaximumLength(((CharTypeInfo)pti).getLength()); - } else if (pti instanceof DecimalTypeInfo) { - result = new TypeQualifiers(); - result.setPrecision(((DecimalTypeInfo)pti).precision()); - result.setScale(((DecimalTypeInfo)pti).scale()); - } - return result; - } - - public Integer getPrecision() { - return precision; - } - - public void setPrecision(Integer precision) { - this.precision = precision; - } - - public Integer getScale() { - return scale; - } - - public void setScale(Integer scale) { - this.scale = scale; - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java deleted file mode 100644 index af36057bdaeca..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/ClassicTableTypeMapping.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.hive.metastore.TableType; - -/** - * ClassicTableTypeMapping. - * Classic table type mapping : - * Managed Table to Table - * External Table to Table - * Virtual View to View - */ -public class ClassicTableTypeMapping implements TableTypeMapping { - - public enum ClassicTableTypes { - TABLE, - VIEW, - } - - private final Map hiveToClientMap = new HashMap(); - private final Map clientToHiveMap = new HashMap(); - - public ClassicTableTypeMapping() { - hiveToClientMap.put(TableType.MANAGED_TABLE.toString(), - ClassicTableTypes.TABLE.toString()); - hiveToClientMap.put(TableType.EXTERNAL_TABLE.toString(), - ClassicTableTypes.TABLE.toString()); - hiveToClientMap.put(TableType.VIRTUAL_VIEW.toString(), - ClassicTableTypes.VIEW.toString()); - - clientToHiveMap.put(ClassicTableTypes.TABLE.toString(), - TableType.MANAGED_TABLE.toString()); - clientToHiveMap.put(ClassicTableTypes.VIEW.toString(), - TableType.VIRTUAL_VIEW.toString()); - } - - @Override - public String mapToHiveType(String clientTypeName) { - if (clientToHiveMap.containsKey(clientTypeName)) { - return clientToHiveMap.get(clientTypeName); - } else { - return clientTypeName; - } - } - - @Override - public String mapToClientType(String hiveTypeName) { - if (hiveToClientMap.containsKey(hiveTypeName)) { - return hiveToClientMap.get(hiveTypeName); - } else { - return hiveTypeName; - } - } - - @Override - public Set getTableTypeNames() { - Set typeNameSet = new HashSet(); - for (ClassicTableTypes typeNames : ClassicTableTypes.values()) { - typeNameSet.add(typeNames.toString()); - } - return typeNameSet; - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java deleted file mode 100644 index 6740d3bb59dc3..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/ExecuteStatementOperation.java +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
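Editor's note: ClassicTableTypeMapping above collapses managed and external tables into the single classic TABLE type and maps virtual views to VIEW. For example:

import org.apache.hive.service.cli.operation.ClassicTableTypeMapping;

// Sketch: both managed and external tables surface as the classic "TABLE" type.
public class TableTypeDemo {
  public static void main(String[] args) {
    ClassicTableTypeMapping mapping = new ClassicTableTypeMapping();
    System.out.println(mapping.mapToClientType("MANAGED_TABLE"));   // TABLE
    System.out.println(mapping.mapToClientType("EXTERNAL_TABLE"));  // TABLE
    System.out.println(mapping.mapToClientType("VIRTUAL_VIEW"));    // VIEW
    System.out.println(mapping.mapToHiveType("VIEW"));              // VIRTUAL_VIEW
  }
}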
- */ -package org.apache.hive.service.cli.operation; - -import java.sql.SQLException; -import java.util.HashMap; -import java.util.Map; - -import org.apache.hadoop.hive.ql.processors.CommandProcessor; -import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory; -import org.apache.hadoop.hive.ql.session.OperationLog; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.session.HiveSession; - -public abstract class ExecuteStatementOperation extends Operation { - protected String statement = null; - protected Map confOverlay = new HashMap(); - - public ExecuteStatementOperation(HiveSession parentSession, String statement, - Map confOverlay, boolean runInBackground) { - super(parentSession, OperationType.EXECUTE_STATEMENT, runInBackground); - this.statement = statement; - setConfOverlay(confOverlay); - } - - public String getStatement() { - return statement; - } - - public static ExecuteStatementOperation newExecuteStatementOperation( - HiveSession parentSession, String statement, Map confOverlay, boolean runAsync) - throws HiveSQLException { - String[] tokens = statement.trim().split("\\s+"); - CommandProcessor processor = null; - try { - processor = CommandProcessorFactory.getForHiveCommand(tokens, parentSession.getHiveConf()); - } catch (SQLException e) { - throw new HiveSQLException(e.getMessage(), e.getSQLState(), e); - } - if (processor == null) { - return new SQLOperation(parentSession, statement, confOverlay, runAsync); - } - return new HiveCommandOperation(parentSession, statement, processor, confOverlay); - } - - protected Map getConfOverlay() { - return confOverlay; - } - - protected void setConfOverlay(Map confOverlay) { - if (confOverlay != null) { - this.confOverlay = confOverlay; - } - } - - protected void registerCurrentOperationLog() { - if (isOperationLogEnabled) { - if (operationLog == null) { - LOG.warn("Failed to get current OperationLog object of Operation: " + - getHandle().getHandleIdentifier()); - isOperationLogEnabled = false; - return; - } - OperationLog.setCurrentOperationLog(operationLog); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java deleted file mode 100644 index 581d975344060..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetCatalogsOperation.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
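Editor's note: newExecuteStatementOperation above routes a statement through CommandProcessorFactory; when no command processor matches, the statement is handled as SQL (SQLOperation), otherwise it becomes a HiveCommandOperation. A compile-level sketch only, since it needs a live HiveSession, and the "set" routing depends on CommandProcessorFactory recognizing it as a Hive command:

import java.util.Collections;
import org.apache.hive.service.cli.HiveSQLException;
import org.apache.hive.service.cli.operation.ExecuteStatementOperation;
import org.apache.hive.service.cli.session.HiveSession;

// Sketch: the factory above typically returns a SQLOperation for plain SQL and a
// HiveCommandOperation for recognized Hive commands such as "set".
public class OperationFactoryDemo {
  static ExecuteStatementOperation forSql(HiveSession session) throws HiveSQLException {
    return ExecuteStatementOperation.newExecuteStatementOperation(
        session, "SELECT 1", Collections.emptyMap(), /* runAsync */ true);
  }

  static ExecuteStatementOperation forCommand(HiveSession session) throws HiveSQLException {
    return ExecuteStatementOperation.newExecuteStatementOperation(
        session, "set hive.fetch.task.conversion=none", Collections.emptyMap(), false);
  }
}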
- */ - -package org.apache.hive.service.cli.operation; - -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * GetCatalogsOperation. - * - */ -public class GetCatalogsOperation extends MetadataOperation { - private static final TableSchema RESULT_SET_SCHEMA = new TableSchema() - .addStringColumn("TABLE_CAT", "Catalog name. NULL if not applicable."); - - protected final RowSet rowSet; - - protected GetCatalogsOperation(HiveSession parentSession) { - super(parentSession, OperationType.GET_CATALOGS); - rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - try { - if (isAuthV2Enabled()) { - authorizeMetaGets(HiveOperationType.GET_CATALOGS, null); - } - setState(OperationState.FINISHED); - } catch (HiveSQLException e) { - setState(OperationState.ERROR); - throw e; - } - - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - return RESULT_SET_SCHEMA; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - assertState(OperationState.FINISHED); - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - rowSet.setStartOffset(0); - } - return rowSet.extractSubset((int)maxRows); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java deleted file mode 100644 index 96ba4890075ac..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetColumnsOperation.java +++ /dev/null @@ -1,234 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
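GetCatalogsOperation and the other metadata operations below all follow the same fetch contract: rows are materialized into a RowSet during runInternal, and getNextRowSet pages through them, with FETCH_FIRST rewinding to the start. Modeled here against a plain List as an approximation of RowSet/extractSubset:

// Illustrative only: the paging behavior of getNextRowSet, with a simple cursor over
// a prebuilt list. The real code also asserts OperationState.FINISHED and validates
// the fetch orientation before returning rows.
import java.util.Arrays;
import java.util.List;

public class FetchSketch {
  enum FetchOrientation { FETCH_NEXT, FETCH_FIRST }

  private final List<String> rows = Arrays.asList("r1", "r2", "r3", "r4", "r5");
  private int offset = 0;

  List<String> getNextRowSet(FetchOrientation orientation, int maxRows) {
    if (orientation == FetchOrientation.FETCH_FIRST) {
      offset = 0; // mirrors rowSet.setStartOffset(0) in the deleted code
    }
    int end = Math.min(offset + maxRows, rows.size());
    List<String> batch = rows.subList(offset, end);
    offset = end;
    return batch;
  }

  public static void main(String[] args) {
    FetchSketch f = new FetchSketch();
    System.out.println(f.getNextRowSet(FetchOrientation.FETCH_NEXT, 2));  // [r1, r2]
    System.out.println(f.getNextRowSet(FetchOrientation.FETCH_NEXT, 2));  // [r3, r4]
    System.out.println(f.getNextRowSet(FetchOrientation.FETCH_FIRST, 2)); // [r1, r2]
  }
}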
- */ - -package org.apache.hive.service.cli.operation; - -import java.sql.DatabaseMetaData; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.regex.Pattern; - -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject.HivePrivilegeObjectType; -import org.apache.hive.service.cli.ColumnDescriptor; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.Type; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * GetColumnsOperation. - * - */ -public class GetColumnsOperation extends MetadataOperation { - - protected static final TableSchema RESULT_SET_SCHEMA = new TableSchema() - .addPrimitiveColumn("TABLE_CAT", Type.STRING_TYPE, - "Catalog name. NULL if not applicable") - .addPrimitiveColumn("TABLE_SCHEM", Type.STRING_TYPE, - "Schema name") - .addPrimitiveColumn("TABLE_NAME", Type.STRING_TYPE, - "Table name") - .addPrimitiveColumn("COLUMN_NAME", Type.STRING_TYPE, - "Column name") - .addPrimitiveColumn("DATA_TYPE", Type.INT_TYPE, - "SQL type from java.sql.Types") - .addPrimitiveColumn("TYPE_NAME", Type.STRING_TYPE, - "Data source dependent type name, for a UDT the type name is fully qualified") - .addPrimitiveColumn("COLUMN_SIZE", Type.INT_TYPE, - "Column size. For char or date types this is the maximum number of characters," - + " for numeric or decimal types this is precision.") - .addPrimitiveColumn("BUFFER_LENGTH", Type.TINYINT_TYPE, - "Unused") - .addPrimitiveColumn("DECIMAL_DIGITS", Type.INT_TYPE, - "The number of fractional digits") - .addPrimitiveColumn("NUM_PREC_RADIX", Type.INT_TYPE, - "Radix (typically either 10 or 2)") - .addPrimitiveColumn("NULLABLE", Type.INT_TYPE, - "Is NULL allowed") - .addPrimitiveColumn("REMARKS", Type.STRING_TYPE, - "Comment describing column (may be null)") - .addPrimitiveColumn("COLUMN_DEF", Type.STRING_TYPE, - "Default value (may be null)") - .addPrimitiveColumn("SQL_DATA_TYPE", Type.INT_TYPE, - "Unused") - .addPrimitiveColumn("SQL_DATETIME_SUB", Type.INT_TYPE, - "Unused") - .addPrimitiveColumn("CHAR_OCTET_LENGTH", Type.INT_TYPE, - "For char types the maximum number of bytes in the column") - .addPrimitiveColumn("ORDINAL_POSITION", Type.INT_TYPE, - "Index of column in table (starting at 1)") - .addPrimitiveColumn("IS_NULLABLE", Type.STRING_TYPE, - "\"NO\" means column definitely does not allow NULL values; " - + "\"YES\" means the column might allow NULL values. 
An empty " - + "string means nobody knows.") - .addPrimitiveColumn("SCOPE_CATALOG", Type.STRING_TYPE, - "Catalog of table that is the scope of a reference attribute " - + "(null if DATA_TYPE isn't REF)") - .addPrimitiveColumn("SCOPE_SCHEMA", Type.STRING_TYPE, - "Schema of table that is the scope of a reference attribute " - + "(null if the DATA_TYPE isn't REF)") - .addPrimitiveColumn("SCOPE_TABLE", Type.STRING_TYPE, - "Table name that this the scope of a reference attribure " - + "(null if the DATA_TYPE isn't REF)") - .addPrimitiveColumn("SOURCE_DATA_TYPE", Type.SMALLINT_TYPE, - "Source type of a distinct type or user-generated Ref type, " - + "SQL type from java.sql.Types (null if DATA_TYPE isn't DISTINCT or user-generated REF)") - .addPrimitiveColumn("IS_AUTO_INCREMENT", Type.STRING_TYPE, - "Indicates whether this column is auto incremented."); - - private final String catalogName; - private final String schemaName; - private final String tableName; - private final String columnName; - - protected final RowSet rowSet; - - protected GetColumnsOperation(HiveSession parentSession, String catalogName, String schemaName, - String tableName, String columnName) { - super(parentSession, OperationType.GET_COLUMNS); - this.catalogName = catalogName; - this.schemaName = schemaName; - this.tableName = tableName; - this.columnName = columnName; - this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - try { - IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); - String schemaPattern = convertSchemaPattern(schemaName); - String tablePattern = convertIdentifierPattern(tableName, true); - - Pattern columnPattern = null; - if (columnName != null) { - columnPattern = Pattern.compile(convertIdentifierPattern(columnName, false)); - } - - List dbNames = metastoreClient.getDatabases(schemaPattern); - Collections.sort(dbNames); - Map> db2Tabs = new HashMap<>(); - - for (String dbName : dbNames) { - List tableNames = metastoreClient.getTables(dbName, tablePattern); - Collections.sort(tableNames); - db2Tabs.put(dbName, tableNames); - } - - if (isAuthV2Enabled()) { - List privObjs = getPrivObjs(db2Tabs); - String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName - + ", tablePattern : " + tableName; - authorizeMetaGets(HiveOperationType.GET_COLUMNS, privObjs, cmdStr); - } - - for (Entry> dbTabs : db2Tabs.entrySet()) { - String dbName = dbTabs.getKey(); - List tableNames = dbTabs.getValue(); - for (Table table : metastoreClient.getTableObjectsByName(dbName, tableNames)) { - TableSchema schema = new TableSchema(metastoreClient.getSchema(dbName, table.getTableName())); - for (ColumnDescriptor column : schema.getColumnDescriptors()) { - if (columnPattern != null && !columnPattern.matcher(column.getName()).matches()) { - continue; - } - Object[] rowData = new Object[] { - null, // TABLE_CAT - table.getDbName(), // TABLE_SCHEM - table.getTableName(), // TABLE_NAME - column.getName(), // COLUMN_NAME - column.getType().toJavaSQLType(), // DATA_TYPE - column.getTypeName(), // TYPE_NAME - column.getTypeDescriptor().getColumnSize(), // COLUMN_SIZE - null, // BUFFER_LENGTH, unused - column.getTypeDescriptor().getDecimalDigits(), // DECIMAL_DIGITS - column.getType().getNumPrecRadix(), // NUM_PREC_RADIX - DatabaseMetaData.columnNullable, // NULLABLE - column.getComment(), // REMARKS - null, // COLUMN_DEF - null, // SQL_DATA_TYPE - null, // 
SQL_DATETIME_SUB - null, // CHAR_OCTET_LENGTH - column.getOrdinalPosition(), // ORDINAL_POSITION - "YES", // IS_NULLABLE - null, // SCOPE_CATALOG - null, // SCOPE_SCHEMA - null, // SCOPE_TABLE - null, // SOURCE_DATA_TYPE - "NO", // IS_AUTO_INCREMENT - }; - rowSet.addRow(rowData); - } - } - } - setState(OperationState.FINISHED); - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException(e); - } - - } - - - private List getPrivObjs(Map> db2Tabs) { - List privObjs = new ArrayList<>(); - for (Entry> dbTabs : db2Tabs.entrySet()) { - for (String tabName : dbTabs.getValue()) { - privObjs.add(new HivePrivilegeObject(HivePrivilegeObjectType.TABLE_OR_VIEW, dbTabs.getKey(), - tabName)); - } - } - return privObjs; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - assertState(OperationState.FINISHED); - return RESULT_SET_SCHEMA; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - assertState(OperationState.FINISHED); - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - rowSet.setStartOffset(0); - } - return rowSet.extractSubset((int)maxRows); - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java deleted file mode 100644 index 5dec8bdbf45de..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetFunctionsOperation.java +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
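In GetColumnsOperation above, the schema and table patterns are converted once and pushed to the metastore, while the column pattern is compiled locally and applied to each column name as rows are emitted. That local filtering step, reduced to plain java.util.regex; the column names are made up for illustration:

// Illustrative only: column filtering as in GetColumnsOperation, using the regex
// form of a JDBC identifier pattern ('%' -> ".*", '_' -> ".").
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class ColumnFilterSketch {
  public static void main(String[] args) {
    // "emp%" converted with datanucleusFormat=false becomes the regex "emp.*"
    Pattern columnPattern = Pattern.compile("emp.*");
    List<String> columns = Arrays.asList("emp_id", "emp_name", "dept_id");
    for (String name : columns) {
      if (columnPattern.matcher(name).matches()) {
        System.out.println(name); // prints emp_id and emp_name, skips dept_id
      }
    }
  }
}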
- */ - -package org.apache.hive.service.cli.operation; - -import java.sql.DatabaseMetaData; -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.ql.exec.FunctionInfo; -import org.apache.hadoop.hive.ql.exec.FunctionRegistry; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils; -import org.apache.hive.service.cli.CLIServiceUtils; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.Type; -import org.apache.hive.service.cli.session.HiveSession; -import org.apache.thrift.TException; - -/** - * GetFunctionsOperation. - * - */ -public class GetFunctionsOperation extends MetadataOperation { - private static final TableSchema RESULT_SET_SCHEMA = new TableSchema() - .addPrimitiveColumn("FUNCTION_CAT", Type.STRING_TYPE, - "Function catalog (may be null)") - .addPrimitiveColumn("FUNCTION_SCHEM", Type.STRING_TYPE, - "Function schema (may be null)") - .addPrimitiveColumn("FUNCTION_NAME", Type.STRING_TYPE, - "Function name. This is the name used to invoke the function") - .addPrimitiveColumn("REMARKS", Type.STRING_TYPE, - "Explanatory comment on the function") - .addPrimitiveColumn("FUNCTION_TYPE", Type.INT_TYPE, - "Kind of function.") - .addPrimitiveColumn("SPECIFIC_NAME", Type.STRING_TYPE, - "The name which uniquely identifies this function within its schema"); - - private final String catalogName; - private final String schemaName; - private final String functionName; - - protected final RowSet rowSet; - - public GetFunctionsOperation(HiveSession parentSession, - String catalogName, String schemaName, String functionName) { - super(parentSession, OperationType.GET_FUNCTIONS); - this.catalogName = catalogName; - this.schemaName = schemaName; - this.functionName = functionName; - this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - if (isAuthV2Enabled()) { - // get databases for schema pattern - IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); - String schemaPattern = convertSchemaPattern(schemaName); - List matchingDbs; - try { - matchingDbs = metastoreClient.getDatabases(schemaPattern); - } catch (TException e) { - setState(OperationState.ERROR); - throw new HiveSQLException(e); - } - // authorize this call on the schema objects - List privObjs = HivePrivilegeObjectUtils - .getHivePrivDbObjects(matchingDbs); - String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName; - authorizeMetaGets(HiveOperationType.GET_FUNCTIONS, privObjs, cmdStr); - } - - try { - if ((null == catalogName || "".equals(catalogName)) - && (null == schemaName || "".equals(schemaName))) { - Set functionNames = FunctionRegistry - .getFunctionNames(CLIServiceUtils.patternToRegex(functionName)); - for (String functionName : functionNames) { - FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(functionName); - Object[] rowData = 
new Object[] { - null, // FUNCTION_CAT - null, // FUNCTION_SCHEM - functionInfo.getDisplayName(), // FUNCTION_NAME - "", // REMARKS - (functionInfo.isGenericUDTF() ? - DatabaseMetaData.functionReturnsTable - : DatabaseMetaData.functionNoTable), // FUNCTION_TYPE - functionInfo.getClass().getCanonicalName() - }; - rowSet.addRow(rowData); - } - } - setState(OperationState.FINISHED); - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException(e); - } - } - - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - assertState(OperationState.FINISHED); - return RESULT_SET_SCHEMA; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - assertState(OperationState.FINISHED); - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - rowSet.setStartOffset(0); - } - return rowSet.extractSubset((int)maxRows); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java deleted file mode 100644 index 3516bc2ba242c..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetSchemasOperation.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * GetSchemasOperation. 
- * - */ -public class GetSchemasOperation extends MetadataOperation { - private final String catalogName; - private final String schemaName; - - private static final TableSchema RESULT_SET_SCHEMA = new TableSchema() - .addStringColumn("TABLE_SCHEM", "Schema name.") - .addStringColumn("TABLE_CATALOG", "Catalog name."); - - protected RowSet rowSet; - - protected GetSchemasOperation(HiveSession parentSession, - String catalogName, String schemaName) { - super(parentSession, OperationType.GET_SCHEMAS); - this.catalogName = catalogName; - this.schemaName = schemaName; - this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - if (isAuthV2Enabled()) { - String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName; - authorizeMetaGets(HiveOperationType.GET_SCHEMAS, null, cmdStr); - } - try { - IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); - String schemaPattern = convertSchemaPattern(schemaName); - for (String dbName : metastoreClient.getDatabases(schemaPattern)) { - rowSet.addRow(new Object[] {dbName, DEFAULT_HIVE_CATALOG}); - } - setState(OperationState.FINISHED); - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException(e); - } - } - - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - assertState(OperationState.FINISHED); - return RESULT_SET_SCHEMA; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - assertState(OperationState.FINISHED); - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - rowSet.setStartOffset(0); - } - return rowSet.extractSubset((int)maxRows); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java deleted file mode 100644 index b372f55cedd1c..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTableTypesOperation.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hive.service.cli.operation; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * GetTableTypesOperation. - * - */ -public class GetTableTypesOperation extends MetadataOperation { - - protected static TableSchema RESULT_SET_SCHEMA = new TableSchema() - .addStringColumn("TABLE_TYPE", "Table type name."); - - protected final RowSet rowSet; - private final TableTypeMapping tableTypeMapping; - - protected GetTableTypesOperation(HiveSession parentSession) { - super(parentSession, OperationType.GET_TABLE_TYPES); - String tableMappingStr = getParentSession().getHiveConf() - .getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING); - tableTypeMapping = - TableTypeMappingFactory.getTableTypeMapping(tableMappingStr); - rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - if (isAuthV2Enabled()) { - authorizeMetaGets(HiveOperationType.GET_TABLETYPES, null); - } - try { - for (TableType type : TableType.values()) { - rowSet.addRow(new String[] {tableTypeMapping.mapToClientType(type.toString())}); - } - setState(OperationState.FINISHED); - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - assertState(OperationState.FINISHED); - return RESULT_SET_SCHEMA; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - assertState(OperationState.FINISHED); - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - rowSet.setStartOffset(0); - } - return rowSet.extractSubset((int)maxRows); - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java deleted file mode 100644 index 2af17a662a296..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTablesOperation.java +++ /dev/null @@ -1,135 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
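GetTableTypesOperation above does not hard-code a mapping: it reads HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING and asks TableTypeMappingFactory for either the classic or the pass-through (HIVE) mapping. A hedged sketch of that selection, with a plain function standing in for the TableTypeMapping interface and factory:

// Illustrative only: configuration-driven selection between the two mappings that
// appear in this patch. The factory below is a simplified stand-in, not the deleted
// TableTypeMappingFactory.
import java.util.function.UnaryOperator;

public class MappingSelectionSketch {
  static UnaryOperator<String> forConfig(String tableMappingStr) {
    if ("CLASSIC".equalsIgnoreCase(tableMappingStr)) {
      // ClassicTableTypeMapping behavior: collapse managed/external tables, rename views.
      return hiveType ->
          "MANAGED_TABLE".equals(hiveType) || "EXTERNAL_TABLE".equals(hiveType) ? "TABLE"
              : "VIRTUAL_VIEW".equals(hiveType) ? "VIEW"
              : hiveType;
    }
    // HiveTableTypeMapping behavior: names pass through unchanged.
    return UnaryOperator.identity();
  }

  public static void main(String[] args) {
    System.out.println(forConfig("CLASSIC").apply("EXTERNAL_TABLE")); // TABLE
    System.out.println(forConfig("HIVE").apply("EXTERNAL_TABLE"));    // EXTERNAL_TABLE
  }
}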
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObjectUtils; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * GetTablesOperation. - * - */ -public class GetTablesOperation extends MetadataOperation { - - private final String catalogName; - private final String schemaName; - private final String tableName; - private final List tableTypes = new ArrayList(); - protected final RowSet rowSet; - private final TableTypeMapping tableTypeMapping; - - - private static final TableSchema RESULT_SET_SCHEMA = new TableSchema() - .addStringColumn("TABLE_CAT", "Catalog name. NULL if not applicable.") - .addStringColumn("TABLE_SCHEM", "Schema name.") - .addStringColumn("TABLE_NAME", "Table name.") - .addStringColumn("TABLE_TYPE", "The table type, e.g. 
\"TABLE\", \"VIEW\", etc.") - .addStringColumn("REMARKS", "Comments about the table."); - - protected GetTablesOperation(HiveSession parentSession, - String catalogName, String schemaName, String tableName, - List tableTypes) { - super(parentSession, OperationType.GET_TABLES); - this.catalogName = catalogName; - this.schemaName = schemaName; - this.tableName = tableName; - String tableMappingStr = getParentSession().getHiveConf() - .getVar(HiveConf.ConfVars.HIVE_SERVER2_TABLE_TYPE_MAPPING); - tableTypeMapping = - TableTypeMappingFactory.getTableTypeMapping(tableMappingStr); - if (tableTypes != null) { - this.tableTypes.addAll(tableTypes); - } - this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - try { - IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); - String schemaPattern = convertSchemaPattern(schemaName); - List matchingDbs = metastoreClient.getDatabases(schemaPattern); - if(isAuthV2Enabled()){ - List privObjs = HivePrivilegeObjectUtils.getHivePrivDbObjects(matchingDbs); - String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName; - authorizeMetaGets(HiveOperationType.GET_TABLES, privObjs, cmdStr); - } - - String tablePattern = convertIdentifierPattern(tableName, true); - for (String dbName : metastoreClient.getDatabases(schemaPattern)) { - List tableNames = metastoreClient.getTables(dbName, tablePattern); - for (Table table : metastoreClient.getTableObjectsByName(dbName, tableNames)) { - Object[] rowData = new Object[] { - DEFAULT_HIVE_CATALOG, - table.getDbName(), - table.getTableName(), - tableTypeMapping.mapToClientType(table.getTableType()), - table.getParameters().get("comment") - }; - if (tableTypes.isEmpty() || tableTypes.contains( - tableTypeMapping.mapToClientType(table.getTableType()))) { - rowSet.addRow(rowData); - } - } - } - setState(OperationState.FINISHED); - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - assertState(OperationState.FINISHED); - return RESULT_SET_SCHEMA; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - assertState(OperationState.FINISHED); - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - rowSet.setStartOffset(0); - } - return rowSet.extractSubset((int)maxRows); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java deleted file mode 100644 index 3e81f8afbd85f..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/GetTypeInfoOperation.java +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.Type; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * GetTypeInfoOperation. - * - */ -public class GetTypeInfoOperation extends MetadataOperation { - - private static final TableSchema RESULT_SET_SCHEMA = new TableSchema() - .addPrimitiveColumn("TYPE_NAME", Type.STRING_TYPE, - "Type name") - .addPrimitiveColumn("DATA_TYPE", Type.INT_TYPE, - "SQL data type from java.sql.Types") - .addPrimitiveColumn("PRECISION", Type.INT_TYPE, - "Maximum precision") - .addPrimitiveColumn("LITERAL_PREFIX", Type.STRING_TYPE, - "Prefix used to quote a literal (may be null)") - .addPrimitiveColumn("LITERAL_SUFFIX", Type.STRING_TYPE, - "Suffix used to quote a literal (may be null)") - .addPrimitiveColumn("CREATE_PARAMS", Type.STRING_TYPE, - "Parameters used in creating the type (may be null)") - .addPrimitiveColumn("NULLABLE", Type.SMALLINT_TYPE, - "Can you use NULL for this type") - .addPrimitiveColumn("CASE_SENSITIVE", Type.BOOLEAN_TYPE, - "Is it case sensitive") - .addPrimitiveColumn("SEARCHABLE", Type.SMALLINT_TYPE, - "Can you use \"WHERE\" based on this type") - .addPrimitiveColumn("UNSIGNED_ATTRIBUTE", Type.BOOLEAN_TYPE, - "Is it unsigned") - .addPrimitiveColumn("FIXED_PREC_SCALE", Type.BOOLEAN_TYPE, - "Can it be a money value") - .addPrimitiveColumn("AUTO_INCREMENT", Type.BOOLEAN_TYPE, - "Can it be used for an auto-increment value") - .addPrimitiveColumn("LOCAL_TYPE_NAME", Type.STRING_TYPE, - "Localized version of type name (may be null)") - .addPrimitiveColumn("MINIMUM_SCALE", Type.SMALLINT_TYPE, - "Minimum scale supported") - .addPrimitiveColumn("MAXIMUM_SCALE", Type.SMALLINT_TYPE, - "Maximum scale supported") - .addPrimitiveColumn("SQL_DATA_TYPE", Type.INT_TYPE, - "Unused") - .addPrimitiveColumn("SQL_DATETIME_SUB", Type.INT_TYPE, - "Unused") - .addPrimitiveColumn("NUM_PREC_RADIX", Type.INT_TYPE, - "Usually 2 or 10"); - - protected final RowSet rowSet; - - protected GetTypeInfoOperation(HiveSession parentSession) { - super(parentSession, OperationType.GET_TYPE_INFO); - rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - if (isAuthV2Enabled()) { - authorizeMetaGets(HiveOperationType.GET_TYPEINFO, null); - } - try { - for (Type type : Type.values()) { - Object[] rowData = new Object[] { - type.getName(), // TYPE_NAME - type.toJavaSQLType(), // DATA_TYPE - 
type.getMaxPrecision(), // PRECISION - type.getLiteralPrefix(), // LITERAL_PREFIX - type.getLiteralSuffix(), // LITERAL_SUFFIX - type.getCreateParams(), // CREATE_PARAMS - type.getNullable(), // NULLABLE - type.isCaseSensitive(), // CASE_SENSITIVE - type.getSearchable(), // SEARCHABLE - type.isUnsignedAttribute(), // UNSIGNED_ATTRIBUTE - type.isFixedPrecScale(), // FIXED_PREC_SCALE - type.isAutoIncrement(), // AUTO_INCREMENT - type.getLocalizedName(), // LOCAL_TYPE_NAME - type.getMinimumScale(), // MINIMUM_SCALE - type.getMaximumScale(), // MAXIMUM_SCALE - null, // SQL_DATA_TYPE, unused - null, // SQL_DATETIME_SUB, unused - type.getNumPrecRadix() //NUM_PREC_RADIX - }; - rowSet.addRow(rowData); - } - setState(OperationState.FINISHED); - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException(e); - } - } - - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - assertState(OperationState.FINISHED); - return RESULT_SET_SCHEMA; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - assertState(OperationState.FINISHED); - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - rowSet.setStartOffset(0); - } - return rowSet.extractSubset((int)maxRows); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java deleted file mode 100644 index 5b6e6ad042412..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/HiveCommandOperation.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hive.service.cli.operation; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.IOException; -import java.io.PrintStream; -import java.io.UnsupportedEncodingException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - -import static java.nio.charset.StandardCharsets.UTF_8; - -import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hadoop.hive.ql.processors.CommandProcessor; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.io.IOUtils; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * Executes a HiveCommand - */ -public class HiveCommandOperation extends ExecuteStatementOperation { - private CommandProcessor commandProcessor; - private TableSchema resultSchema = null; - - /** - * For processors other than Hive queries (Driver), they output to session.out (a temp file) - * first and the fetchOne/fetchN/fetchAll functions get the output from pipeIn. - */ - private BufferedReader resultReader; - - - protected HiveCommandOperation(HiveSession parentSession, String statement, - CommandProcessor commandProcessor, Map confOverlay) { - super(parentSession, statement, confOverlay, false); - this.commandProcessor = commandProcessor; - setupSessionIO(parentSession.getSessionState()); - } - - private void setupSessionIO(SessionState sessionState) { - try { - LOG.info("Putting temp output to file " + sessionState.getTmpOutputFile().toString()); - sessionState.in = null; // hive server's session input stream is not used - // open a per-session file in auto-flush mode for writing temp results - sessionState.out = new PrintStream(new FileOutputStream(sessionState.getTmpOutputFile()), true, UTF_8.name()); - // TODO: for hadoop jobs, progress is printed out to session.err, - // we should find a way to feed back job progress to client - sessionState.err = new PrintStream(System.err, true, UTF_8.name()); - } catch (IOException e) { - LOG.error("Error in creating temp output file ", e); - try { - sessionState.in = null; - sessionState.out = new PrintStream(System.out, true, UTF_8.name()); - sessionState.err = new PrintStream(System.err, true, UTF_8.name()); - } catch (UnsupportedEncodingException ee) { - LOG.error("Error creating PrintStream", e); - ee.printStackTrace(); - sessionState.out = null; - sessionState.err = null; - } - } - } - - - private void tearDownSessionIO() { - IOUtils.cleanup(LOG, parentSession.getSessionState().out); - IOUtils.cleanup(LOG, parentSession.getSessionState().err); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.RUNNING); - try { - String command = getStatement().trim(); - String[] tokens = statement.split("\\s"); - String commandArgs = command.substring(tokens[0].length()).trim(); - - CommandProcessorResponse response = commandProcessor.run(commandArgs); - int returnCode = response.getResponseCode(); - if (returnCode != 0) { - throw toSQLException("Error while processing statement", response); - } - Schema schema = 
response.getSchema(); - if (schema != null) { - setHasResultSet(true); - resultSchema = new TableSchema(schema); - } else { - setHasResultSet(false); - resultSchema = new TableSchema(); - } - } catch (HiveSQLException e) { - setState(OperationState.ERROR); - throw e; - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException("Error running query: " + e.toString(), e); - } - setState(OperationState.FINISHED); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.operation.Operation#close() - */ - @Override - public void close() throws HiveSQLException { - setState(OperationState.CLOSED); - tearDownSessionIO(); - cleanTmpFile(); - cleanupOperationLog(); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.operation.Operation#getResultSetSchema() - */ - @Override - public TableSchema getResultSetSchema() throws HiveSQLException { - return resultSchema; - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.operation.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) - */ - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - validateDefaultFetchOrientation(orientation); - if (orientation.equals(FetchOrientation.FETCH_FIRST)) { - resetResultReader(); - } - List rows = readResults((int) maxRows); - RowSet rowSet = RowSetFactory.create(resultSchema, getProtocolVersion()); - - for (String row : rows) { - rowSet.addRow(new String[] {row}); - } - return rowSet; - } - - /** - * Reads the temporary results for non-Hive (non-Driver) commands to the - * resulting List of strings. - * @param nLines number of lines read at once. If it is <= 0, then read all lines. - */ - private List readResults(int nLines) throws HiveSQLException { - if (resultReader == null) { - SessionState sessionState = getParentSession().getSessionState(); - File tmp = sessionState.getTmpOutputFile(); - try { - resultReader = new BufferedReader(new FileReader(tmp)); - } catch (FileNotFoundException e) { - LOG.error("File " + tmp + " not found. ", e); - throw new HiveSQLException(e); - } - } - List results = new ArrayList(); - - for (int i = 0; i < nLines || nLines <= 0; ++i) { - try { - String line = resultReader.readLine(); - if (line == null) { - // reached the end of the result file - break; - } else { - results.add(line); - } - } catch (IOException e) { - LOG.error("Reading temp results encountered an exception: ", e); - throw new HiveSQLException(e); - } - } - return results; - } - - private void cleanTmpFile() { - resetResultReader(); - SessionState sessionState = getParentSession().getSessionState(); - File tmp = sessionState.getTmpOutputFile(); - tmp.delete(); - } - - private void resetResultReader() { - if (resultReader != null) { - IOUtils.cleanup(LOG, resultReader); - resultReader = null; - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java deleted file mode 100644 index b530f217125b8..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/HiveTableTypeMapping.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
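HiveCommandOperation above never fetches rows from a Driver: the command processor writes to the session's temp output file, and readResults re-reads that file in batches, where nLines <= 0 means "read to the end". A self-contained version of that loop over an ordinary temp file:

// Illustrative only: the batched line-reading loop from readResults, applied to a
// throwaway temp file instead of SessionState's temp output file.
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ReadResultsSketch {
  static List<String> readResults(BufferedReader reader, int nLines) throws IOException {
    List<String> results = new ArrayList<>();
    // nLines <= 0 disables the bound, so the loop runs until readLine() returns null.
    for (int i = 0; i < nLines || nLines <= 0; ++i) {
      String line = reader.readLine();
      if (line == null) {
        break; // reached the end of the temp output
      }
      results.add(line);
    }
    return results;
  }

  public static void main(String[] args) throws IOException {
    Path tmp = Files.createTempFile("results", ".txt");
    Files.write(tmp, Arrays.asList("row 1", "row 2", "row 3"), StandardCharsets.UTF_8);
    try (BufferedReader reader = Files.newBufferedReader(tmp, StandardCharsets.UTF_8)) {
      System.out.println(readResults(reader, 2));  // [row 1, row 2]
      System.out.println(readResults(reader, -1)); // [row 3]
    } finally {
      Files.delete(tmp);
    }
  }
}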
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import java.util.HashSet; -import java.util.Set; - -import org.apache.hadoop.hive.metastore.TableType; - -/** - * HiveTableTypeMapping. - * Default table type mapping - * - */ -public class HiveTableTypeMapping implements TableTypeMapping { - - @Override - public String mapToHiveType(String clientTypeName) { - return clientTypeName; - } - - @Override - public String mapToClientType(String hiveTypeName) { - return hiveTypeName; - } - - @Override - public Set getTableTypeNames() { - Set typeNameSet = new HashSet(); - for (TableType typeNames : TableType.values()) { - typeNameSet.add(typeNames.toString()); - } - return typeNameSet; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java deleted file mode 100644 index 6c819876a556d..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/MetadataOperation.java +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import java.util.List; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; - -/** - * MetadataOperation. 
- * - */ -public abstract class MetadataOperation extends Operation { - - protected static final String DEFAULT_HIVE_CATALOG = ""; - protected static TableSchema RESULT_SET_SCHEMA; - private static final char SEARCH_STRING_ESCAPE = '\\'; - - protected MetadataOperation(HiveSession parentSession, OperationType opType) { - super(parentSession, opType, false); - setHasResultSet(true); - } - - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.Operation#close() - */ - @Override - public void close() throws HiveSQLException { - setState(OperationState.CLOSED); - cleanupOperationLog(); - } - - /** - * Convert wildchars and escape sequence from JDBC format to datanucleous/regex - */ - protected String convertIdentifierPattern(final String pattern, boolean datanucleusFormat) { - if (pattern == null) { - return convertPattern("%", true); - } else { - return convertPattern(pattern, datanucleusFormat); - } - } - - /** - * Convert wildchars and escape sequence of schema pattern from JDBC format to datanucleous/regex - * The schema pattern treats empty string also as wildchar - */ - protected String convertSchemaPattern(final String pattern) { - if ((pattern == null) || pattern.isEmpty()) { - return convertPattern("%", true); - } else { - return convertPattern(pattern, true); - } - } - - /** - * Convert a pattern containing JDBC catalog search wildcards into - * Java regex patterns. - * - * @param pattern input which may contain '%' or '_' wildcard characters, or - * these characters escaped using {@link #getSearchStringEscape()}. - * @return replace %/_ with regex search characters, also handle escaped - * characters. - * - * The datanucleus module expects the wildchar as '*'. The columns search on the - * other hand is done locally inside the hive code and that requires the regex wildchar - * format '.*' This is driven by the datanucleusFormat flag. 
- */ - private String convertPattern(final String pattern, boolean datanucleusFormat) { - String wStr; - if (datanucleusFormat) { - wStr = "*"; - } else { - wStr = ".*"; - } - return pattern - .replaceAll("([^\\\\])%", "$1" + wStr).replaceAll("\\\\%", "%").replaceAll("^%", wStr) - .replaceAll("([^\\\\])_", "$1.").replaceAll("\\\\_", "_").replaceAll("^_", "."); - } - - protected boolean isAuthV2Enabled(){ - SessionState ss = SessionState.get(); - return (ss.isAuthorizationModeV2() && - HiveConf.getBoolVar(ss.getConf(), HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)); - } - - protected void authorizeMetaGets(HiveOperationType opType, List inpObjs) - throws HiveSQLException { - authorizeMetaGets(opType, inpObjs, null); - } - - protected void authorizeMetaGets(HiveOperationType opType, List inpObjs, - String cmdString) throws HiveSQLException { - SessionState ss = SessionState.get(); - HiveAuthzContext.Builder ctxBuilder = new HiveAuthzContext.Builder(); - ctxBuilder.setUserIpAddress(ss.getUserIpAddress()); - ctxBuilder.setCommandString(cmdString); - try { - ss.getAuthorizerV2().checkPrivileges(opType, inpObjs, null, - ctxBuilder.build()); - } catch (HiveAuthzPluginException | HiveAccessControlException e) { - throw new HiveSQLException(e.getMessage(), e); - } - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/Operation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/Operation.java deleted file mode 100644 index 4b331423948fa..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/Operation.java +++ /dev/null @@ -1,328 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
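convertPattern above is the one place where JDBC search wildcards get rewritten: '%' becomes '*' for patterns sent to the metastore (datanucleus) or '.*' for local regex matching, '_' becomes '.', and backslash-escaped wildcards are unescaped back to literals. The same replaceAll chain, runnable on its own with a few sample conversions:

// Illustrative only: the wildcard rewrite from MetadataOperation.convertPattern,
// copied out so the sample inputs and outputs can be checked directly.
public class PatternConversionSketch {
  static String convertPattern(final String pattern, boolean datanucleusFormat) {
    String wStr = datanucleusFormat ? "*" : ".*";
    return pattern
        .replaceAll("([^\\\\])%", "$1" + wStr).replaceAll("\\\\%", "%").replaceAll("^%", wStr)
        .replaceAll("([^\\\\])_", "$1.").replaceAll("\\\\_", "_").replaceAll("^_", ".");
  }

  public static void main(String[] args) {
    System.out.println(convertPattern("%", true));        // *       (null/empty schema pattern)
    System.out.println(convertPattern("emp%", true));     // emp*    (sent to the metastore)
    System.out.println(convertPattern("emp%", false));    // emp.*   (compiled locally as a regex)
    System.out.println(convertPattern("my_tab", false));  // my.tab
    System.out.println(convertPattern("my\\_tab", true)); // my_tab  (escaped '_' is a literal)
  }
}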
- */ -package org.apache.hive.service.cli.operation; - -import java.io.File; -import java.io.FileNotFoundException; -import java.util.EnumSet; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.ql.session.OperationLog; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationStatus; -import org.apache.hive.service.cli.OperationType; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; -import org.apache.hive.service.cli.thrift.TProtocolVersion; - -public abstract class Operation { - protected final HiveSession parentSession; - private OperationState state = OperationState.INITIALIZED; - private final OperationHandle opHandle; - private HiveConf configuration; - public static final Log LOG = LogFactory.getLog(Operation.class.getName()); - public static final FetchOrientation DEFAULT_FETCH_ORIENTATION = FetchOrientation.FETCH_NEXT; - public static final long DEFAULT_FETCH_MAX_ROWS = 100; - protected boolean hasResultSet; - protected volatile HiveSQLException operationException; - protected final boolean runAsync; - protected volatile Future backgroundHandle; - protected OperationLog operationLog; - protected boolean isOperationLogEnabled; - - private long operationTimeout; - private long lastAccessTime; - - protected static final EnumSet DEFAULT_FETCH_ORIENTATION_SET = - EnumSet.of( - FetchOrientation.FETCH_NEXT, - FetchOrientation.FETCH_FIRST, - FetchOrientation.FETCH_PRIOR); - - protected Operation(HiveSession parentSession, OperationType opType, boolean runInBackground) { - this.parentSession = parentSession; - this.runAsync = runInBackground; - this.opHandle = new OperationHandle(opType, parentSession.getProtocolVersion()); - lastAccessTime = System.currentTimeMillis(); - operationTimeout = HiveConf.getTimeVar(parentSession.getHiveConf(), - HiveConf.ConfVars.HIVE_SERVER2_IDLE_OPERATION_TIMEOUT, TimeUnit.MILLISECONDS); - } - - public Future getBackgroundHandle() { - return backgroundHandle; - } - - protected void setBackgroundHandle(Future backgroundHandle) { - this.backgroundHandle = backgroundHandle; - } - - public boolean shouldRunAsync() { - return runAsync; - } - - public void setConfiguration(HiveConf configuration) { - this.configuration = new HiveConf(configuration); - } - - public HiveConf getConfiguration() { - return new HiveConf(configuration); - } - - public HiveSession getParentSession() { - return parentSession; - } - - public OperationHandle getHandle() { - return opHandle; - } - - public TProtocolVersion getProtocolVersion() { - return opHandle.getProtocolVersion(); - } - - public OperationType getType() { - return opHandle.getOperationType(); - } - - public OperationStatus getStatus() { - return new OperationStatus(state, operationException); - } - - public boolean hasResultSet() { - return hasResultSet; - } - - protected void setHasResultSet(boolean hasResultSet) { - this.hasResultSet = hasResultSet; - opHandle.setHasResultSet(hasResultSet); - } - - public OperationLog getOperationLog() { - return operationLog; - } - - 
protected final OperationState setState(OperationState newState) throws HiveSQLException { - state.validateTransition(newState); - this.state = newState; - this.lastAccessTime = System.currentTimeMillis(); - return this.state; - } - - public boolean isTimedOut(long current) { - if (operationTimeout == 0) { - return false; - } - if (operationTimeout > 0) { - // check only when it's in terminal state - return state.isTerminal() && lastAccessTime + operationTimeout <= current; - } - return lastAccessTime + -operationTimeout <= current; - } - - public long getLastAccessTime() { - return lastAccessTime; - } - - public long getOperationTimeout() { - return operationTimeout; - } - - public void setOperationTimeout(long operationTimeout) { - this.operationTimeout = operationTimeout; - } - - protected void setOperationException(HiveSQLException operationException) { - this.operationException = operationException; - } - - protected final void assertState(OperationState state) throws HiveSQLException { - if (this.state != state) { - throw new HiveSQLException("Expected state " + state + ", but found " + this.state); - } - this.lastAccessTime = System.currentTimeMillis(); - } - - public boolean isRunning() { - return OperationState.RUNNING.equals(state); - } - - public boolean isFinished() { - return OperationState.FINISHED.equals(state); - } - - public boolean isCanceled() { - return OperationState.CANCELED.equals(state); - } - - public boolean isFailed() { - return OperationState.ERROR.equals(state); - } - - protected void createOperationLog() { - if (parentSession.isOperationLogEnabled()) { - File operationLogFile = new File(parentSession.getOperationLogSessionDir(), - opHandle.getHandleIdentifier().toString()); - isOperationLogEnabled = true; - - // create log file - try { - if (operationLogFile.exists()) { - LOG.warn("The operation log file should not exist, but it is already there: " + - operationLogFile.getAbsolutePath()); - operationLogFile.delete(); - } - if (!operationLogFile.createNewFile()) { - // the log file already exists and cannot be deleted. - // If it can be read/written, keep its contents and use it. - if (!operationLogFile.canRead() || !operationLogFile.canWrite()) { - LOG.warn("The already existed operation log file cannot be recreated, " + - "and it cannot be read or written: " + operationLogFile.getAbsolutePath()); - isOperationLogEnabled = false; - return; - } - } - } catch (Exception e) { - LOG.warn("Unable to create operation log file: " + operationLogFile.getAbsolutePath(), e); - isOperationLogEnabled = false; - return; - } - - // create OperationLog object with above log file - try { - operationLog = new OperationLog(opHandle.toString(), operationLogFile, parentSession.getHiveConf()); - } catch (FileNotFoundException e) { - LOG.warn("Unable to instantiate OperationLog object for operation: " + - opHandle, e); - isOperationLogEnabled = false; - return; - } - - // register this operationLog to current thread - OperationLog.setCurrentOperationLog(operationLog); - } - } - - protected void unregisterOperationLog() { - if (isOperationLogEnabled) { - OperationLog.removeCurrentOperationLog(); - } - } - - /** - * Invoked before runInternal(). - * Set up some preconditions, or configurations. - */ - protected void beforeRun() { - createOperationLog(); - } - - /** - * Invoked after runInternal(), even if an exception is thrown in runInternal(). - * Clean up resources, which was set up in beforeRun(). 
- */ - protected void afterRun() { - unregisterOperationLog(); - } - - /** - * Implemented by subclass of Operation class to execute specific behaviors. - * @throws HiveSQLException - */ - protected abstract void runInternal() throws HiveSQLException; - - public void run() throws HiveSQLException { - beforeRun(); - try { - runInternal(); - } finally { - afterRun(); - } - } - - protected void cleanupOperationLog() { - if (isOperationLogEnabled) { - if (operationLog == null) { - LOG.error("Operation [ " + opHandle.getHandleIdentifier() + " ] " - + "logging is enabled, but its OperationLog object cannot be found."); - } else { - operationLog.close(); - } - } - } - - // TODO: make this abstract and implement in subclasses. - public void cancel() throws HiveSQLException { - setState(OperationState.CANCELED); - throw new UnsupportedOperationException("SQLOperation.cancel()"); - } - - public void close() throws HiveSQLException { - setState(OperationState.CLOSED); - cleanupOperationLog(); - } - - public abstract TableSchema getResultSetSchema() throws HiveSQLException; - - public abstract RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException; - - public RowSet getNextRowSet() throws HiveSQLException { - return getNextRowSet(FetchOrientation.FETCH_NEXT, DEFAULT_FETCH_MAX_ROWS); - } - - /** - * Verify if the given fetch orientation is part of the default orientation types. - * @param orientation - * @throws HiveSQLException - */ - protected void validateDefaultFetchOrientation(FetchOrientation orientation) - throws HiveSQLException { - validateFetchOrientation(orientation, DEFAULT_FETCH_ORIENTATION_SET); - } - - /** - * Verify if the given fetch orientation is part of the supported orientation types. - * @param orientation - * @param supportedOrientations - * @throws HiveSQLException - */ - protected void validateFetchOrientation(FetchOrientation orientation, - EnumSet supportedOrientations) throws HiveSQLException { - if (!supportedOrientations.contains(orientation)) { - throw new HiveSQLException("The fetch type " + orientation.toString() + - " is not supported for this resultset", "HY106"); - } - } - - protected HiveSQLException toSQLException(String prefix, CommandProcessorResponse response) { - HiveSQLException ex = new HiveSQLException(prefix + ": " + response.getErrorMessage(), - response.getSQLState(), response.getResponseCode()); - if (response.getException() != null) { - ex.initCause(response.getException()); - } - return ex; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java deleted file mode 100644 index 92c340a29c107..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/OperationManager.java +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
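The deleted Operation.run() above is a classic template method: beforeRun() sets up the per-operation log, runInternal() is supplied by subclasses, and afterRun() always runs in a finally block so the log registration is undone even on failure. A hedged, pure-JDK sketch of that shape; LifecycleException is a hypothetical stand-in for HiveSQLException:

// Illustrative template-method sketch of the beforeRun / runInternal / afterRun contract.
abstract class TemplateOperation {
    static class LifecycleException extends Exception {}

    protected void beforeRun() {
        // e.g. create and register a per-operation log for the current thread
    }

    protected void afterRun() {
        // e.g. unregister the per-operation log; always runs, even on failure
    }

    protected abstract void runInternal() throws LifecycleException;

    public final void run() throws LifecycleException {
        beforeRun();
        try {
            runInternal();
        } finally {
            afterRun();   // cleanup is guaranteed, mirroring the deleted code
        }
    }
}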
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hadoop.hive.ql.session.OperationLog; -import org.apache.hive.service.AbstractService; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.OperationStatus; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; -import org.apache.log4j.Appender; -import org.apache.log4j.Logger; - -/** - * OperationManager. - * - */ -public class OperationManager extends AbstractService { - private final Log LOG = LogFactory.getLog(OperationManager.class.getName()); - - private final Map handleToOperation = - new HashMap(); - - public OperationManager() { - super(OperationManager.class.getSimpleName()); - } - - @Override - public synchronized void init(HiveConf hiveConf) { - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { - initOperationLogCapture(hiveConf.getVar( - HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL)); - } else { - LOG.debug("Operation level logging is turned off"); - } - super.init(hiveConf); - } - - @Override - public synchronized void start() { - super.start(); - // TODO - } - - @Override - public synchronized void stop() { - // TODO - super.stop(); - } - - private void initOperationLogCapture(String loggingMode) { - // Register another Appender (with the same layout) that talks to us. 
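OperationManager below keeps a plain HashMap from operation handle to operation and guards it with synchronized accessors; every factory method constructs an operation and registers it before handing the handle back. A simplified registry sketch under those assumptions; the handle type and method names are illustrative:

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

// Simplified sketch of the handle -> operation registry pattern used by the
// deleted OperationManager; not the Hive API, just the same shape.
class OperationRegistry<O> {
    private final Map<UUID, O> handleToOperation = new HashMap<>();

    // Factory methods in the real class build the operation, then register it.
    synchronized UUID register(O operation) {
        UUID handle = UUID.randomUUID();
        handleToOperation.put(handle, operation);
        return handle;
    }

    synchronized O lookup(UUID handle) {
        return handleToOperation.get(handle);
    }

    synchronized O remove(UUID handle) {
        return handleToOperation.remove(handle);
    }
}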
- Appender ap = new LogDivertAppender(this, OperationLog.getLoggingLevel(loggingMode)); - Logger.getRootLogger().addAppender(ap); - } - - public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession, - String statement, Map confOverlay, boolean runAsync) - throws HiveSQLException { - ExecuteStatementOperation executeStatementOperation = ExecuteStatementOperation - .newExecuteStatementOperation(parentSession, statement, confOverlay, runAsync); - addOperation(executeStatementOperation); - return executeStatementOperation; - } - - public GetTypeInfoOperation newGetTypeInfoOperation(HiveSession parentSession) { - GetTypeInfoOperation operation = new GetTypeInfoOperation(parentSession); - addOperation(operation); - return operation; - } - - public GetCatalogsOperation newGetCatalogsOperation(HiveSession parentSession) { - GetCatalogsOperation operation = new GetCatalogsOperation(parentSession); - addOperation(operation); - return operation; - } - - public GetSchemasOperation newGetSchemasOperation(HiveSession parentSession, - String catalogName, String schemaName) { - GetSchemasOperation operation = new GetSchemasOperation(parentSession, catalogName, schemaName); - addOperation(operation); - return operation; - } - - public MetadataOperation newGetTablesOperation(HiveSession parentSession, - String catalogName, String schemaName, String tableName, - List tableTypes) { - MetadataOperation operation = - new GetTablesOperation(parentSession, catalogName, schemaName, tableName, tableTypes); - addOperation(operation); - return operation; - } - - public GetTableTypesOperation newGetTableTypesOperation(HiveSession parentSession) { - GetTableTypesOperation operation = new GetTableTypesOperation(parentSession); - addOperation(operation); - return operation; - } - - public GetColumnsOperation newGetColumnsOperation(HiveSession parentSession, - String catalogName, String schemaName, String tableName, String columnName) { - GetColumnsOperation operation = new GetColumnsOperation(parentSession, - catalogName, schemaName, tableName, columnName); - addOperation(operation); - return operation; - } - - public GetFunctionsOperation newGetFunctionsOperation(HiveSession parentSession, - String catalogName, String schemaName, String functionName) { - GetFunctionsOperation operation = new GetFunctionsOperation(parentSession, - catalogName, schemaName, functionName); - addOperation(operation); - return operation; - } - - public Operation getOperation(OperationHandle operationHandle) throws HiveSQLException { - Operation operation = getOperationInternal(operationHandle); - if (operation == null) { - throw new HiveSQLException("Invalid OperationHandle: " + operationHandle); - } - return operation; - } - - private synchronized Operation getOperationInternal(OperationHandle operationHandle) { - return handleToOperation.get(operationHandle); - } - - private synchronized Operation removeTimedOutOperation(OperationHandle operationHandle) { - Operation operation = handleToOperation.get(operationHandle); - if (operation != null && operation.isTimedOut(System.currentTimeMillis())) { - handleToOperation.remove(operationHandle); - return operation; - } - return null; - } - - private synchronized void addOperation(Operation operation) { - handleToOperation.put(operation.getHandle(), operation); - } - - private synchronized Operation removeOperation(OperationHandle opHandle) { - return handleToOperation.remove(opHandle); - } - - public OperationStatus getOperationStatus(OperationHandle opHandle) - throws 
HiveSQLException { - return getOperation(opHandle).getStatus(); - } - - public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { - Operation operation = getOperation(opHandle); - OperationState opState = operation.getStatus().getState(); - if (opState == OperationState.CANCELED || - opState == OperationState.CLOSED || - opState == OperationState.FINISHED || - opState == OperationState.ERROR || - opState == OperationState.UNKNOWN) { - // Cancel should be a no-op in either cases - LOG.debug(opHandle + ": Operation is already aborted in state - " + opState); - } - else { - LOG.debug(opHandle + ": Attempting to cancel from state - " + opState); - operation.cancel(); - } - } - - public void closeOperation(OperationHandle opHandle) throws HiveSQLException { - Operation operation = removeOperation(opHandle); - if (operation == null) { - throw new HiveSQLException("Operation does not exist!"); - } - operation.close(); - } - - public TableSchema getOperationResultSetSchema(OperationHandle opHandle) - throws HiveSQLException { - return getOperation(opHandle).getResultSetSchema(); - } - - public RowSet getOperationNextRowSet(OperationHandle opHandle) - throws HiveSQLException { - return getOperation(opHandle).getNextRowSet(); - } - - public RowSet getOperationNextRowSet(OperationHandle opHandle, - FetchOrientation orientation, long maxRows) - throws HiveSQLException { - return getOperation(opHandle).getNextRowSet(orientation, maxRows); - } - - public RowSet getOperationLogRowSet(OperationHandle opHandle, - FetchOrientation orientation, long maxRows) - throws HiveSQLException { - // get the OperationLog object from the operation - OperationLog operationLog = getOperation(opHandle).getOperationLog(); - if (operationLog == null) { - throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle); - } - - // read logs - List logs; - try { - logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows); - } catch (SQLException e) { - throw new HiveSQLException(e.getMessage(), e.getCause()); - } - - - // convert logs to RowSet - TableSchema tableSchema = new TableSchema(getLogSchema()); - RowSet rowSet = RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion()); - for (String log : logs) { - rowSet.addRow(new String[] {log}); - } - - return rowSet; - } - - private boolean isFetchFirst(FetchOrientation fetchOrientation) { - //TODO: Since OperationLog is moved to package o.a.h.h.ql.session, - // we may add a Enum there and map FetchOrientation to it. 
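getOperationLogRowSet above reads a batch of log lines from the OperationLog and repackages each line as a one-column row under a string field named operation_log. A minimal stand-in that shows the same reshaping with plain JDK types; the RowSet and schema machinery is replaced by a List of single-element arrays:

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for the log -> RowSet conversion: each fetched log
// line becomes one single-column row, matching the "operation_log" string field.
final class LogRows {
    static List<String[]> toRows(List<String> logLines) {
        List<String[]> rows = new ArrayList<>();
        for (String line : logLines) {
            rows.add(new String[] { line });
        }
        return rows;
    }

    public static void main(String[] args) {
        List<String> logs = List.of("Compiling query...", "Launching job 1 out of 1");
        System.out.println(toRows(logs).size() + " rows");   // 2 rows
    }
}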
- if (fetchOrientation.equals(FetchOrientation.FETCH_FIRST)) { - return true; - } - return false; - } - - private Schema getLogSchema() { - Schema schema = new Schema(); - FieldSchema fieldSchema = new FieldSchema(); - fieldSchema.setName("operation_log"); - fieldSchema.setType("string"); - schema.addToFieldSchemas(fieldSchema); - return schema; - } - - public OperationLog getOperationLogByThread() { - return OperationLog.getCurrentOperationLog(); - } - - public List removeExpiredOperations(OperationHandle[] handles) { - List removed = new ArrayList(); - for (OperationHandle handle : handles) { - Operation operation = removeTimedOutOperation(handle); - if (operation != null) { - LOG.warn("Operation " + handle + " is timed-out and will be closed"); - removed.add(operation); - } - } - return removed; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java deleted file mode 100644 index c7726f1fac07a..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/SQLOperation.java +++ /dev/null @@ -1,456 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
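removeExpiredOperations above walks a snapshot of handles, drops every operation whose idle timeout has elapsed, and returns the removed operations so the caller can close them afterwards. A sketch of that sweep in the same simplified registry style used earlier; the Expirable interface and handle type are hypothetical:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

// Sketch of the expiry sweep: conditionally remove timed-out entries while
// holding the lock, return them, and let the caller close them outside it.
class ExpiringRegistry {
    interface Expirable { boolean isTimedOut(long now); }

    private final Map<UUID, Expirable> handleToOperation = new HashMap<>();

    synchronized void register(UUID handle, Expirable op) {
        handleToOperation.put(handle, op);
    }

    synchronized List<Expirable> removeExpired(UUID[] handles) {
        long now = System.currentTimeMillis();
        List<Expirable> removed = new ArrayList<>();
        for (UUID handle : handles) {
            Expirable op = handleToOperation.get(handle);
            if (op != null && op.isTimedOut(now)) {
                handleToOperation.remove(handle);
                removed.add(op);       // caller closes these after the sweep
            }
        }
        return removed;
    }
}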
- */ - -package org.apache.hive.service.cli.operation; - -import java.io.IOException; -import java.io.Serializable; -import java.nio.charset.StandardCharsets; -import java.security.PrivilegedExceptionAction; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; - -import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hadoop.hive.ql.CommandNeedRetryException; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.exec.ExplainTask; -import org.apache.hadoop.hive.ql.exec.Task; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.parse.VariableSubstitution; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.SerDe; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.SerDeUtils; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.StructField; -import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationState; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.RowSetFactory; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.session.HiveSession; -import org.apache.hive.service.server.ThreadWithGarbageCleanup; - -/** - * SQLOperation. - * - */ -public class SQLOperation extends ExecuteStatementOperation { - - private Driver driver = null; - private CommandProcessorResponse response; - private TableSchema resultSchema = null; - private Schema mResultSchema = null; - private SerDe serde = null; - private boolean fetchStarted = false; - - public SQLOperation(HiveSession parentSession, String statement, Map confOverlay, boolean runInBackground) { - // TODO: call setRemoteUser in ExecuteStatementOperation or higher. - super(parentSession, statement, confOverlay, runInBackground); - } - - /*** - * Compile the query and extract metadata - * @param sqlOperationConf - * @throws HiveSQLException - */ - public void prepare(HiveConf sqlOperationConf) throws HiveSQLException { - setState(OperationState.RUNNING); - - try { - driver = new Driver(sqlOperationConf, getParentSession().getUserName()); - - // set the operation handle information in Driver, so that thrift API users - // can use the operation handle they receive, to lookup query information in - // Yarn ATS - String guid64 = Base64.encodeBase64URLSafeString(getHandle().getHandleIdentifier() - .toTHandleIdentifier().getGuid()).trim(); - driver.setOperationId(guid64); - - // In Hive server mode, we are not able to retry in the FetchTask - // case, when calling fetch queries since execute() has returned. 
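prepare() above tags the Hive Driver with a URL-safe Base64 encoding of the operation handle's GUID so external tools (for example the YARN timeline service) can correlate the query. The deleted code uses commons-codec; the same encoding can be sketched with the JDK's java.util.Base64, and the UUID-to-bytes conversion shown here is illustrative:

import java.nio.ByteBuffer;
import java.util.Base64;
import java.util.UUID;

// Sketch of producing a URL-safe, unpadded Base64 operation id from a GUID,
// comparable in spirit to Base64.encodeBase64URLSafeString(...) in the
// deleted prepare() method.
final class OperationId {
    static String fromGuid(UUID guid) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putLong(guid.getMostSignificantBits());
        buf.putLong(guid.getLeastSignificantBits());
        return Base64.getUrlEncoder().withoutPadding().encodeToString(buf.array());
    }

    public static void main(String[] args) {
        // 16 bytes encode to a 22-character URL-safe string without padding
        System.out.println(fromGuid(UUID.randomUUID()));
    }
}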
- // For now, we disable the test attempts. - driver.setTryCount(Integer.MAX_VALUE); - - String subStatement = new VariableSubstitution().substitute(sqlOperationConf, statement); - response = driver.compileAndRespond(subStatement); - if (0 != response.getResponseCode()) { - throw toSQLException("Error while compiling statement", response); - } - - mResultSchema = driver.getSchema(); - - // hasResultSet should be true only if the query has a FetchTask - // "explain" is an exception for now - if(driver.getPlan().getFetchTask() != null) { - //Schema has to be set - if (mResultSchema == null || !mResultSchema.isSetFieldSchemas()) { - throw new HiveSQLException("Error compiling query: Schema and FieldSchema " + - "should be set when query plan has a FetchTask"); - } - resultSchema = new TableSchema(mResultSchema); - setHasResultSet(true); - } else { - setHasResultSet(false); - } - // Set hasResultSet true if the plan has ExplainTask - // TODO explain should use a FetchTask for reading - for (Task task: driver.getPlan().getRootTasks()) { - if (task.getClass() == ExplainTask.class) { - resultSchema = new TableSchema(mResultSchema); - setHasResultSet(true); - break; - } - } - } catch (HiveSQLException e) { - setState(OperationState.ERROR); - throw e; - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException("Error running query: " + e.toString(), e); - } - } - - private void runQuery(HiveConf sqlOperationConf) throws HiveSQLException { - try { - // In Hive server mode, we are not able to retry in the FetchTask - // case, when calling fetch queries since execute() has returned. - // For now, we disable the test attempts. - driver.setTryCount(Integer.MAX_VALUE); - response = driver.run(); - if (0 != response.getResponseCode()) { - throw toSQLException("Error while processing statement", response); - } - } catch (HiveSQLException e) { - // If the operation was cancelled by another thread, - // Driver#run will return a non-zero response code. - // We will simply return if the operation state is CANCELED, - // otherwise throw an exception - if (getStatus().getState() == OperationState.CANCELED) { - return; - } - else { - setState(OperationState.ERROR); - throw e; - } - } catch (Exception e) { - setState(OperationState.ERROR); - throw new HiveSQLException("Error running query: " + e.toString(), e); - } - setState(OperationState.FINISHED); - } - - @Override - public void runInternal() throws HiveSQLException { - setState(OperationState.PENDING); - final HiveConf opConfig = getConfigForOperation(); - prepare(opConfig); - if (!shouldRunAsync()) { - runQuery(opConfig); - } else { - // We'll pass ThreadLocals in the background thread from the foreground (handler) thread - final SessionState parentSessionState = SessionState.get(); - // ThreadLocal Hive object needs to be set in background thread. - // The metastore client in Hive is associated with right user. 
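The async branch of runInternal() captures thread-local state (SessionState, the Hive object, the caller's UGI) on the handler thread and re-installs it inside the background Runnable, because ThreadLocals do not follow a task into a pool thread. A pure-JDK sketch of that hand-off; the SessionContext type is hypothetical:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Sketch of propagating thread-local context from the submitting (handler)
// thread into a pooled background thread, as the deleted async path does for
// SessionState, Hive and the caller's UGI.
final class ContextHandoff {
    static final class SessionContext {
        final String user;
        SessionContext(String user) { this.user = user; }
    }

    private static final ThreadLocal<SessionContext> CURRENT = new ThreadLocal<>();

    public static void main(String[] args) throws Exception {
        CURRENT.set(new SessionContext("alice"));          // set on the handler thread

        // Capture on the submitting thread; pool threads do not inherit ThreadLocals.
        final SessionContext captured = CURRENT.get();

        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.submit(() -> {
            CURRENT.set(captured);                          // re-install in the worker
            try {
                System.out.println("running as " + CURRENT.get().user);
            } finally {
                CURRENT.remove();                           // mirrors unregisterOperationLog()
            }
        }).get();
        pool.shutdown();
    }
}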
- final Hive parentHive = getSessionHive(); - // Current UGI will get used by metastore when metsatore is in embedded mode - // So this needs to get passed to the new background thread - final UserGroupInformation currentUGI = getCurrentUGI(opConfig); - // Runnable impl to call runInternal asynchronously, - // from a different thread - Runnable backgroundOperation = new Runnable() { - @Override - public void run() { - PrivilegedExceptionAction doAsAction = new PrivilegedExceptionAction() { - @Override - public Object run() throws HiveSQLException { - Hive.set(parentHive); - SessionState.setCurrentSessionState(parentSessionState); - // Set current OperationLog in this async thread for keeping on saving query log. - registerCurrentOperationLog(); - try { - runQuery(opConfig); - } catch (HiveSQLException e) { - setOperationException(e); - LOG.error("Error running hive query: ", e); - } finally { - unregisterOperationLog(); - } - return null; - } - }; - - try { - currentUGI.doAs(doAsAction); - } catch (Exception e) { - setOperationException(new HiveSQLException(e)); - LOG.error("Error running hive query as user : " + currentUGI.getShortUserName(), e); - } - finally { - /** - * We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup - * when this thread is garbage collected later. - * @see org.apache.hive.service.server.ThreadWithGarbageCleanup#finalize() - */ - if (ThreadWithGarbageCleanup.currentThread() instanceof ThreadWithGarbageCleanup) { - ThreadWithGarbageCleanup currentThread = - (ThreadWithGarbageCleanup) ThreadWithGarbageCleanup.currentThread(); - currentThread.cacheThreadLocalRawStore(); - } - } - } - }; - try { - // This submit blocks if no background threads are available to run this operation - Future backgroundHandle = - getParentSession().getSessionManager().submitBackgroundOperation(backgroundOperation); - setBackgroundHandle(backgroundHandle); - } catch (RejectedExecutionException rejected) { - setState(OperationState.ERROR); - throw new HiveSQLException("The background threadpool cannot accept" + - " new task for execution, please retry the operation", rejected); - } - } - } - - /** - * Returns the current UGI on the stack - * @param opConfig - * @return UserGroupInformation - * @throws HiveSQLException - */ - private UserGroupInformation getCurrentUGI(HiveConf opConfig) throws HiveSQLException { - try { - return Utils.getUGI(); - } catch (Exception e) { - throw new HiveSQLException("Unable to get current user", e); - } - } - - /** - * Returns the ThreadLocal Hive for the current thread - * @return Hive - * @throws HiveSQLException - */ - private Hive getSessionHive() throws HiveSQLException { - try { - return Hive.get(); - } catch (HiveException e) { - throw new HiveSQLException("Failed to get ThreadLocal Hive object", e); - } - } - - private void cleanup(OperationState state) throws HiveSQLException { - setState(state); - if (shouldRunAsync()) { - Future backgroundHandle = getBackgroundHandle(); - if (backgroundHandle != null) { - backgroundHandle.cancel(true); - } - } - if (driver != null) { - driver.close(); - driver.destroy(); - } - driver = null; - - SessionState ss = SessionState.get(); - if (ss.getTmpOutputFile() != null) { - ss.getTmpOutputFile().delete(); - } - } - - @Override - public void cancel() throws HiveSQLException { - cleanup(OperationState.CANCELED); - } - - @Override - public void close() throws HiveSQLException { - cleanup(OperationState.CLOSED); - cleanupOperationLog(); - } - - @Override - public TableSchema 
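When the background pool is saturated, the deleted code turns the RejectedExecutionException from submitBackgroundOperation into a HiveSQLException asking the client to retry. A sketch of that failure mode with a deliberately tiny, bounded JDK thread pool; the names are illustrative:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch: a bounded pool whose submit() can reject work, and the translation
// of that rejection into a caller-facing "please retry" error, mirroring the
// deleted async submission path.
final class BackgroundSubmit {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 0L, TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<>(1),                 // tiny queue to force rejection
                new ThreadPoolExecutor.AbortPolicy());
        Runnable slow = () -> {
            try { Thread.sleep(200); } catch (InterruptedException ignored) { }
        };
        try {
            for (int i = 0; i < 5; i++) {
                Future<?> handle = pool.submit(slow);        // akin to setBackgroundHandle(...)
                System.out.println("submitted task " + i + " -> " + handle);
            }
        } catch (RejectedExecutionException rejected) {
            // The deleted code wraps this in a HiveSQLException asking the client to retry.
            System.out.println("background pool saturated, ask the client to retry: " + rejected);
        } finally {
            pool.shutdownNow();
        }
    }
}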
getResultSetSchema() throws HiveSQLException { - assertState(OperationState.FINISHED); - if (resultSchema == null) { - resultSchema = new TableSchema(driver.getSchema()); - } - return resultSchema; - } - - private final transient List convey = new ArrayList(); - - @Override - public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { - validateDefaultFetchOrientation(orientation); - assertState(OperationState.FINISHED); - - RowSet rowSet = RowSetFactory.create(resultSchema, getProtocolVersion()); - - try { - /* if client is requesting fetch-from-start and its not the first time reading from this operation - * then reset the fetch position to beginning - */ - if (orientation.equals(FetchOrientation.FETCH_FIRST) && fetchStarted) { - driver.resetFetch(); - } - fetchStarted = true; - driver.setMaxRows((int) maxRows); - if (driver.getResults(convey)) { - return decode(convey, rowSet); - } - return rowSet; - } catch (IOException e) { - throw new HiveSQLException(e); - } catch (CommandNeedRetryException e) { - throw new HiveSQLException(e); - } catch (Exception e) { - throw new HiveSQLException(e); - } finally { - convey.clear(); - } - } - - private RowSet decode(List rows, RowSet rowSet) throws Exception { - if (driver.isFetchingTable()) { - return prepareFromRow(rows, rowSet); - } - return decodeFromString(rows, rowSet); - } - - // already encoded to thrift-able object in ThriftFormatter - private RowSet prepareFromRow(List rows, RowSet rowSet) throws Exception { - for (Object row : rows) { - rowSet.addRow((Object[]) row); - } - return rowSet; - } - - private RowSet decodeFromString(List rows, RowSet rowSet) - throws SQLException, SerDeException { - getSerDe(); - StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector(); - List fieldRefs = soi.getAllStructFieldRefs(); - - Object[] deserializedFields = new Object[fieldRefs.size()]; - Object rowObj; - ObjectInspector fieldOI; - - int protocol = getProtocolVersion().getValue(); - for (Object rowString : rows) { - rowObj = serde.deserialize(new BytesWritable(((String)rowString).getBytes(StandardCharsets.UTF_8))); - for (int i = 0; i < fieldRefs.size(); i++) { - StructField fieldRef = fieldRefs.get(i); - fieldOI = fieldRef.getFieldObjectInspector(); - Object fieldData = soi.getStructFieldData(rowObj, fieldRef); - deserializedFields[i] = SerDeUtils.toThriftPayload(fieldData, fieldOI, protocol); - } - rowSet.addRow(deserializedFields); - } - return rowSet; - } - - private SerDe getSerDe() throws SQLException { - if (serde != null) { - return serde; - } - try { - List fieldSchemas = mResultSchema.getFieldSchemas(); - StringBuilder namesSb = new StringBuilder(); - StringBuilder typesSb = new StringBuilder(); - - if (fieldSchemas != null && !fieldSchemas.isEmpty()) { - for (int pos = 0; pos < fieldSchemas.size(); pos++) { - if (pos != 0) { - namesSb.append(","); - typesSb.append(","); - } - namesSb.append(fieldSchemas.get(pos).getName()); - typesSb.append(fieldSchemas.get(pos).getType()); - } - } - String names = namesSb.toString(); - String types = typesSb.toString(); - - serde = new LazySimpleSerDe(); - Properties props = new Properties(); - if (names.length() > 0) { - LOG.debug("Column names: " + names); - props.setProperty(serdeConstants.LIST_COLUMNS, names); - } - if (types.length() > 0) { - LOG.debug("Column types: " + types); - props.setProperty(serdeConstants.LIST_COLUMN_TYPES, types); - } - SerDeUtils.initializeSerDe(serde, new HiveConf(), props, null); - - } catch (Exception ex) { - 
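getSerDe() below concatenates the result schema's column names and types into two comma-separated strings before handing them to LazySimpleSerDe via the serde properties. The string assembly itself is plain Java and can be sketched with String.join; the Field type is illustrative, and the literal property keys here are stand-ins for serdeConstants.LIST_COLUMNS and LIST_COLUMN_TYPES:

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;

// Sketch of turning a result schema into the comma-joined column-name and
// column-type properties that the deleted getSerDe() builds for the SerDe.
final class SerdeProps {
    static final class Field {
        final String name, type;
        Field(String name, String type) { this.name = name; this.type = type; }
    }

    static Properties build(List<Field> schema) {
        List<String> names = new ArrayList<>();
        List<String> types = new ArrayList<>();
        for (Field f : schema) {
            names.add(f.name);
            types.add(f.type);
        }
        Properties props = new Properties();
        if (!schema.isEmpty()) {
            props.setProperty("columns", String.join(",", names));        // cf. LIST_COLUMNS
            props.setProperty("columns.types", String.join(",", types));  // cf. LIST_COLUMN_TYPES
        }
        return props;
    }

    public static void main(String[] args) {
        List<Field> schema = List.of(new Field("id", "int"), new Field("name", "string"));
        System.out.println(build(schema));   // e.g. {columns=id,name, columns.types=int,string}
    }
}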
ex.printStackTrace(); - throw new SQLException("Could not create ResultSet: " + ex.getMessage(), ex); - } - return serde; - } - - /** - * If there are query specific settings to overlay, then create a copy of config - * There are two cases we need to clone the session config that's being passed to hive driver - * 1. Async query - - * If the client changes a config setting, that shouldn't reflect in the execution already underway - * 2. confOverlay - - * The query specific settings should only be applied to the query config and not session - * @return new configuration - * @throws HiveSQLException - */ - private HiveConf getConfigForOperation() throws HiveSQLException { - HiveConf sqlOperationConf = getParentSession().getHiveConf(); - if (!getConfOverlay().isEmpty() || shouldRunAsync()) { - // clone the parent session config for this query - sqlOperationConf = new HiveConf(sqlOperationConf); - - // apply overlay query specific settings, if any - for (Map.Entry confEntry : getConfOverlay().entrySet()) { - try { - sqlOperationConf.verifyAndSet(confEntry.getKey(), confEntry.getValue()); - } catch (IllegalArgumentException e) { - throw new HiveSQLException("Error applying statement specific settings", e); - } - } - } - return sqlOperationConf; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java deleted file mode 100644 index e59d19ea6be42..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/operation/TableTypeMapping.java +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.operation; - -import java.util.Set; - - -public interface TableTypeMapping { - /** - * Map client's table type name to hive's table type - * @param clientTypeName - * @return - */ - String mapToHiveType(String clientTypeName); - - /** - * Map hive's table type name to client's table type - * @param hiveTypeName - * @return - */ - String mapToClientType(String hiveTypeName); - - /** - * Get all the table types of this mapping - * @return - */ - Set getTableTypeNames(); -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSession.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSession.java deleted file mode 100644 index 65f9b298bf4f6..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSession.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
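getConfigForOperation() above clones the session configuration whenever a query-specific overlay is present (or the query runs asynchronously), so per-statement settings never leak back into the session. A simplified sketch of that copy-then-overlay step using a plain Map, with a hypothetical validator standing in for HiveConf.verifyAndSet:

import java.util.HashMap;
import java.util.Map;

// Sketch of the clone-and-overlay pattern from the deleted getConfigForOperation():
// the session map is never mutated; overlay keys are checked as they are applied.
final class OperationConfig {
    static Map<String, String> forOperation(Map<String, String> sessionConf,
                                            Map<String, String> confOverlay,
                                            boolean runAsync) {
        if (confOverlay.isEmpty() && !runAsync) {
            return sessionConf;                      // no isolation needed, reuse as-is
        }
        Map<String, String> opConf = new HashMap<>(sessionConf);   // clone for this query
        for (Map.Entry<String, String> e : confOverlay.entrySet()) {
            verifyAndSet(opConf, e.getKey(), e.getValue());
        }
        return opConf;
    }

    // Hypothetical stand-in for HiveConf.verifyAndSet: reject obviously bad keys.
    private static void verifyAndSet(Map<String, String> conf, String key, String value) {
        if (key == null || key.isEmpty()) {
            throw new IllegalArgumentException("Error applying statement specific settings");
        }
        conf.put(key, value);
    }
}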
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.session; - -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.*; - -public interface HiveSession extends HiveSessionBase { - - void open(Map sessionConfMap) throws Exception; - - IMetaStoreClient getMetaStoreClient() throws HiveSQLException; - - /** - * getInfo operation handler - * @param getInfoType - * @return - * @throws HiveSQLException - */ - GetInfoValue getInfo(GetInfoType getInfoType) throws HiveSQLException; - - /** - * execute operation handler - * @param statement - * @param confOverlay - * @return - * @throws HiveSQLException - */ - OperationHandle executeStatement(String statement, - Map confOverlay) throws HiveSQLException; - - /** - * execute operation handler - * @param statement - * @param confOverlay - * @return - * @throws HiveSQLException - */ - OperationHandle executeStatementAsync(String statement, - Map confOverlay) throws HiveSQLException; - - /** - * getTypeInfo operation handler - * @return - * @throws HiveSQLException - */ - OperationHandle getTypeInfo() throws HiveSQLException; - - /** - * getCatalogs operation handler - * @return - * @throws HiveSQLException - */ - OperationHandle getCatalogs() throws HiveSQLException; - - /** - * getSchemas operation handler - * @param catalogName - * @param schemaName - * @return - * @throws HiveSQLException - */ - OperationHandle getSchemas(String catalogName, String schemaName) - throws HiveSQLException; - - /** - * getTables operation handler - * @param catalogName - * @param schemaName - * @param tableName - * @param tableTypes - * @return - * @throws HiveSQLException - */ - OperationHandle getTables(String catalogName, String schemaName, - String tableName, List tableTypes) throws HiveSQLException; - - /** - * getTableTypes operation handler - * @return - * @throws HiveSQLException - */ - OperationHandle getTableTypes() throws HiveSQLException ; - - /** - * getColumns operation handler - * @param catalogName - * @param schemaName - * @param tableName - * @param columnName - * @return - * @throws HiveSQLException - */ - OperationHandle getColumns(String catalogName, String schemaName, - String tableName, String columnName) throws HiveSQLException; - - /** - * getFunctions operation handler - * @param catalogName - * @param schemaName - * @param functionName - * @return - * @throws HiveSQLException - */ - OperationHandle getFunctions(String catalogName, String schemaName, - String functionName) throws HiveSQLException; - - /** - * close the session - * @throws HiveSQLException - */ - void close() throws HiveSQLException; - - void cancelOperation(OperationHandle opHandle) throws HiveSQLException; - - void closeOperation(OperationHandle opHandle) 
throws HiveSQLException; - - TableSchema getResultSetMetadata(OperationHandle opHandle) - throws HiveSQLException; - - RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, - long maxRows, FetchType fetchType) throws HiveSQLException; - - String getDelegationToken(HiveAuthFactory authFactory, String owner, - String renewer) throws HiveSQLException; - - void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr) - throws HiveSQLException; - - void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr) - throws HiveSQLException; - - void closeExpiredOperations(); - - long getNoOperationTime(); -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionBase.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionBase.java deleted file mode 100644 index b72c18b2b2135..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionBase.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.session; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hive.service.cli.SessionHandle; -import org.apache.hive.service.cli.operation.OperationManager; -import org.apache.hive.service.cli.thrift.TProtocolVersion; - -import java.io.File; - -/** - * Methods that don't need to be executed under a doAs - * context are here. 
Rest of them in HiveSession interface - */ -public interface HiveSessionBase { - - TProtocolVersion getProtocolVersion(); - - /** - * Set the session manager for the session - * @param sessionManager - */ - void setSessionManager(SessionManager sessionManager); - - /** - * Get the session manager for the session - */ - SessionManager getSessionManager(); - - /** - * Set operation manager for the session - * @param operationManager - */ - void setOperationManager(OperationManager operationManager); - - /** - * Check whether operation logging is enabled and session dir is created successfully - */ - boolean isOperationLogEnabled(); - - /** - * Get the session dir, which is the parent dir of operation logs - * @return a file representing the parent directory of operation logs - */ - File getOperationLogSessionDir(); - - /** - * Set the session dir, which is the parent dir of operation logs - * @param operationLogRootDir the parent dir of the session dir - */ - void setOperationLogSessionDir(File operationLogRootDir); - - SessionHandle getSessionHandle(); - - String getUsername(); - - String getPassword(); - - HiveConf getHiveConf(); - - SessionState getSessionState(); - - String getUserName(); - - void setUserName(String userName); - - String getIpAddress(); - - void setIpAddress(String ipAddress); - - long getLastAccessTime(); -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java deleted file mode 100644 index e3fb54d9f47e9..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ /dev/null @@ -1,842 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hive.service.cli.session; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.common.cli.HiveFileProcessor; -import org.apache.hadoop.hive.common.cli.IHiveFileProcessor; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.ql.exec.FetchFormatter; -import org.apache.hadoop.hive.ql.exec.ListSinkOperator; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.history.HiveHistory; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.parse.VariableSubstitution; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hive.common.util.HiveVersionInfo; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.FetchOrientation; -import org.apache.hive.service.cli.FetchType; -import org.apache.hive.service.cli.GetInfoType; -import org.apache.hive.service.cli.GetInfoValue; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.OperationHandle; -import org.apache.hive.service.cli.RowSet; -import org.apache.hive.service.cli.SessionHandle; -import org.apache.hive.service.cli.TableSchema; -import org.apache.hive.service.cli.operation.ExecuteStatementOperation; -import org.apache.hive.service.cli.operation.GetCatalogsOperation; -import org.apache.hive.service.cli.operation.GetColumnsOperation; -import org.apache.hive.service.cli.operation.GetFunctionsOperation; -import org.apache.hive.service.cli.operation.GetSchemasOperation; -import org.apache.hive.service.cli.operation.GetTableTypesOperation; -import org.apache.hive.service.cli.operation.GetTypeInfoOperation; -import org.apache.hive.service.cli.operation.MetadataOperation; -import org.apache.hive.service.cli.operation.Operation; -import org.apache.hive.service.cli.operation.OperationManager; -import org.apache.hive.service.cli.thrift.TProtocolVersion; -import org.apache.hive.service.server.ThreadWithGarbageCleanup; - -import static org.apache.hadoop.hive.conf.SystemVariables.ENV_PREFIX; -import static org.apache.hadoop.hive.conf.SystemVariables.HIVECONF_PREFIX; -import static org.apache.hadoop.hive.conf.SystemVariables.HIVEVAR_PREFIX; -import static org.apache.hadoop.hive.conf.SystemVariables.METACONF_PREFIX; -import static org.apache.hadoop.hive.conf.SystemVariables.SYSTEM_PREFIX; - -/** - * HiveSession - * - */ -public class HiveSessionImpl implements HiveSession { - private final SessionHandle sessionHandle; - private String username; - private final String password; - private HiveConf hiveConf; - private SessionState sessionState; - private String ipAddress; - private static final String FETCH_WORK_SERDE_CLASS = - "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"; - private static final Log LOG = LogFactory.getLog(HiveSessionImpl.class); - private SessionManager sessionManager; - 
private OperationManager operationManager; - private final Set opHandleSet = new HashSet(); - private boolean isOperationLogEnabled; - private File sessionLogDir; - private volatile long lastAccessTime; - private volatile long lastIdleTime; - - public HiveSessionImpl(TProtocolVersion protocol, String username, String password, - HiveConf serverhiveConf, String ipAddress) { - this.username = username; - this.password = password; - this.sessionHandle = new SessionHandle(protocol); - this.hiveConf = new HiveConf(serverhiveConf); - this.ipAddress = ipAddress; - - try { - // In non-impersonation mode, map scheduler queue to current user - // if fair scheduler is configured. - if (! hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) && - hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE)) { - ShimLoader.getHadoopShims().refreshDefaultQueue(hiveConf, username); - } - } catch (IOException e) { - LOG.warn("Error setting scheduler queue: " + e, e); - } - // Set an explicit session name to control the download directory name - hiveConf.set(ConfVars.HIVESESSIONID.varname, - sessionHandle.getHandleIdentifier().toString()); - // Use thrift transportable formatter - hiveConf.set(ListSinkOperator.OUTPUT_FORMATTER, - FetchFormatter.ThriftFormatter.class.getName()); - hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue()); - } - - @Override - /** - * Opens a new HiveServer2 session for the client connection. - * Creates a new SessionState object that will be associated with this HiveServer2 session. - * When the server executes multiple queries in the same session, - * this SessionState object is reused across multiple queries. - * Note that if doAs is true, this call goes through a proxy object, - * which wraps the method logic in a UserGroupInformation#doAs. - * That's why it is important to create SessionState here rather than in the constructor. - */ - public void open(Map sessionConfMap) throws HiveSQLException { - sessionState = new SessionState(hiveConf, username); - sessionState.setUserIpAddress(ipAddress); - sessionState.setIsHiveServerQuery(true); - SessionState.start(sessionState); - try { - sessionState.reloadAuxJars(); - } catch (IOException e) { - String msg = "Failed to load reloadable jar file path: " + e; - LOG.error(msg, e); - throw new HiveSQLException(msg, e); - } - // Process global init file: .hiverc - processGlobalInitFile(); - if (sessionConfMap != null) { - configureSession(sessionConfMap); - } - lastAccessTime = System.currentTimeMillis(); - lastIdleTime = lastAccessTime; - } - - /** - * It is used for processing hiverc file from HiveServer2 side. 
- */ - private class GlobalHivercFileProcessor extends HiveFileProcessor { - @Override - protected BufferedReader loadFile(String fileName) throws IOException { - FileInputStream initStream = null; - BufferedReader bufferedReader = null; - initStream = new FileInputStream(fileName); - bufferedReader = new BufferedReader(new InputStreamReader(initStream)); - return bufferedReader; - } - - @Override - protected int processCmd(String cmd) { - int rc = 0; - String cmd_trimed = cmd.trim(); - try { - executeStatementInternal(cmd_trimed, null, false); - } catch (HiveSQLException e) { - rc = -1; - LOG.warn("Failed to execute HQL command in global .hiverc file.", e); - } - return rc; - } - } - - private void processGlobalInitFile() { - IHiveFileProcessor processor = new GlobalHivercFileProcessor(); - - try { - String hiverc = hiveConf.getVar(ConfVars.HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION); - if (hiverc != null) { - File hivercFile = new File(hiverc); - if (hivercFile.isDirectory()) { - hivercFile = new File(hivercFile, SessionManager.HIVERCFILE); - } - if (hivercFile.isFile()) { - LOG.info("Running global init file: " + hivercFile); - int rc = processor.processFile(hivercFile.getAbsolutePath()); - if (rc != 0) { - LOG.error("Failed on initializing global .hiverc file"); - } - } else { - LOG.debug("Global init file " + hivercFile + " does not exist"); - } - } - } catch (IOException e) { - LOG.warn("Failed on initializing global .hiverc file", e); - } - } - - private void configureSession(Map sessionConfMap) throws HiveSQLException { - SessionState.setCurrentSessionState(sessionState); - for (Map.Entry entry : sessionConfMap.entrySet()) { - String key = entry.getKey(); - if (key.startsWith("set:")) { - try { - setVariable(key.substring(4), entry.getValue()); - } catch (Exception e) { - throw new HiveSQLException(e); - } - } else if (key.startsWith("use:")) { - SessionState.get().setCurrentDatabase(entry.getValue()); - } else { - hiveConf.verifyAndSet(key, entry.getValue()); - } - } - } - - // Copy from org.apache.hadoop.hive.ql.processors.SetProcessor, only change: - // setConf(varname, propName, varvalue, true) when varname.startsWith(HIVECONF_PREFIX) - public static int setVariable(String varname, String varvalue) throws Exception { - SessionState ss = SessionState.get(); - if (varvalue.contains("\n")){ - ss.err.println("Warning: Value had a \\n character in it."); - } - varname = varname.trim(); - if (varname.startsWith(ENV_PREFIX)){ - ss.err.println("env:* variables can not be set."); - return 1; - } else if (varname.startsWith(SYSTEM_PREFIX)){ - String propName = varname.substring(SYSTEM_PREFIX.length()); - System.getProperties().setProperty(propName, - new VariableSubstitution().substitute(ss.getConf(),varvalue)); - } else if (varname.startsWith(HIVECONF_PREFIX)){ - String propName = varname.substring(HIVECONF_PREFIX.length()); - setConf(varname, propName, varvalue, true); - } else if (varname.startsWith(HIVEVAR_PREFIX)) { - String propName = varname.substring(HIVEVAR_PREFIX.length()); - ss.getHiveVariables().put(propName, - new VariableSubstitution().substitute(ss.getConf(),varvalue)); - } else if (varname.startsWith(METACONF_PREFIX)) { - String propName = varname.substring(METACONF_PREFIX.length()); - Hive hive = Hive.get(ss.getConf()); - hive.setMetaConf(propName, new VariableSubstitution().substitute(ss.getConf(), varvalue)); - } else { - setConf(varname, varname, varvalue, true); - } - return 0; - } - - // returns non-null string for validation fail - private static void setConf(String 
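setVariable() above routes a key to different scopes by prefix: env: is rejected, system: writes a JVM system property, hiveconf: and bare keys go to the session configuration, hivevar: fills the substitution map, and metaconf: is forwarded to the metastore. A condensed, pure-JDK sketch of that dispatch; the maps are hypothetical stand-ins for HiveConf and the hivevar store, and the prefix literals follow the constant names in the deleted imports:

import java.util.HashMap;
import java.util.Map;

// Condensed sketch of the prefix-based routing in the deleted setVariable().
final class SetVariable {
    final Map<String, String> hiveConf = new HashMap<>();
    final Map<String, String> hiveVars = new HashMap<>();

    int set(String varname, String value) {
        varname = varname.trim();
        if (varname.startsWith("env:")) {
            System.err.println("env:* variables can not be set.");
            return 1;                                            // rejected, as in the original
        } else if (varname.startsWith("system:")) {
            System.setProperty(varname.substring("system:".length()), value);
        } else if (varname.startsWith("hiveconf:")) {
            hiveConf.put(varname.substring("hiveconf:".length()), value);
        } else if (varname.startsWith("hivevar:")) {
            hiveVars.put(varname.substring("hivevar:".length()), value);
        } else if (varname.startsWith("metaconf:")) {
            // the real code forwards this to the metastore via Hive.setMetaConf(...)
            hiveConf.put(varname, value);
        } else {
            hiveConf.put(varname, value);                        // bare keys go to the conf
        }
        return 0;
    }
}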
varname, String key, String varvalue, boolean register) - throws IllegalArgumentException { - HiveConf conf = SessionState.get().getConf(); - String value = new VariableSubstitution().substitute(conf, varvalue); - if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { - HiveConf.ConfVars confVars = HiveConf.getConfVars(key); - if (confVars != null) { - if (!confVars.isType(value)) { - StringBuilder message = new StringBuilder(); - message.append("'SET ").append(varname).append('=').append(varvalue); - message.append("' FAILED because ").append(key).append(" expects "); - message.append(confVars.typeString()).append(" type value."); - throw new IllegalArgumentException(message.toString()); - } - String fail = confVars.validate(value); - if (fail != null) { - StringBuilder message = new StringBuilder(); - message.append("'SET ").append(varname).append('=').append(varvalue); - message.append("' FAILED in validation : ").append(fail).append('.'); - throw new IllegalArgumentException(message.toString()); - } - } else if (key.startsWith("hive.")) { - throw new IllegalArgumentException("hive configuration " + key + " does not exists."); - } - } - conf.verifyAndSet(key, value); - if (register) { - SessionState.get().getOverriddenConfigurations().put(key, value); - } - } - - @Override - public void setOperationLogSessionDir(File operationLogRootDir) { - if (!operationLogRootDir.exists()) { - LOG.warn("The operation log root directory is removed, recreating: " + - operationLogRootDir.getAbsolutePath()); - if (!operationLogRootDir.mkdirs()) { - LOG.warn("Unable to create operation log root directory: " + - operationLogRootDir.getAbsolutePath()); - } - } - if (!operationLogRootDir.canWrite()) { - LOG.warn("The operation log root directory is not writable: " + - operationLogRootDir.getAbsolutePath()); - } - sessionLogDir = new File(operationLogRootDir, sessionHandle.getHandleIdentifier().toString()); - isOperationLogEnabled = true; - if (!sessionLogDir.exists()) { - if (!sessionLogDir.mkdir()) { - LOG.warn("Unable to create operation log session directory: " + - sessionLogDir.getAbsolutePath()); - isOperationLogEnabled = false; - } - } - if (isOperationLogEnabled) { - LOG.info("Operation log session directory is created: " + sessionLogDir.getAbsolutePath()); - } - } - - @Override - public boolean isOperationLogEnabled() { - return isOperationLogEnabled; - } - - @Override - public File getOperationLogSessionDir() { - return sessionLogDir; - } - - @Override - public TProtocolVersion getProtocolVersion() { - return sessionHandle.getProtocolVersion(); - } - - @Override - public SessionManager getSessionManager() { - return sessionManager; - } - - @Override - public void setSessionManager(SessionManager sessionManager) { - this.sessionManager = sessionManager; - } - - private OperationManager getOperationManager() { - return operationManager; - } - - @Override - public void setOperationManager(OperationManager operationManager) { - this.operationManager = operationManager; - } - - protected synchronized void acquire(boolean userAccess) { - // Need to make sure that the this HiveServer2's session's SessionState is - // stored in the thread local for the handler thread. - SessionState.setCurrentSessionState(sessionState); - if (userAccess) { - lastAccessTime = System.currentTimeMillis(); - } - } - - /** - * 1. We'll remove the ThreadLocal SessionState as this thread might now serve - * other requests. - * 2. 
We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup - * when this thread is garbage collected later. - * @see org.apache.hive.service.server.ThreadWithGarbageCleanup#finalize() - */ - protected synchronized void release(boolean userAccess) { - SessionState.detachSession(); - if (ThreadWithGarbageCleanup.currentThread() instanceof ThreadWithGarbageCleanup) { - ThreadWithGarbageCleanup currentThread = - (ThreadWithGarbageCleanup) ThreadWithGarbageCleanup.currentThread(); - currentThread.cacheThreadLocalRawStore(); - } - if (userAccess) { - lastAccessTime = System.currentTimeMillis(); - } - if (opHandleSet.isEmpty()) { - lastIdleTime = System.currentTimeMillis(); - } else { - lastIdleTime = 0; - } - } - - @Override - public SessionHandle getSessionHandle() { - return sessionHandle; - } - - @Override - public String getUsername() { - return username; - } - - @Override - public String getPassword() { - return password; - } - - @Override - public HiveConf getHiveConf() { - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE, FETCH_WORK_SERDE_CLASS); - return hiveConf; - } - - @Override - public IMetaStoreClient getMetaStoreClient() throws HiveSQLException { - try { - return Hive.get(getHiveConf()).getMSC(); - } catch (HiveException e) { - throw new HiveSQLException("Failed to get metastore connection", e); - } catch (MetaException e) { - throw new HiveSQLException("Failed to get metastore connection", e); - } - } - - @Override - public GetInfoValue getInfo(GetInfoType getInfoType) - throws HiveSQLException { - acquire(true); - try { - switch (getInfoType) { - case CLI_SERVER_NAME: - return new GetInfoValue("Hive"); - case CLI_DBMS_NAME: - return new GetInfoValue("Apache Hive"); - case CLI_DBMS_VER: - return new GetInfoValue(HiveVersionInfo.getVersion()); - case CLI_MAX_COLUMN_NAME_LEN: - return new GetInfoValue(128); - case CLI_MAX_SCHEMA_NAME_LEN: - return new GetInfoValue(128); - case CLI_MAX_TABLE_NAME_LEN: - return new GetInfoValue(128); - case CLI_TXN_CAPABLE: - default: - throw new HiveSQLException("Unrecognized GetInfoType value: " + getInfoType.toString()); - } - } finally { - release(true); - } - } - - @Override - public OperationHandle executeStatement(String statement, Map confOverlay) - throws HiveSQLException { - return executeStatementInternal(statement, confOverlay, false); - } - - @Override - public OperationHandle executeStatementAsync(String statement, Map confOverlay) - throws HiveSQLException { - return executeStatementInternal(statement, confOverlay, true); - } - - private OperationHandle executeStatementInternal(String statement, Map confOverlay, - boolean runAsync) - throws HiveSQLException { - acquire(true); - - OperationManager operationManager = getOperationManager(); - ExecuteStatementOperation operation = operationManager - .newExecuteStatementOperation(getSession(), statement, confOverlay, runAsync); - OperationHandle opHandle = operation.getHandle(); - try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - // Referring to SQLOperation.java, there is no chance that a HiveSQLException throws and the asyn - // background operation submits to thread pool successfully at the same time. 
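Every operation launcher in the deleted HiveSessionImpl follows the same shape: acquire(true) pins the session's state to the handler thread and refreshes the access time, the operation is run and its handle recorded, a failure unregisters the handle, and release(true) always detaches the state in a finally block. A compact sketch of that pattern with hypothetical types:

import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
import java.util.function.Consumer;

// Sketch of the acquire / run / register-handle / release pattern repeated by
// the deleted executeStatementInternal(), getTypeInfo(), getTables(), etc.
final class SessionPattern {
    interface Op {
        UUID getHandle();
        void run();
    }

    private final Set<UUID> opHandles = new HashSet<>();
    private volatile long lastAccessTime;

    private void acquire() {
        // the real code also installs the session's SessionState in a ThreadLocal
        lastAccessTime = System.currentTimeMillis();
    }

    private void release() {
        // the real code detaches the ThreadLocal SessionState here
        lastAccessTime = System.currentTimeMillis();
    }

    UUID runOperation(Op operation, Consumer<UUID> closeOperation) {
        acquire();
        UUID handle = operation.getHandle();
        try {
            operation.run();
            opHandles.add(handle);                // only track it once run() succeeded
            return handle;
        } catch (RuntimeException e) {
            closeOperation.accept(handle);        // unregister the half-started operation
            throw e;
        } finally {
            release();                            // always detach, even on failure
        }
    }
}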
So, Cleanup - // opHandle directly when got HiveSQLException - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public OperationHandle getTypeInfo() - throws HiveSQLException { - acquire(true); - - OperationManager operationManager = getOperationManager(); - GetTypeInfoOperation operation = operationManager.newGetTypeInfoOperation(getSession()); - OperationHandle opHandle = operation.getHandle(); - try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public OperationHandle getCatalogs() - throws HiveSQLException { - acquire(true); - - OperationManager operationManager = getOperationManager(); - GetCatalogsOperation operation = operationManager.newGetCatalogsOperation(getSession()); - OperationHandle opHandle = operation.getHandle(); - try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public OperationHandle getSchemas(String catalogName, String schemaName) - throws HiveSQLException { - acquire(true); - - OperationManager operationManager = getOperationManager(); - GetSchemasOperation operation = - operationManager.newGetSchemasOperation(getSession(), catalogName, schemaName); - OperationHandle opHandle = operation.getHandle(); - try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public OperationHandle getTables(String catalogName, String schemaName, String tableName, - List tableTypes) - throws HiveSQLException { - acquire(true); - - OperationManager operationManager = getOperationManager(); - MetadataOperation operation = - operationManager.newGetTablesOperation(getSession(), catalogName, schemaName, tableName, tableTypes); - OperationHandle opHandle = operation.getHandle(); - try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public OperationHandle getTableTypes() - throws HiveSQLException { - acquire(true); - - OperationManager operationManager = getOperationManager(); - GetTableTypesOperation operation = operationManager.newGetTableTypesOperation(getSession()); - OperationHandle opHandle = operation.getHandle(); - try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public OperationHandle getColumns(String catalogName, String schemaName, - String tableName, String columnName) throws HiveSQLException { - acquire(true); - String addedJars = Utilities.getResourceFiles(hiveConf, SessionState.ResourceType.JAR); - if (StringUtils.isNotBlank(addedJars)) { - IMetaStoreClient metastoreClient = getSession().getMetaStoreClient(); - metastoreClient.setHiveAddedJars(addedJars); - } - OperationManager operationManager = getOperationManager(); - GetColumnsOperation operation = operationManager.newGetColumnsOperation(getSession(), - catalogName, schemaName, tableName, columnName); - OperationHandle opHandle = operation.getHandle(); 
- try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public OperationHandle getFunctions(String catalogName, String schemaName, String functionName) - throws HiveSQLException { - acquire(true); - - OperationManager operationManager = getOperationManager(); - GetFunctionsOperation operation = operationManager - .newGetFunctionsOperation(getSession(), catalogName, schemaName, functionName); - OperationHandle opHandle = operation.getHandle(); - try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; - } catch (HiveSQLException e) { - operationManager.closeOperation(opHandle); - throw e; - } finally { - release(true); - } - } - - @Override - public void close() throws HiveSQLException { - try { - acquire(true); - // Iterate through the opHandles and close their operations - for (OperationHandle opHandle : opHandleSet) { - try { - operationManager.closeOperation(opHandle); - } catch (Exception e) { - LOG.warn("Exception is thrown closing operation " + opHandle, e); - } - } - opHandleSet.clear(); - // Cleanup session log directory. - cleanupSessionLogDir(); - // Cleanup pipeout file. - cleanupPipeoutFile(); - HiveHistory hiveHist = sessionState.getHiveHistory(); - if (null != hiveHist) { - hiveHist.closeStream(); - } - try { - sessionState.close(); - } finally { - sessionState = null; - } - } catch (IOException ioe) { - throw new HiveSQLException("Failure to close", ioe); - } finally { - if (sessionState != null) { - try { - sessionState.close(); - } catch (Throwable t) { - LOG.warn("Error closing session", t); - } - sessionState = null; - } - release(true); - } - } - - private void cleanupPipeoutFile() { - String lScratchDir = hiveConf.getVar(ConfVars.LOCALSCRATCHDIR); - String sessionID = hiveConf.getVar(ConfVars.HIVESESSIONID); - - File[] fileAry = new File(lScratchDir).listFiles( - (dir, name) -> name.startsWith(sessionID) && name.endsWith(".pipeout")); - - if (fileAry == null) { - LOG.error("Unable to access pipeout files in " + lScratchDir); - } else { - for (File file : fileAry) { - try { - FileUtils.forceDelete(file); - } catch (Exception e) { - LOG.error("Failed to cleanup pipeout file: " + file, e); - } - } - } - } - - private void cleanupSessionLogDir() { - if (isOperationLogEnabled) { - try { - FileUtils.forceDelete(sessionLogDir); - } catch (Exception e) { - LOG.error("Failed to cleanup session log dir: " + sessionHandle, e); - } - } - } - - @Override - public SessionState getSessionState() { - return sessionState; - } - - @Override - public String getUserName() { - return username; - } - - @Override - public void setUserName(String userName) { - this.username = userName; - } - - @Override - public long getLastAccessTime() { - return lastAccessTime; - } - - @Override - public void closeExpiredOperations() { - OperationHandle[] handles = opHandleSet.toArray(new OperationHandle[opHandleSet.size()]); - if (handles.length > 0) { - List operations = operationManager.removeExpiredOperations(handles); - if (!operations.isEmpty()) { - closeTimedOutOperations(operations); - } - } - } - - @Override - public long getNoOperationTime() { - return lastIdleTime > 0 ? 
System.currentTimeMillis() - lastIdleTime : 0; - } - - private void closeTimedOutOperations(List operations) { - acquire(false); - try { - for (Operation operation : operations) { - opHandleSet.remove(operation.getHandle()); - try { - operation.close(); - } catch (Exception e) { - LOG.warn("Exception is thrown closing timed-out operation " + operation.getHandle(), e); - } - } - } finally { - release(false); - } - } - - @Override - public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { - acquire(true); - try { - sessionManager.getOperationManager().cancelOperation(opHandle); - } finally { - release(true); - } - } - - @Override - public void closeOperation(OperationHandle opHandle) throws HiveSQLException { - acquire(true); - try { - operationManager.closeOperation(opHandle); - opHandleSet.remove(opHandle); - } finally { - release(true); - } - } - - @Override - public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException { - acquire(true); - try { - return sessionManager.getOperationManager().getOperationResultSetSchema(opHandle); - } finally { - release(true); - } - } - - @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, - long maxRows, FetchType fetchType) throws HiveSQLException { - acquire(true); - try { - if (fetchType == FetchType.QUERY_OUTPUT) { - return operationManager.getOperationNextRowSet(opHandle, orientation, maxRows); - } - return operationManager.getOperationLogRowSet(opHandle, orientation, maxRows); - } finally { - release(true); - } - } - - protected HiveSession getSession() { - return this; - } - - @Override - public String getIpAddress() { - return ipAddress; - } - - @Override - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - @Override - public String getDelegationToken(HiveAuthFactory authFactory, String owner, String renewer) - throws HiveSQLException { - HiveAuthFactory.verifyProxyAccess(getUsername(), owner, getIpAddress(), getHiveConf()); - return authFactory.getDelegationToken(owner, renewer); - } - - @Override - public void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr) - throws HiveSQLException { - HiveAuthFactory.verifyProxyAccess(getUsername(), getUserFromToken(authFactory, tokenStr), - getIpAddress(), getHiveConf()); - authFactory.cancelDelegationToken(tokenStr); - } - - @Override - public void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr) - throws HiveSQLException { - HiveAuthFactory.verifyProxyAccess(getUsername(), getUserFromToken(authFactory, tokenStr), - getIpAddress(), getHiveConf()); - authFactory.renewDelegationToken(tokenStr); - } - - // extract the real user from the given token string - private String getUserFromToken(HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException { - return authFactory.getUserFromToken(tokenStr); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java deleted file mode 100644 index 762dbb2faadec..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/HiveSessionImplwithUGI.java +++ /dev/null @@ -1,182 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.session; - -import java.io.IOException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.Hive; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.thrift.TProtocolVersion; - -/** - * - * HiveSessionImplwithUGI. - * HiveSession with connecting user's UGI and delegation token if required - */ -public class HiveSessionImplwithUGI extends HiveSessionImpl { - public static final String HS2TOKEN = "HiveServer2ImpersonationToken"; - - private UserGroupInformation sessionUgi = null; - private String delegationTokenStr = null; - private Hive sessionHive = null; - private HiveSession proxySession = null; - static final Log LOG = LogFactory.getLog(HiveSessionImplwithUGI.class); - - public HiveSessionImplwithUGI(TProtocolVersion protocol, String username, String password, - HiveConf hiveConf, String ipAddress, String delegationToken) throws HiveSQLException { - super(protocol, username, password, hiveConf, ipAddress); - setSessionUGI(username); - setDelegationToken(delegationToken); - - // create a new metastore connection for this particular user session - Hive.set(null); - try { - sessionHive = Hive.get(getHiveConf()); - } catch (HiveException e) { - throw new HiveSQLException("Failed to setup metastore connection", e); - } - } - - // setup appropriate UGI for the session - public void setSessionUGI(String owner) throws HiveSQLException { - if (owner == null) { - throw new HiveSQLException("No username provided for impersonation"); - } - if (UserGroupInformation.isSecurityEnabled()) { - try { - sessionUgi = UserGroupInformation.createProxyUser( - owner, UserGroupInformation.getLoginUser()); - } catch (IOException e) { - throw new HiveSQLException("Couldn't setup proxy user", e); - } - } else { - sessionUgi = UserGroupInformation.createRemoteUser(owner); - } - } - - public UserGroupInformation getSessionUgi() { - return this.sessionUgi; - } - - public String getDelegationToken() { - return this.delegationTokenStr; - } - - @Override - protected synchronized void acquire(boolean userAccess) { - super.acquire(userAccess); - // if we have a metastore connection with impersonation, then set it first - if (sessionHive != null) { - Hive.set(sessionHive); - } - } - - /** - * Close the file systems for the session and remove it from the FileSystem cache. 
- * Cancel the session's delegation token and close the metastore connection - */ - @Override - public void close() throws HiveSQLException { - try { - acquire(true); - cancelDelegationToken(); - } finally { - try { - super.close(); - } finally { - try { - FileSystem.closeAllForUGI(sessionUgi); - } catch (IOException ioe) { - throw new HiveSQLException("Could not clean up file-system handles for UGI: " - + sessionUgi, ioe); - } - } - } - } - - /** - * Enable delegation token for the session - * save the token string and set the token.signature in hive conf. The metastore client uses - * this token.signature to determine where to use kerberos or delegation token - * @throws HiveException - * @throws IOException - */ - private void setDelegationToken(String delegationTokenStr) throws HiveSQLException { - this.delegationTokenStr = delegationTokenStr; - if (delegationTokenStr != null) { - getHiveConf().set("hive.metastore.token.signature", HS2TOKEN); - try { - Utils.setTokenStr(sessionUgi, delegationTokenStr, HS2TOKEN); - } catch (IOException e) { - throw new HiveSQLException("Couldn't setup delegation token in the ugi", e); - } - } - } - - // If the session has a delegation token obtained from the metastore, then cancel it - private void cancelDelegationToken() throws HiveSQLException { - if (delegationTokenStr != null) { - try { - Hive.get(getHiveConf()).cancelDelegationToken(delegationTokenStr); - } catch (HiveException e) { - throw new HiveSQLException("Couldn't cancel delegation token", e); - } - // close the metastore connection created with this delegation token - Hive.closeCurrent(); - } - } - - @Override - protected HiveSession getSession() { - assert proxySession != null; - - return proxySession; - } - - public void setProxySession(HiveSession proxySession) { - this.proxySession = proxySession; - } - - @Override - public String getDelegationToken(HiveAuthFactory authFactory, String owner, - String renewer) throws HiveSQLException { - return authFactory.getDelegationToken(owner, renewer); - } - - @Override - public void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr) - throws HiveSQLException { - authFactory.cancelDelegationToken(tokenStr); - } - - @Override - public void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr) - throws HiveSQLException { - authFactory.renewDelegationToken(tokenStr); - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/SessionManager.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/SessionManager.java deleted file mode 100644 index ad6fb3ba37a0e..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/session/SessionManager.java +++ /dev/null @@ -1,377 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.session; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Date; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hive.service.CompositeService; -import org.apache.hive.service.cli.HiveSQLException; -import org.apache.hive.service.cli.SessionHandle; -import org.apache.hive.service.cli.operation.OperationManager; -import org.apache.hive.service.cli.thrift.TProtocolVersion; -import org.apache.hive.service.server.HiveServer2; -import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; - -/** - * SessionManager. - * - */ -public class SessionManager extends CompositeService { - - private static final Log LOG = LogFactory.getLog(SessionManager.class); - public static final String HIVERCFILE = ".hiverc"; - private HiveConf hiveConf; - private final Map handleToSession = - new ConcurrentHashMap(); - private final OperationManager operationManager = new OperationManager(); - private ThreadPoolExecutor backgroundOperationPool; - private boolean isOperationLogEnabled; - private File operationLogRootDir; - - private long checkInterval; - private long sessionTimeout; - private boolean checkOperation; - - private volatile boolean shutdown; - // The HiveServer2 instance running this service - private final HiveServer2 hiveServer2; - - public SessionManager(HiveServer2 hiveServer2) { - super(SessionManager.class.getSimpleName()); - this.hiveServer2 = hiveServer2; - } - - @Override - public synchronized void init(HiveConf hiveConf) { - this.hiveConf = hiveConf; - //Create operation log root directory, if operation logging is enabled - if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { - initOperationLogRootDir(); - } - createBackgroundOperationPool(); - addService(operationManager); - super.init(hiveConf); - } - - private void createBackgroundOperationPool() { - int poolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS); - LOG.info("HiveServer2: Background operation thread pool size: " + poolSize); - int poolQueueSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE); - LOG.info("HiveServer2: Background operation thread wait queue size: " + poolQueueSize); - long keepAliveTime = HiveConf.getTimeVar( - hiveConf, ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME, TimeUnit.SECONDS); - LOG.info( - "HiveServer2: Background operation thread keepalive time: " + keepAliveTime + " seconds"); - - // Create a thread pool with #poolSize threads - // Threads terminate when they are idle for more than the keepAliveTime - // A bounded blocking queue is used to queue incoming operations, if #operations > poolSize - String threadPoolName = "HiveServer2-Background-Pool"; - backgroundOperationPool = new ThreadPoolExecutor(poolSize, poolSize, - keepAliveTime, TimeUnit.SECONDS, new LinkedBlockingQueue(poolQueueSize), - new ThreadFactoryWithGarbageCleanup(threadPoolName)); - 
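The pool built above is bounded in two ways: poolSize caps the worker threads (core == max) and poolQueueSize caps the number of queued operations, while keepAliveTime lets idle threads expire. A standalone sketch of that configuration, with illustrative sizes standing in for the HiveConf values, is:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch of the bounded background pool; sizes are illustrative, the real values come
// from HiveConf (ASYNC_EXEC_THREADS, ASYNC_EXEC_WAIT_QUEUE_SIZE, ASYNC_EXEC_KEEPALIVE_TIME).
final class BackgroundPoolSketch {
  static ThreadPoolExecutor create(int poolSize, int queueSize, long keepAliveSeconds) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        poolSize, poolSize,                            // fixed-size pool: core == max threads
        keepAliveSeconds, TimeUnit.SECONDS,            // idle threads expire after keepAliveTime
        new LinkedBlockingQueue<Runnable>(queueSize)); // bounded queue for pending operations
    pool.allowCoreThreadTimeOut(true);                 // allow even core threads to time out
    return pool;
  }
}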
backgroundOperationPool.allowCoreThreadTimeOut(true); - - checkInterval = HiveConf.getTimeVar( - hiveConf, ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL, TimeUnit.MILLISECONDS); - sessionTimeout = HiveConf.getTimeVar( - hiveConf, ConfVars.HIVE_SERVER2_IDLE_SESSION_TIMEOUT, TimeUnit.MILLISECONDS); - checkOperation = HiveConf.getBoolVar(hiveConf, - ConfVars.HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION); - } - - private void initOperationLogRootDir() { - operationLogRootDir = new File( - hiveConf.getVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION)); - isOperationLogEnabled = true; - - if (operationLogRootDir.exists() && !operationLogRootDir.isDirectory()) { - LOG.warn("The operation log root directory exists, but it is not a directory: " + - operationLogRootDir.getAbsolutePath()); - isOperationLogEnabled = false; - } - - if (!operationLogRootDir.exists()) { - if (!operationLogRootDir.mkdirs()) { - LOG.warn("Unable to create operation log root directory: " + - operationLogRootDir.getAbsolutePath()); - isOperationLogEnabled = false; - } - } - - if (isOperationLogEnabled) { - LOG.info("Operation log root directory is created: " + operationLogRootDir.getAbsolutePath()); - try { - FileUtils.forceDeleteOnExit(operationLogRootDir); - } catch (IOException e) { - LOG.warn("Failed to schedule cleanup HS2 operation logging root dir: " + - operationLogRootDir.getAbsolutePath(), e); - } - } - } - - @Override - public synchronized void start() { - super.start(); - if (checkInterval > 0) { - startTimeoutChecker(); - } - } - - private final Object timeoutCheckerLock = new Object(); - - private void startTimeoutChecker() { - final long interval = Math.max(checkInterval, 3000L); // minimum 3 seconds - final Runnable timeoutChecker = new Runnable() { - @Override - public void run() { - sleepFor(interval); - while (!shutdown) { - long current = System.currentTimeMillis(); - for (HiveSession session : new ArrayList(handleToSession.values())) { - if (shutdown) { - break; - } - if (sessionTimeout > 0 && session.getLastAccessTime() + sessionTimeout <= current - && (!checkOperation || session.getNoOperationTime() > sessionTimeout)) { - SessionHandle handle = session.getSessionHandle(); - LOG.warn("Session " + handle + " is Timed-out (last access : " + - new Date(session.getLastAccessTime()) + ") and will be closed"); - try { - closeSession(handle); - } catch (HiveSQLException e) { - LOG.warn("Exception is thrown closing session " + handle, e); - } - } else { - session.closeExpiredOperations(); - } - } - sleepFor(interval); - } - } - - private void sleepFor(long interval) { - synchronized (timeoutCheckerLock) { - try { - timeoutCheckerLock.wait(interval); - } catch (InterruptedException e) { - // Ignore, and break. - } - } - } - }; - backgroundOperationPool.execute(timeoutChecker); - } - - private void shutdownTimeoutChecker() { - shutdown = true; - synchronized (timeoutCheckerLock) { - timeoutCheckerLock.notify(); - } - } - - @Override - public synchronized void stop() { - super.stop(); - shutdownTimeoutChecker(); - if (backgroundOperationPool != null) { - backgroundOperationPool.shutdown(); - long timeout = hiveConf.getTimeVar( - ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT, TimeUnit.SECONDS); - try { - backgroundOperationPool.awaitTermination(timeout, TimeUnit.SECONDS); - } catch (InterruptedException e) { - LOG.warn("HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT = " + timeout + - " seconds has been exceeded. 
RUNNING background operations will be shut down", e); - } - backgroundOperationPool = null; - } - cleanupLoggingRootDir(); - } - - private void cleanupLoggingRootDir() { - if (isOperationLogEnabled) { - try { - FileUtils.forceDelete(operationLogRootDir); - } catch (Exception e) { - LOG.warn("Failed to cleanup root dir of HS2 logging: " + operationLogRootDir - .getAbsolutePath(), e); - } - } - } - - public SessionHandle openSession(TProtocolVersion protocol, String username, String password, String ipAddress, - Map sessionConf) throws HiveSQLException { - return openSession(protocol, username, password, ipAddress, sessionConf, false, null); - } - - /** - * Opens a new session and creates a session handle. - * The username passed to this method is the effective username. - * If withImpersonation is true (==doAs true) we wrap all the calls in HiveSession - * within a UGI.doAs, where UGI corresponds to the effective user. - * - * Please see {@code org.apache.hive.service.cli.thrift.ThriftCLIService.getUserName()} for - * more details. - * - * @param protocol - * @param username - * @param password - * @param ipAddress - * @param sessionConf - * @param withImpersonation - * @param delegationToken - * @return - * @throws HiveSQLException - */ - public SessionHandle openSession(TProtocolVersion protocol, String username, String password, String ipAddress, - Map sessionConf, boolean withImpersonation, String delegationToken) - throws HiveSQLException { - HiveSession session; - // If doAs is set to true for HiveServer2, we will create a proxy object for the session impl. - // Within the proxy object, we wrap the method call in a UserGroupInformation#doAs - if (withImpersonation) { - HiveSessionImplwithUGI sessionWithUGI = new HiveSessionImplwithUGI(protocol, username, password, - hiveConf, ipAddress, delegationToken); - session = HiveSessionProxy.getProxy(sessionWithUGI, sessionWithUGI.getSessionUgi()); - sessionWithUGI.setProxySession(session); - } else { - session = new HiveSessionImpl(protocol, username, password, hiveConf, ipAddress); - } - session.setSessionManager(this); - session.setOperationManager(operationManager); - try { - session.open(sessionConf); - } catch (Exception e) { - try { - session.close(); - } catch (Throwable t) { - LOG.warn("Error closing session", t); - } - session = null; - throw new HiveSQLException("Failed to open new session: " + e, e); - } - if (isOperationLogEnabled) { - session.setOperationLogSessionDir(operationLogRootDir); - } - handleToSession.put(session.getSessionHandle(), session); - return session.getSessionHandle(); - } - - public void closeSession(SessionHandle sessionHandle) throws HiveSQLException { - HiveSession session = handleToSession.remove(sessionHandle); - if (session == null) { - throw new HiveSQLException("Session does not exist!"); - } - session.close(); - } - - public HiveSession getSession(SessionHandle sessionHandle) throws HiveSQLException { - HiveSession session = handleToSession.get(sessionHandle); - if (session == null) { - throw new HiveSQLException("Invalid SessionHandle: " + sessionHandle); - } - return session; - } - - public OperationManager getOperationManager() { - return operationManager; - } - - private static ThreadLocal threadLocalIpAddress = new ThreadLocal() { - @Override - protected synchronized String initialValue() { - return null; - } - }; - - public static void setIpAddress(String ipAddress) { - threadLocalIpAddress.set(ipAddress); - } - - public static void clearIpAddress() { - threadLocalIpAddress.remove(); - } - - 
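As the openSession javadoc above notes, when doAs is enabled every call on the session is wrapped in UserGroupInformation#doAs for the effective user; HiveSessionProxy performs that wrapping with the UGI built by HiveSessionImplwithUGI. A minimal sketch of the wrapping itself, using a hypothetical helper and only the public Hadoop API, is:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

// Sketch of the doAs wrapping used for impersonated sessions; "runAsUser" and the
// action body are illustrative, not part of the HiveServer2 code removed in this diff.
final class DoAsSketch {
  static <T> T runAsUser(String effectiveUser, PrivilegedExceptionAction<T> action)
      throws Exception {
    UserGroupInformation ugi = UserGroupInformation.isSecurityEnabled()
        ? UserGroupInformation.createProxyUser(effectiveUser, UserGroupInformation.getLoginUser())
        : UserGroupInformation.createRemoteUser(effectiveUser);
    return ugi.doAs(action); // every call on the proxied session goes through here
  }
}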
public static String getIpAddress() { - return threadLocalIpAddress.get(); - } - - private static ThreadLocal threadLocalUserName = new ThreadLocal(){ - @Override - protected synchronized String initialValue() { - return null; - } - }; - - public static void setUserName(String userName) { - threadLocalUserName.set(userName); - } - - public static void clearUserName() { - threadLocalUserName.remove(); - } - - public static String getUserName() { - return threadLocalUserName.get(); - } - - private static ThreadLocal threadLocalProxyUserName = new ThreadLocal(){ - @Override - protected synchronized String initialValue() { - return null; - } - }; - - public static void setProxyUserName(String userName) { - LOG.debug("setting proxy user name based on query param to: " + userName); - threadLocalProxyUserName.set(userName); - } - - public static String getProxyUserName() { - return threadLocalProxyUserName.get(); - } - - public static void clearProxyUserName() { - threadLocalProxyUserName.remove(); - } - - public Future submitBackgroundOperation(Runnable r) { - return backgroundOperationPool.submit(r); - } - - public int getOpenSessionCount() { - return handleToSession.size(); - } -} - diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java deleted file mode 100644 index 00bdf7e19126e..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftBinaryCLIService.java +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hive.service.cli.thrift; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hive.service.ServiceException; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.CLIService; -import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; -import org.apache.thrift.TProcessorFactory; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.server.TThreadPoolServer; -import org.apache.thrift.transport.TServerSocket; -import org.apache.thrift.transport.TTransportFactory; - - -public class ThriftBinaryCLIService extends ThriftCLIService { - - public ThriftBinaryCLIService(CLIService cliService) { - super(cliService, ThriftBinaryCLIService.class.getSimpleName()); - } - - @Override - protected void initializeServer() { - try { - // Server thread pool - String threadPoolName = "HiveServer2-Handler-Pool"; - ExecutorService executorService = new ThreadPoolExecutor(minWorkerThreads, maxWorkerThreads, - workerKeepAliveTime, TimeUnit.SECONDS, new SynchronousQueue(), - new ThreadFactoryWithGarbageCleanup(threadPoolName)); - - // Thrift configs - hiveAuthFactory = new HiveAuthFactory(hiveConf); - TTransportFactory transportFactory = hiveAuthFactory.getAuthTransFactory(); - TProcessorFactory processorFactory = hiveAuthFactory.getAuthProcFactory(this); - TServerSocket serverSocket = null; - List sslVersionBlacklist = new ArrayList(); - for (String sslVersion : hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) { - sslVersionBlacklist.add(sslVersion); - } - if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL)) { - serverSocket = HiveAuthFactory.getServerSocket(hiveHost, portNum); - } else { - String keyStorePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH).trim(); - if (keyStorePath.isEmpty()) { - throw new IllegalArgumentException(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname - + " Not configured for SSL connection"); - } - String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf, - HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname); - serverSocket = HiveAuthFactory.getServerSSLSocket(hiveHost, portNum, keyStorePath, - keyStorePassword, sslVersionBlacklist); - } - - // In case HIVE_SERVER2_THRIFT_PORT or hive.server2.thrift.port is configured with 0 which - // represents any free port, we should set it to the actual one - portNum = serverSocket.getServerSocket().getLocalPort(); - - // Server args - int maxMessageSize = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE); - int requestTimeout = (int) hiveConf.getTimeVar( - HiveConf.ConfVars.HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT, TimeUnit.SECONDS); - int beBackoffSlotLength = (int) hiveConf.getTimeVar( - HiveConf.ConfVars.HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH, TimeUnit.MILLISECONDS); - TThreadPoolServer.Args sargs = new TThreadPoolServer.Args(serverSocket) - .processorFactory(processorFactory).transportFactory(transportFactory) - .protocolFactory(new TBinaryProtocol.Factory()) - .inputProtocolFactory(new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize)) - 
.requestTimeout(requestTimeout).requestTimeoutUnit(TimeUnit.SECONDS) - .beBackoffSlotLength(beBackoffSlotLength).beBackoffSlotLengthUnit(TimeUnit.MILLISECONDS) - .executorService(executorService); - - // TCP Server - server = new TThreadPoolServer(sargs); - server.setServerEventHandler(serverEventHandler); - String msg = "Starting " + ThriftBinaryCLIService.class.getSimpleName() + " on port " - + serverSocket.getServerSocket().getLocalPort() + " with " + minWorkerThreads + "..." + maxWorkerThreads + " worker threads"; - LOG.info(msg); - } catch (Exception t) { - throw new ServiceException("Error initializing " + getName(), t); - } - } - - @Override - public void run() { - try { - server.serve(); - } catch (Throwable t) { - LOG.fatal( - "Error starting HiveServer2: could not start " - + ThriftBinaryCLIService.class.getSimpleName(), t); - System.exit(-1); - } - } - -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java deleted file mode 100644 index ff533769b5b84..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ /dev/null @@ -1,693 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.thrift; - -import javax.security.auth.login.LoginException; -import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hive.service.AbstractService; -import org.apache.hive.service.ServiceException; -import org.apache.hive.service.ServiceUtils; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.auth.TSetIpAddressProcessor; -import org.apache.hive.service.cli.*; -import org.apache.hive.service.cli.session.SessionManager; -import org.apache.hive.service.server.HiveServer2; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.server.ServerContext; -import org.apache.thrift.server.TServer; -import org.apache.thrift.server.TServerEventHandler; -import org.apache.thrift.transport.TTransport; - -/** - * ThriftCLIService. 
- * - */ -public abstract class ThriftCLIService extends AbstractService implements TCLIService.Iface, Runnable { - - public static final Log LOG = LogFactory.getLog(ThriftCLIService.class.getName()); - - protected CLIService cliService; - private static final TStatus OK_STATUS = new TStatus(TStatusCode.SUCCESS_STATUS); - protected static HiveAuthFactory hiveAuthFactory; - - protected int portNum; - protected InetAddress serverIPAddress; - protected String hiveHost; - protected TServer server; - protected org.eclipse.jetty.server.Server httpServer; - - private boolean isStarted = false; - protected boolean isEmbedded = false; - - protected HiveConf hiveConf; - - protected int minWorkerThreads; - protected int maxWorkerThreads; - protected long workerKeepAliveTime; - - protected TServerEventHandler serverEventHandler; - protected ThreadLocal currentServerContext; - - static class ThriftCLIServerContext implements ServerContext { - private SessionHandle sessionHandle = null; - - public void setSessionHandle(SessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public SessionHandle getSessionHandle() { - return sessionHandle; - } - } - - public ThriftCLIService(CLIService service, String serviceName) { - super(serviceName); - this.cliService = service; - currentServerContext = new ThreadLocal(); - serverEventHandler = new TServerEventHandler() { - @Override - public ServerContext createContext( - TProtocol input, TProtocol output) { - return new ThriftCLIServerContext(); - } - - @Override - public void deleteContext(ServerContext serverContext, - TProtocol input, TProtocol output) { - ThriftCLIServerContext context = (ThriftCLIServerContext)serverContext; - SessionHandle sessionHandle = context.getSessionHandle(); - if (sessionHandle != null) { - LOG.info("Session disconnected without closing properly, close it now"); - try { - cliService.closeSession(sessionHandle); - } catch (HiveSQLException e) { - LOG.warn("Failed to close session: " + e, e); - } - } - } - - @Override - public void preServe() { - } - - @Override - public void processContext(ServerContext serverContext, - TTransport input, TTransport output) { - currentServerContext.set(serverContext); - } - }; - } - - @Override - public synchronized void init(HiveConf hiveConf) { - this.hiveConf = hiveConf; - // Initialize common server configs needed in both binary & http modes - String portString; - hiveHost = System.getenv("HIVE_SERVER2_THRIFT_BIND_HOST"); - if (hiveHost == null) { - hiveHost = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST); - } - try { - if (hiveHost != null && !hiveHost.isEmpty()) { - serverIPAddress = InetAddress.getByName(hiveHost); - } else { - serverIPAddress = InetAddress.getLocalHost(); - } - } catch (UnknownHostException e) { - throw new ServiceException(e); - } - // HTTP mode - if (HiveServer2.isHTTPTransportMode(hiveConf)) { - workerKeepAliveTime = - hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME, - TimeUnit.SECONDS); - portString = System.getenv("HIVE_SERVER2_THRIFT_HTTP_PORT"); - if (portString != null) { - portNum = Integer.valueOf(portString); - } else { - portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT); - } - } - // Binary mode - else { - workerKeepAliveTime = - hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME, TimeUnit.SECONDS); - portString = System.getenv("HIVE_SERVER2_THRIFT_PORT"); - if (portString != null) { - portNum = Integer.valueOf(portString); - } else { - portNum = 
hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT); - } - } - minWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS); - maxWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS); - super.init(hiveConf); - } - - @Override - public synchronized void start() { - super.start(); - if (!isStarted && !isEmbedded) { - initializeServer(); - new Thread(this).start(); - isStarted = true; - } - } - - @Override - public synchronized void stop() { - if (isStarted && !isEmbedded) { - if(server != null) { - server.stop(); - LOG.info("Thrift server has stopped"); - } - if((httpServer != null) && httpServer.isStarted()) { - try { - httpServer.stop(); - LOG.info("Http server has stopped"); - } catch (Exception e) { - LOG.error("Error stopping Http server: ", e); - } - } - isStarted = false; - } - super.stop(); - } - - public int getPortNumber() { - return portNum; - } - - public InetAddress getServerIPAddress() { - return serverIPAddress; - } - - @Override - public TGetDelegationTokenResp GetDelegationToken(TGetDelegationTokenReq req) - throws TException { - TGetDelegationTokenResp resp = new TGetDelegationTokenResp(); - resp.setStatus(notSupportTokenErrorStatus()); - return resp; - } - - @Override - public TCancelDelegationTokenResp CancelDelegationToken(TCancelDelegationTokenReq req) - throws TException { - TCancelDelegationTokenResp resp = new TCancelDelegationTokenResp(); - resp.setStatus(notSupportTokenErrorStatus()); - return resp; - } - - @Override - public TRenewDelegationTokenResp RenewDelegationToken(TRenewDelegationTokenReq req) - throws TException { - TRenewDelegationTokenResp resp = new TRenewDelegationTokenResp(); - resp.setStatus(notSupportTokenErrorStatus()); - return resp; - } - - private TStatus notSupportTokenErrorStatus() { - TStatus errorStatus = new TStatus(TStatusCode.ERROR_STATUS); - errorStatus.setErrorMessage("Delegation token is not supported"); - return errorStatus; - } - - @Override - public TOpenSessionResp OpenSession(TOpenSessionReq req) throws TException { - LOG.info("Client protocol version: " + req.getClient_protocol()); - TOpenSessionResp resp = new TOpenSessionResp(); - try { - SessionHandle sessionHandle = getSessionHandle(req, resp); - resp.setSessionHandle(sessionHandle.toTSessionHandle()); - // TODO: set real configuration map - resp.setConfiguration(new HashMap()); - resp.setStatus(OK_STATUS); - ThriftCLIServerContext context = - (ThriftCLIServerContext)currentServerContext.get(); - if (context != null) { - context.setSessionHandle(sessionHandle); - } - } catch (Exception e) { - LOG.warn("Error opening session: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - private String getIpAddress() { - String clientIpAddress; - // Http transport mode. - // We set the thread local ip address, in ThriftHttpServlet. - if (cliService.getHiveConf().getVar( - ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) { - clientIpAddress = SessionManager.getIpAddress(); - } - else { - // Kerberos - if (isKerberosAuthMode()) { - clientIpAddress = hiveAuthFactory.getIpAddress(); - } - // Except kerberos, NOSASL - else { - clientIpAddress = TSetIpAddressProcessor.getUserIpAddress(); - } - } - LOG.debug("Client's IP Address: " + clientIpAddress); - return clientIpAddress; - } - - /** - * Returns the effective username. - * 1. If hive.server2.allow.user.substitution = false: the username of the connecting user - * 2. 
If hive.server2.allow.user.substitution = true: the username of the end user, - * that the connecting user is trying to proxy for. - * This includes a check whether the connecting user is allowed to proxy for the end user. - * @param req - * @return - * @throws HiveSQLException - */ - private String getUserName(TOpenSessionReq req) throws HiveSQLException { - String userName = null; - // Kerberos - if (isKerberosAuthMode()) { - userName = hiveAuthFactory.getRemoteUser(); - } - // Except kerberos, NOSASL - if (userName == null) { - userName = TSetIpAddressProcessor.getUserName(); - } - // Http transport mode. - // We set the thread local username, in ThriftHttpServlet. - if (cliService.getHiveConf().getVar( - ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) { - userName = SessionManager.getUserName(); - } - if (userName == null) { - userName = req.getUsername(); - } - - userName = getShortName(userName); - String effectiveClientUser = getProxyUser(userName, req.getConfiguration(), getIpAddress()); - LOG.debug("Client's username: " + effectiveClientUser); - return effectiveClientUser; - } - - private String getShortName(String userName) { - String ret = null; - if (userName != null) { - int indexOfDomainMatch = ServiceUtils.indexOfDomainMatch(userName); - ret = (indexOfDomainMatch <= 0) ? userName : - userName.substring(0, indexOfDomainMatch); - } - - return ret; - } - - /** - * Create a session handle - * @param req - * @param res - * @return - * @throws HiveSQLException - * @throws LoginException - * @throws IOException - */ - SessionHandle getSessionHandle(TOpenSessionReq req, TOpenSessionResp res) - throws HiveSQLException, LoginException, IOException { - String userName = getUserName(req); - String ipAddress = getIpAddress(); - TProtocolVersion protocol = getMinVersion(CLIService.SERVER_VERSION, - req.getClient_protocol()); - res.setServerProtocolVersion(protocol); - SessionHandle sessionHandle; - if (cliService.getHiveConf().getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) && - (userName != null)) { - String delegationTokenStr = getDelegationToken(userName); - sessionHandle = cliService.openSessionWithImpersonation(protocol, userName, - req.getPassword(), ipAddress, req.getConfiguration(), delegationTokenStr); - } else { - sessionHandle = cliService.openSession(protocol, userName, req.getPassword(), - ipAddress, req.getConfiguration()); - } - return sessionHandle; - } - - - private String getDelegationToken(String userName) - throws HiveSQLException, LoginException, IOException { - if (userName == null || !cliService.getHiveConf().getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION) - .equalsIgnoreCase(HiveAuthFactory.AuthTypes.KERBEROS.toString())) { - return null; - } - try { - return cliService.getDelegationTokenFromMetaStore(userName); - } catch (UnsupportedOperationException e) { - // The delegation token is not applicable in the given deployment mode - } - return null; - } - - private TProtocolVersion getMinVersion(TProtocolVersion... 
versions) { - TProtocolVersion[] values = TProtocolVersion.values(); - int current = values[values.length - 1].getValue(); - for (TProtocolVersion version : versions) { - if (current > version.getValue()) { - current = version.getValue(); - } - } - for (TProtocolVersion version : values) { - if (version.getValue() == current) { - return version; - } - } - throw new IllegalArgumentException("never"); - } - - @Override - public TCloseSessionResp CloseSession(TCloseSessionReq req) throws TException { - TCloseSessionResp resp = new TCloseSessionResp(); - try { - SessionHandle sessionHandle = new SessionHandle(req.getSessionHandle()); - cliService.closeSession(sessionHandle); - resp.setStatus(OK_STATUS); - ThriftCLIServerContext context = - (ThriftCLIServerContext)currentServerContext.get(); - if (context != null) { - context.setSessionHandle(null); - } - } catch (Exception e) { - LOG.warn("Error closing session: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetInfoResp GetInfo(TGetInfoReq req) throws TException { - TGetInfoResp resp = new TGetInfoResp(); - try { - GetInfoValue getInfoValue = - cliService.getInfo(new SessionHandle(req.getSessionHandle()), - GetInfoType.getGetInfoType(req.getInfoType())); - resp.setInfoValue(getInfoValue.toTGetInfoValue()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting info: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TExecuteStatementResp ExecuteStatement(TExecuteStatementReq req) throws TException { - TExecuteStatementResp resp = new TExecuteStatementResp(); - try { - SessionHandle sessionHandle = new SessionHandle(req.getSessionHandle()); - String statement = req.getStatement(); - Map confOverlay = req.getConfOverlay(); - Boolean runAsync = req.isRunAsync(); - OperationHandle operationHandle = runAsync ? 
- cliService.executeStatementAsync(sessionHandle, statement, confOverlay) - : cliService.executeStatement(sessionHandle, statement, confOverlay); - resp.setOperationHandle(operationHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error executing statement: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetTypeInfoResp GetTypeInfo(TGetTypeInfoReq req) throws TException { - TGetTypeInfoResp resp = new TGetTypeInfoResp(); - try { - OperationHandle operationHandle = cliService.getTypeInfo(new SessionHandle(req.getSessionHandle())); - resp.setOperationHandle(operationHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting type info: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetCatalogsResp GetCatalogs(TGetCatalogsReq req) throws TException { - TGetCatalogsResp resp = new TGetCatalogsResp(); - try { - OperationHandle opHandle = cliService.getCatalogs(new SessionHandle(req.getSessionHandle())); - resp.setOperationHandle(opHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting catalogs: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetSchemasResp GetSchemas(TGetSchemasReq req) throws TException { - TGetSchemasResp resp = new TGetSchemasResp(); - try { - OperationHandle opHandle = cliService.getSchemas( - new SessionHandle(req.getSessionHandle()), req.getCatalogName(), req.getSchemaName()); - resp.setOperationHandle(opHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting schemas: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetTablesResp GetTables(TGetTablesReq req) throws TException { - TGetTablesResp resp = new TGetTablesResp(); - try { - OperationHandle opHandle = cliService - .getTables(new SessionHandle(req.getSessionHandle()), req.getCatalogName(), - req.getSchemaName(), req.getTableName(), req.getTableTypes()); - resp.setOperationHandle(opHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting tables: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetTableTypesResp GetTableTypes(TGetTableTypesReq req) throws TException { - TGetTableTypesResp resp = new TGetTableTypesResp(); - try { - OperationHandle opHandle = cliService.getTableTypes(new SessionHandle(req.getSessionHandle())); - resp.setOperationHandle(opHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting table types: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetColumnsResp GetColumns(TGetColumnsReq req) throws TException { - TGetColumnsResp resp = new TGetColumnsResp(); - try { - OperationHandle opHandle = cliService.getColumns( - new SessionHandle(req.getSessionHandle()), - req.getCatalogName(), - req.getSchemaName(), - req.getTableName(), - req.getColumnName()); - resp.setOperationHandle(opHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting columns: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws 
TException { - TGetFunctionsResp resp = new TGetFunctionsResp(); - try { - OperationHandle opHandle = cliService.getFunctions( - new SessionHandle(req.getSessionHandle()), req.getCatalogName(), - req.getSchemaName(), req.getFunctionName()); - resp.setOperationHandle(opHandle.toTOperationHandle()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting functions: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws TException { - TGetOperationStatusResp resp = new TGetOperationStatusResp(); - try { - OperationStatus operationStatus = cliService.getOperationStatus( - new OperationHandle(req.getOperationHandle())); - resp.setOperationState(operationStatus.getState().toTOperationState()); - HiveSQLException opException = operationStatus.getOperationException(); - if (opException != null) { - resp.setSqlState(opException.getSQLState()); - resp.setErrorCode(opException.getErrorCode()); - resp.setErrorMessage(org.apache.hadoop.util.StringUtils - .stringifyException(opException)); - } - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting operation status: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws TException { - TCancelOperationResp resp = new TCancelOperationResp(); - try { - cliService.cancelOperation(new OperationHandle(req.getOperationHandle())); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error cancelling operation: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TCloseOperationResp CloseOperation(TCloseOperationReq req) throws TException { - TCloseOperationResp resp = new TCloseOperationResp(); - try { - cliService.closeOperation(new OperationHandle(req.getOperationHandle())); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error closing operation: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req) - throws TException { - TGetResultSetMetadataResp resp = new TGetResultSetMetadataResp(); - try { - TableSchema schema = cliService.getResultSetMetadata(new OperationHandle(req.getOperationHandle())); - resp.setSchema(schema.toTTableSchema()); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error getting result set metadata: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - @Override - public TFetchResultsResp FetchResults(TFetchResultsReq req) throws TException { - TFetchResultsResp resp = new TFetchResultsResp(); - try { - RowSet rowSet = cliService.fetchResults( - new OperationHandle(req.getOperationHandle()), - FetchOrientation.getFetchOrientation(req.getOrientation()), - req.getMaxRows(), - FetchType.getFetchType(req.getFetchType())); - resp.setResults(rowSet.toTRowSet()); - resp.setHasMoreRows(false); - resp.setStatus(OK_STATUS); - } catch (Exception e) { - LOG.warn("Error fetching results: ", e); - resp.setStatus(HiveSQLException.toTStatus(e)); - } - return resp; - } - - protected abstract void initializeServer(); - - @Override - public abstract void run(); - - /** - * If the proxy user name is provided then check privileges to substitute the user. 
- * @param realUser - * @param sessionConf - * @param ipAddress - * @return - * @throws HiveSQLException - */ - private String getProxyUser(String realUser, Map sessionConf, - String ipAddress) throws HiveSQLException { - String proxyUser = null; - // Http transport mode. - // We set the thread local proxy username, in ThriftHttpServlet. - if (cliService.getHiveConf().getVar( - ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) { - proxyUser = SessionManager.getProxyUserName(); - LOG.debug("Proxy user from query string: " + proxyUser); - } - - if (proxyUser == null && sessionConf != null && sessionConf.containsKey(HiveAuthFactory.HS2_PROXY_USER)) { - String proxyUserFromThriftBody = sessionConf.get(HiveAuthFactory.HS2_PROXY_USER); - LOG.debug("Proxy user from thrift body: " + proxyUserFromThriftBody); - proxyUser = proxyUserFromThriftBody; - } - - if (proxyUser == null) { - return realUser; - } - - // check whether substitution is allowed - if (!hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ALLOW_USER_SUBSTITUTION)) { - throw new HiveSQLException("Proxy user substitution is not allowed"); - } - - // If there's no authentication, then directly substitute the user - if (HiveAuthFactory.AuthTypes.NONE.toString() - .equalsIgnoreCase(hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION))) { - return proxyUser; - } - - // Verify proxy user privilege of the realUser for the proxyUser - HiveAuthFactory.verifyProxyAccess(realUser, proxyUser, ipAddress, hiveConf); - LOG.debug("Verified proxy user: " + proxyUser); - return proxyUser; - } - - private boolean isKerberosAuthMode() { - return cliService.getHiveConf().getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION) - .equalsIgnoreCase(HiveAuthFactory.AuthTypes.KERBEROS.toString()); - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java deleted file mode 100644 index 1af45398b895c..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java +++ /dev/null @@ -1,440 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.thrift; - -import java.util.List; -import java.util.Map; - -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.*; -import org.apache.thrift.TException; - -/** - * ThriftCLIServiceClient. 
- * - */ -public class ThriftCLIServiceClient extends CLIServiceClient { - private final TCLIService.Iface cliService; - - public ThriftCLIServiceClient(TCLIService.Iface cliService) { - this.cliService = cliService; - } - - public void checkStatus(TStatus status) throws HiveSQLException { - if (TStatusCode.ERROR_STATUS.equals(status.getStatusCode())) { - throw new HiveSQLException(status); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#openSession(java.lang.String, java.lang.String, java.util.Map) - */ - @Override - public SessionHandle openSession(String username, String password, - Map configuration) - throws HiveSQLException { - try { - TOpenSessionReq req = new TOpenSessionReq(); - req.setUsername(username); - req.setPassword(password); - req.setConfiguration(configuration); - TOpenSessionResp resp = cliService.OpenSession(req); - checkStatus(resp.getStatus()); - return new SessionHandle(resp.getSessionHandle(), resp.getServerProtocolVersion()); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#closeSession(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public SessionHandle openSessionWithImpersonation(String username, String password, - Map configuration, String delegationToken) throws HiveSQLException { - throw new HiveSQLException("open with impersonation operation is not supported in the client"); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#closeSession(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public void closeSession(SessionHandle sessionHandle) throws HiveSQLException { - try { - TCloseSessionReq req = new TCloseSessionReq(sessionHandle.toTSessionHandle()); - TCloseSessionResp resp = cliService.CloseSession(req); - checkStatus(resp.getStatus()); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getInfo(org.apache.hive.service.cli.SessionHandle, java.util.List) - */ - @Override - public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType) - throws HiveSQLException { - try { - // FIXME extract the right info type - TGetInfoReq req = new TGetInfoReq(sessionHandle.toTSessionHandle(), infoType.toTGetInfoType()); - TGetInfoResp resp = cliService.GetInfo(req); - checkStatus(resp.getStatus()); - return new GetInfoValue(resp.getInfoValue()); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#executeStatement(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map) - */ - @Override - public OperationHandle executeStatement(SessionHandle sessionHandle, String statement, - Map confOverlay) - throws HiveSQLException { - return executeStatementInternal(sessionHandle, statement, confOverlay, false); - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#executeStatementAsync(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map) - */ - @Override - public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement, - Map confOverlay) - throws HiveSQLException { - return executeStatementInternal(sessionHandle, statement, confOverlay, true); - } - - private OperationHandle executeStatementInternal(SessionHandle 
sessionHandle, String statement, - Map confOverlay, boolean isAsync) - throws HiveSQLException { - try { - TExecuteStatementReq req = - new TExecuteStatementReq(sessionHandle.toTSessionHandle(), statement); - req.setConfOverlay(confOverlay); - req.setRunAsync(isAsync); - TExecuteStatementResp resp = cliService.ExecuteStatement(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getTypeInfo(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException { - try { - TGetTypeInfoReq req = new TGetTypeInfoReq(sessionHandle.toTSessionHandle()); - TGetTypeInfoResp resp = cliService.GetTypeInfo(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getCatalogs(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException { - try { - TGetCatalogsReq req = new TGetCatalogsReq(sessionHandle.toTSessionHandle()); - TGetCatalogsResp resp = cliService.GetCatalogs(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getSchemas(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String) - */ - @Override - public OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, - String schemaName) - throws HiveSQLException { - try { - TGetSchemasReq req = new TGetSchemasReq(sessionHandle.toTSessionHandle()); - req.setCatalogName(catalogName); - req.setSchemaName(schemaName); - TGetSchemasResp resp = cliService.GetSchemas(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getTables(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.util.List) - */ - @Override - public OperationHandle getTables(SessionHandle sessionHandle, String catalogName, - String schemaName, String tableName, List tableTypes) - throws HiveSQLException { - try { - TGetTablesReq req = new TGetTablesReq(sessionHandle.toTSessionHandle()); - req.setTableName(tableName); - req.setTableTypes(tableTypes); - req.setSchemaName(schemaName); - TGetTablesResp resp = cliService.GetTables(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) 
{ - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getTableTypes(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException { - try { - TGetTableTypesReq req = new TGetTableTypesReq(sessionHandle.toTSessionHandle()); - TGetTableTypesResp resp = cliService.GetTableTypes(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getColumns(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getColumns(SessionHandle sessionHandle, - String catalogName, String schemaName, String tableName, String columnName) - throws HiveSQLException { - try { - TGetColumnsReq req = new TGetColumnsReq(); - req.setSessionHandle(sessionHandle.toTSessionHandle()); - req.setCatalogName(catalogName); - req.setSchemaName(schemaName); - req.setTableName(tableName); - req.setColumnName(columnName); - TGetColumnsResp resp = cliService.GetColumns(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getFunctions(org.apache.hive.service.cli.SessionHandle) - */ - @Override - public OperationHandle getFunctions(SessionHandle sessionHandle, - String catalogName, String schemaName, String functionName) throws HiveSQLException { - try { - TGetFunctionsReq req = new TGetFunctionsReq(sessionHandle.toTSessionHandle(), functionName); - req.setCatalogName(catalogName); - req.setSchemaName(schemaName); - TGetFunctionsResp resp = cliService.GetFunctions(req); - checkStatus(resp.getStatus()); - TProtocolVersion protocol = sessionHandle.getProtocolVersion(); - return new OperationHandle(resp.getOperationHandle(), protocol); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getOperationStatus(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException { - try { - TGetOperationStatusReq req = new TGetOperationStatusReq(opHandle.toTOperationHandle()); - TGetOperationStatusResp resp = cliService.GetOperationStatus(req); - // Checks the status of the RPC call, throws an exception in case of error - checkStatus(resp.getStatus()); - OperationState opState = OperationState.getOperationState(resp.getOperationState()); - HiveSQLException opException = null; - if (opState == OperationState.ERROR) { - opException = new HiveSQLException(resp.getErrorMessage(), resp.getSqlState(), resp.getErrorCode()); - } - return new OperationStatus(opState, opException); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#cancelOperation(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public void cancelOperation(OperationHandle 
opHandle) throws HiveSQLException { - try { - TCancelOperationReq req = new TCancelOperationReq(opHandle.toTOperationHandle()); - TCancelOperationResp resp = cliService.CancelOperation(req); - checkStatus(resp.getStatus()); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#closeOperation(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public void closeOperation(OperationHandle opHandle) - throws HiveSQLException { - try { - TCloseOperationReq req = new TCloseOperationReq(opHandle.toTOperationHandle()); - TCloseOperationResp resp = cliService.CloseOperation(req); - checkStatus(resp.getStatus()); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#getResultSetMetadata(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public TableSchema getResultSetMetadata(OperationHandle opHandle) - throws HiveSQLException { - try { - TGetResultSetMetadataReq req = new TGetResultSetMetadataReq(opHandle.toTOperationHandle()); - TGetResultSetMetadataResp resp = cliService.GetResultSetMetadata(req); - checkStatus(resp.getStatus()); - return new TableSchema(resp.getSchema()); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - @Override - public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows, - FetchType fetchType) throws HiveSQLException { - try { - TFetchResultsReq req = new TFetchResultsReq(); - req.setOperationHandle(opHandle.toTOperationHandle()); - req.setOrientation(orientation.toTFetchOrientation()); - req.setMaxRows(maxRows); - req.setFetchType(fetchType.toTFetchType()); - TFetchResultsResp resp = cliService.FetchResults(req); - checkStatus(resp.getStatus()); - return RowSetFactory.create(resp.getResults(), opHandle.getProtocolVersion()); - } catch (HiveSQLException e) { - throw e; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - /* (non-Javadoc) - * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle) - */ - @Override - public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException { - // TODO: set the correct default fetch size - return fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 10000, FetchType.QUERY_OUTPUT); - } - - @Override - public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String owner, String renewer) throws HiveSQLException { - TGetDelegationTokenReq req = new TGetDelegationTokenReq( - sessionHandle.toTSessionHandle(), owner, renewer); - try { - TGetDelegationTokenResp tokenResp = cliService.GetDelegationToken(req); - checkStatus(tokenResp.getStatus()); - return tokenResp.getDelegationToken(); - } catch (Exception e) { - throw new HiveSQLException(e); - } - } - - @Override - public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException { - TCancelDelegationTokenReq cancelReq = new TCancelDelegationTokenReq( - sessionHandle.toTSessionHandle(), tokenStr); - try { - TCancelDelegationTokenResp cancelResp = - cliService.CancelDelegationToken(cancelReq); - checkStatus(cancelResp.getStatus()); - return; - } catch (TException e) { - throw new HiveSQLException(e); - } - } - - @Override - public void 
renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, - String tokenStr) throws HiveSQLException { - TRenewDelegationTokenReq cancelReq = new TRenewDelegationTokenReq( - sessionHandle.toTSessionHandle(), tokenStr); - try { - TRenewDelegationTokenResp renewResp = - cliService.RenewDelegationToken(cancelReq); - checkStatus(renewResp.getStatus()); - return; - } catch (Exception e) { - throw new HiveSQLException(e); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java deleted file mode 100644 index bd64c777c1d76..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java +++ /dev/null @@ -1,194 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.cli.thrift; - -import java.util.Arrays; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Shell; -import org.apache.hive.service.ServiceException; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.cli.CLIService; -import org.apache.hive.service.cli.thrift.TCLIService.Iface; -import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; -import org.apache.thrift.TProcessor; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.server.TServlet; -import org.eclipse.jetty.server.AbstractConnectionFactory; -import org.eclipse.jetty.server.ConnectionFactory; -import org.eclipse.jetty.server.HttpConnectionFactory; -import org.eclipse.jetty.server.ServerConnector; -import org.eclipse.jetty.servlet.ServletContextHandler; -import org.eclipse.jetty.servlet.ServletHolder; -import org.eclipse.jetty.util.ssl.SslContextFactory; -import org.eclipse.jetty.util.thread.ExecutorThreadPool; -import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler; - - -public class ThriftHttpCLIService extends ThriftCLIService { - - public ThriftHttpCLIService(CLIService cliService) { - super(cliService, ThriftHttpCLIService.class.getSimpleName()); - } - - @Override - protected void initializeServer() { - try { - // Server thread pool - // Start with minWorkerThreads, expand till maxWorkerThreads and reject subsequent requests - String threadPoolName = "HiveServer2-HttpHandler-Pool"; - 
ThreadPoolExecutor executorService = new ThreadPoolExecutor(minWorkerThreads, maxWorkerThreads, - workerKeepAliveTime, TimeUnit.SECONDS, new SynchronousQueue(), - new ThreadFactoryWithGarbageCleanup(threadPoolName)); - ExecutorThreadPool threadPool = new ExecutorThreadPool(executorService); - - // HTTP Server - httpServer = new org.eclipse.jetty.server.Server(threadPool); - - // Connector configs - - ConnectionFactory[] connectionFactories; - boolean useSsl = hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_USE_SSL); - String schemeName = useSsl ? "https" : "http"; - // Change connector if SSL is used - if (useSsl) { - String keyStorePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH).trim(); - String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf, - HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname); - if (keyStorePath.isEmpty()) { - throw new IllegalArgumentException(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH.varname - + " Not configured for SSL connection"); - } - SslContextFactory sslContextFactory = new SslContextFactory.Server(); - String[] excludedProtocols = hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(","); - LOG.info("HTTP Server SSL: adding excluded protocols: " + Arrays.toString(excludedProtocols)); - sslContextFactory.addExcludeProtocols(excludedProtocols); - LOG.info("HTTP Server SSL: SslContextFactory.getExcludeProtocols = " + - Arrays.toString(sslContextFactory.getExcludeProtocols())); - sslContextFactory.setKeyStorePath(keyStorePath); - sslContextFactory.setKeyStorePassword(keyStorePassword); - connectionFactories = AbstractConnectionFactory.getFactories( - sslContextFactory, new HttpConnectionFactory()); - } else { - connectionFactories = new ConnectionFactory[] { new HttpConnectionFactory() }; - } - ServerConnector connector = new ServerConnector( - httpServer, - null, - // Call this full constructor to set this, which forces daemon threads: - new ScheduledExecutorScheduler("HiveServer2-HttpHandler-JettyScheduler", true), - null, - -1, - -1, - connectionFactories); - - connector.setPort(portNum); - // Linux:yes, Windows:no - connector.setReuseAddress(!Shell.WINDOWS); - int maxIdleTime = (int) hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME, - TimeUnit.MILLISECONDS); - connector.setIdleTimeout(maxIdleTime); - - httpServer.addConnector(connector); - - // Thrift configs - hiveAuthFactory = new HiveAuthFactory(hiveConf); - TProcessor processor = new TCLIService.Processor(this); - TProtocolFactory protocolFactory = new TBinaryProtocol.Factory(); - // Set during the init phase of HiveServer2 if auth mode is kerberos - // UGI for the hive/_HOST (kerberos) principal - UserGroupInformation serviceUGI = cliService.getServiceUGI(); - // UGI for the http/_HOST (SPNego) principal - UserGroupInformation httpUGI = cliService.getHttpUGI(); - String authType = hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION); - TServlet thriftHttpServlet = new ThriftHttpServlet(processor, protocolFactory, authType, - serviceUGI, httpUGI); - - // Context handler - final ServletContextHandler context = new ServletContextHandler( - ServletContextHandler.SESSIONS); - context.setContextPath("/"); - String httpPath = getHttpPath(hiveConf - .getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_PATH)); - httpServer.setHandler(context); - context.addServlet(new ServletHolder(thriftHttpServlet), httpPath); - - // TODO: check defaults: maxTimeout, keepalive, maxBodySize, bodyRecieveDuration, etc. 
- // Finally, start the server - httpServer.start(); - // In case HIVE_SERVER2_THRIFT_HTTP_PORT or hive.server2.thrift.http.port is configured with - // 0 which represents any free port, we should set it to the actual one - portNum = connector.getLocalPort(); - String msg = "Started " + ThriftHttpCLIService.class.getSimpleName() + " in " + schemeName - + " mode on port " + connector.getLocalPort()+ " path=" + httpPath + " with " + minWorkerThreads + "..." - + maxWorkerThreads + " worker threads"; - LOG.info(msg); - } catch (Exception t) { - throw new ServiceException("Error initializing " + getName(), t); - } - } - - /** - * Configure Jetty to serve http requests. Example of a client connection URL: - * http://localhost:10000/servlets/thrifths2/ A gateway may cause actual target URL to differ, - * e.g. http://gateway:port/hive2/servlets/thrifths2/ - */ - @Override - public void run() { - try { - httpServer.join(); - } catch (Throwable t) { - LOG.fatal( - "Error starting HiveServer2: could not start " - + ThriftHttpCLIService.class.getSimpleName(), t); - System.exit(-1); - } - } - - /** - * The config parameter can be like "path", "/path", "/path/", "path/*", "/path1/path2/*" and so on. - * httpPath should end up as "/*", "/path/*" or "/path1/../pathN/*" - * @param httpPath - * @return - */ - private String getHttpPath(String httpPath) { - if(httpPath == null || httpPath.equals("")) { - httpPath = "/*"; - } - else { - if(!httpPath.startsWith("/")) { - httpPath = "/" + httpPath; - } - if(httpPath.endsWith("/")) { - httpPath = httpPath + "*"; - } - if(!httpPath.endsWith("/*")) { - httpPath = httpPath + "/*"; - } - } - return httpPath; - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java deleted file mode 100644 index e15d2d0566d2b..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java +++ /dev/null @@ -1,545 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hive.service.cli.thrift; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.security.PrivilegedExceptionAction; -import java.util.Map; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.TimeUnit; - -import javax.servlet.ServletException; -import javax.servlet.http.Cookie; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.ws.rs.core.NewCookie; - -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.codec.binary.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.shims.HadoopShims.KerberosNameShim; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hive.service.auth.AuthenticationProviderFactory; -import org.apache.hive.service.auth.AuthenticationProviderFactory.AuthMethods; -import org.apache.hive.service.auth.HiveAuthFactory; -import org.apache.hive.service.auth.HttpAuthUtils; -import org.apache.hive.service.auth.HttpAuthenticationException; -import org.apache.hive.service.auth.PasswdAuthenticationProvider; -import org.apache.hive.service.cli.session.SessionManager; -import org.apache.hive.service.CookieSigner; -import org.apache.thrift.TProcessor; -import org.apache.thrift.protocol.TProtocolFactory; -import org.apache.thrift.server.TServlet; -import org.ietf.jgss.GSSContext; -import org.ietf.jgss.GSSCredential; -import org.ietf.jgss.GSSException; -import org.ietf.jgss.GSSManager; -import org.ietf.jgss.GSSName; -import org.ietf.jgss.Oid; - -/** - * - * ThriftHttpServlet - * - */ -public class ThriftHttpServlet extends TServlet { - - private static final long serialVersionUID = 1L; - public static final Log LOG = LogFactory.getLog(ThriftHttpServlet.class.getName()); - private final String authType; - private final UserGroupInformation serviceUGI; - private final UserGroupInformation httpUGI; - private HiveConf hiveConf = new HiveConf(); - - // Class members for cookie based authentication. - private CookieSigner signer; - public static final String AUTH_COOKIE = "hive.server2.auth"; - private static final Random RAN = new Random(); - private boolean isCookieAuthEnabled; - private String cookieDomain; - private String cookiePath; - private int cookieMaxAge; - private boolean isCookieSecure; - private boolean isHttpOnlyCookie; - - public ThriftHttpServlet(TProcessor processor, TProtocolFactory protocolFactory, - String authType, UserGroupInformation serviceUGI, UserGroupInformation httpUGI) { - super(processor, protocolFactory); - this.authType = authType; - this.serviceUGI = serviceUGI; - this.httpUGI = httpUGI; - this.isCookieAuthEnabled = hiveConf.getBoolVar( - ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED); - // Initialize the cookie based authentication related variables. - if (isCookieAuthEnabled) { - // Generate the signer with secret. 
- String secret = Long.toString(RAN.nextLong()); - LOG.debug("Using the random number as the secret for cookie generation " + secret); - this.signer = new CookieSigner(secret.getBytes()); - this.cookieMaxAge = (int) hiveConf.getTimeVar( - ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE, TimeUnit.SECONDS); - this.cookieDomain = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN); - this.cookiePath = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH); - this.isCookieSecure = hiveConf.getBoolVar( - ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE); - this.isHttpOnlyCookie = hiveConf.getBoolVar( - ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY); - } - } - - @Override - protected void doPost(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - String clientUserName = null; - String clientIpAddress; - boolean requireNewCookie = false; - - try { - // If the cookie based authentication is already enabled, parse the - // request and validate the request cookies. - if (isCookieAuthEnabled) { - clientUserName = validateCookie(request); - requireNewCookie = (clientUserName == null); - if (requireNewCookie) { - LOG.info("Could not validate cookie sent, will try to generate a new cookie"); - } - } - // If the cookie based authentication is not enabled or the request does - // not have a valid cookie, use the kerberos or password based authentication - // depending on the server setup. - if (clientUserName == null) { - // For a kerberos setup - if (isKerberosAuthMode(authType)) { - clientUserName = doKerberosAuth(request); - } - // For password based authentication - else { - clientUserName = doPasswdAuth(request, authType); - } - } - LOG.debug("Client username: " + clientUserName); - - // Set the thread local username to be used for doAs if true - SessionManager.setUserName(clientUserName); - - // find proxy user if any from query param - String doAsQueryParam = getDoAsQueryParam(request.getQueryString()); - if (doAsQueryParam != null) { - SessionManager.setProxyUserName(doAsQueryParam); - } - - clientIpAddress = request.getRemoteAddr(); - LOG.debug("Client IP Address: " + clientIpAddress); - // Set the thread local ip address - SessionManager.setIpAddress(clientIpAddress); - // Generate new cookie and add it to the response - if (requireNewCookie && - !authType.equalsIgnoreCase(HiveAuthFactory.AuthTypes.NOSASL.toString())) { - String cookieToken = HttpAuthUtils.createCookieToken(clientUserName); - Cookie hs2Cookie = createCookie(signer.signCookie(cookieToken)); - - if (isHttpOnlyCookie) { - response.setHeader("SET-COOKIE", getHttpOnlyCookieHeader(hs2Cookie)); - } else { - response.addCookie(hs2Cookie); - } - LOG.info("Cookie added for clientUserName " + clientUserName); - } - super.doPost(request, response); - } - catch (HttpAuthenticationException e) { - LOG.error("Error: ", e); - // Send a 401 to the client - response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); - if(isKerberosAuthMode(authType)) { - response.addHeader(HttpAuthUtils.WWW_AUTHENTICATE, HttpAuthUtils.NEGOTIATE); - } - response.getWriter().println("Authentication Error: " + e.getMessage()); - } - finally { - // Clear the thread locals - SessionManager.clearUserName(); - SessionManager.clearIpAddress(); - SessionManager.clearProxyUserName(); - } - } - - /** - * Retrieves the client name from cookieString. If the cookie does not - * correspond to a valid client, the function returns null. - * @param cookies HTTP Request cookies. 
- * @return Client Username if cookieString has a HS2 Generated cookie that is currently valid. - * Else, returns null. - */ - private String getClientNameFromCookie(Cookie[] cookies) { - // Current Cookie Name, Current Cookie Value - String currName, currValue; - - // Following is the main loop which iterates through all the cookies send by the client. - // The HS2 generated cookies are of the format hive.server2.auth= - // A cookie which is identified as a hiveserver2 generated cookie is validated - // by calling signer.verifyAndExtract(). If the validation passes, send the - // username for which the cookie is validated to the caller. If no client side - // cookie passes the validation, return null to the caller. - for (Cookie currCookie : cookies) { - // Get the cookie name - currName = currCookie.getName(); - if (!currName.equals(AUTH_COOKIE)) { - // Not a HS2 generated cookie, continue. - continue; - } - // If we reached here, we have match for HS2 generated cookie - currValue = currCookie.getValue(); - // Validate the value. - currValue = signer.verifyAndExtract(currValue); - // Retrieve the user name, do the final validation step. - if (currValue != null) { - String userName = HttpAuthUtils.getUserNameFromCookieToken(currValue); - - if (userName == null) { - LOG.warn("Invalid cookie token " + currValue); - continue; - } - //We have found a valid cookie in the client request. - if (LOG.isDebugEnabled()) { - LOG.debug("Validated the cookie for user " + userName); - } - return userName; - } - } - // No valid HS2 generated cookies found, return null - return null; - } - - /** - * Convert cookie array to human readable cookie string - * @param cookies Cookie Array - * @return String containing all the cookies separated by a newline character. - * Each cookie is of the format [key]=[value] - */ - private String toCookieStr(Cookie[] cookies) { - String cookieStr = ""; - - for (Cookie c : cookies) { - cookieStr += c.getName() + "=" + c.getValue() + " ;\n"; - } - return cookieStr; - } - - /** - * Validate the request cookie. This function iterates over the request cookie headers - * and finds a cookie that represents a valid client/server session. If it finds one, it - * returns the client name associated with the session. Else, it returns null. - * @param request The HTTP Servlet Request send by the client - * @return Client Username if the request has valid HS2 cookie, else returns null - * @throws UnsupportedEncodingException - */ - private String validateCookie(HttpServletRequest request) throws UnsupportedEncodingException { - // Find all the valid cookies associated with the request. - Cookie[] cookies = request.getCookies(); - - if (cookies == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("No valid cookies associated with the request " + request); - } - return null; - } - if (LOG.isDebugEnabled()) { - LOG.debug("Received cookies: " + toCookieStr(cookies)); - } - return getClientNameFromCookie(cookies); - } - - /** - * Generate a server side cookie given the cookie value as the input. - * @param str Input string token. - * @return The generated cookie. 
- * @throws UnsupportedEncodingException - */ - private Cookie createCookie(String str) throws UnsupportedEncodingException { - if (LOG.isDebugEnabled()) { - LOG.debug("Cookie name = " + AUTH_COOKIE + " value = " + str); - } - Cookie cookie = new Cookie(AUTH_COOKIE, str); - - cookie.setMaxAge(cookieMaxAge); - if (cookieDomain != null) { - cookie.setDomain(cookieDomain); - } - if (cookiePath != null) { - cookie.setPath(cookiePath); - } - cookie.setSecure(isCookieSecure); - return cookie; - } - - /** - * Generate httponly cookie from HS2 cookie - * @param cookie HS2 generated cookie - * @return The httponly cookie - */ - private static String getHttpOnlyCookieHeader(Cookie cookie) { - NewCookie newCookie = new NewCookie(cookie.getName(), cookie.getValue(), - cookie.getPath(), cookie.getDomain(), cookie.getVersion(), - cookie.getComment(), cookie.getMaxAge(), cookie.getSecure()); - return newCookie + "; HttpOnly"; - } - - /** - * Do the LDAP/PAM authentication - * @param request - * @param authType - * @throws HttpAuthenticationException - */ - private String doPasswdAuth(HttpServletRequest request, String authType) - throws HttpAuthenticationException { - String userName = getUsername(request, authType); - // No-op when authType is NOSASL - if (!authType.equalsIgnoreCase(HiveAuthFactory.AuthTypes.NOSASL.toString())) { - try { - AuthMethods authMethod = AuthMethods.getValidAuthMethod(authType); - PasswdAuthenticationProvider provider = - AuthenticationProviderFactory.getAuthenticationProvider(authMethod); - provider.Authenticate(userName, getPassword(request, authType)); - - } catch (Exception e) { - throw new HttpAuthenticationException(e); - } - } - return userName; - } - - /** - * Do the GSS-API kerberos authentication. - * We already have a logged in subject in the form of serviceUGI, - * which GSS-API will extract information from. - * In case of a SPNego request we use the httpUGI, - * for the authenticating service tickets. - * @param request - * @return - * @throws HttpAuthenticationException - */ - private String doKerberosAuth(HttpServletRequest request) - throws HttpAuthenticationException { - // Try authenticating with the http/_HOST principal - if (httpUGI != null) { - try { - return httpUGI.doAs(new HttpKerberosServerAction(request, httpUGI)); - } catch (Exception e) { - LOG.info("Failed to authenticate with http/_HOST kerberos principal, " + - "trying with hive/_HOST kerberos principal"); - } - } - // Now try with hive/_HOST principal - try { - return serviceUGI.doAs(new HttpKerberosServerAction(request, serviceUGI)); - } catch (Exception e) { - LOG.error("Failed to authenticate with hive/_HOST kerberos principal"); - throw new HttpAuthenticationException(e); - } - - } - - class HttpKerberosServerAction implements PrivilegedExceptionAction { - HttpServletRequest request; - UserGroupInformation serviceUGI; - - HttpKerberosServerAction(HttpServletRequest request, - UserGroupInformation serviceUGI) { - this.request = request; - this.serviceUGI = serviceUGI; - } - - @Override - public String run() throws HttpAuthenticationException { - // Get own Kerberos credentials for accepting connection - GSSManager manager = GSSManager.getInstance(); - GSSContext gssContext = null; - String serverPrincipal = getPrincipalWithoutRealm( - serviceUGI.getUserName()); - try { - // This Oid for Kerberos GSS-API mechanism. - Oid kerberosMechOid = new Oid("1.2.840.113554.1.2.2"); - // Oid for SPNego GSS-API mechanism. 
- Oid spnegoMechOid = new Oid("1.3.6.1.5.5.2"); - // Oid for kerberos principal name - Oid krb5PrincipalOid = new Oid("1.2.840.113554.1.2.2.1"); - - // GSS name for server - GSSName serverName = manager.createName(serverPrincipal, krb5PrincipalOid); - - // GSS credentials for server - GSSCredential serverCreds = manager.createCredential(serverName, - GSSCredential.DEFAULT_LIFETIME, - new Oid[]{kerberosMechOid, spnegoMechOid}, - GSSCredential.ACCEPT_ONLY); - - // Create a GSS context - gssContext = manager.createContext(serverCreds); - // Get service ticket from the authorization header - String serviceTicketBase64 = getAuthHeader(request, authType); - byte[] inToken = Base64.decodeBase64(serviceTicketBase64.getBytes()); - gssContext.acceptSecContext(inToken, 0, inToken.length); - // Authenticate or deny based on its context completion - if (!gssContext.isEstablished()) { - throw new HttpAuthenticationException("Kerberos authentication failed: " + - "unable to establish context with the service ticket " + - "provided by the client."); - } - else { - return getPrincipalWithoutRealmAndHost(gssContext.getSrcName().toString()); - } - } - catch (GSSException e) { - throw new HttpAuthenticationException("Kerberos authentication failed: ", e); - } - finally { - if (gssContext != null) { - try { - gssContext.dispose(); - } catch (GSSException e) { - // No-op - } - } - } - } - - private String getPrincipalWithoutRealm(String fullPrincipal) - throws HttpAuthenticationException { - KerberosNameShim fullKerberosName; - try { - fullKerberosName = ShimLoader.getHadoopShims().getKerberosNameShim(fullPrincipal); - } catch (IOException e) { - throw new HttpAuthenticationException(e); - } - String serviceName = fullKerberosName.getServiceName(); - String hostName = fullKerberosName.getHostName(); - String principalWithoutRealm = serviceName; - if (hostName != null) { - principalWithoutRealm = serviceName + "/" + hostName; - } - return principalWithoutRealm; - } - - private String getPrincipalWithoutRealmAndHost(String fullPrincipal) - throws HttpAuthenticationException { - KerberosNameShim fullKerberosName; - try { - fullKerberosName = ShimLoader.getHadoopShims().getKerberosNameShim(fullPrincipal); - return fullKerberosName.getShortName(); - } catch (IOException e) { - throw new HttpAuthenticationException(e); - } - } - } - - private String getUsername(HttpServletRequest request, String authType) - throws HttpAuthenticationException { - String[] creds = getAuthHeaderTokens(request, authType); - // Username must be present - if (creds[0] == null || creds[0].isEmpty()) { - throw new HttpAuthenticationException("Authorization header received " + - "from the client does not contain username."); - } - return creds[0]; - } - - private String getPassword(HttpServletRequest request, String authType) - throws HttpAuthenticationException { - String[] creds = getAuthHeaderTokens(request, authType); - // Password must be present - if (creds[1] == null || creds[1].isEmpty()) { - throw new HttpAuthenticationException("Authorization header received " + - "from the client does not contain username."); - } - return creds[1]; - } - - private String[] getAuthHeaderTokens(HttpServletRequest request, - String authType) throws HttpAuthenticationException { - String authHeaderBase64 = getAuthHeader(request, authType); - String authHeaderString = StringUtils.newStringUtf8( - Base64.decodeBase64(authHeaderBase64.getBytes())); - String[] creds = authHeaderString.split(":"); - return creds; - } - - /** - * Returns the base64 encoded 
auth header payload - * @param request - * @param authType - * @return - * @throws HttpAuthenticationException - */ - private String getAuthHeader(HttpServletRequest request, String authType) - throws HttpAuthenticationException { - String authHeader = request.getHeader(HttpAuthUtils.AUTHORIZATION); - // Each http request must have an Authorization header - if (authHeader == null || authHeader.isEmpty()) { - throw new HttpAuthenticationException("Authorization header received " + - "from the client is empty."); - } - - String authHeaderBase64String; - int beginIndex; - if (isKerberosAuthMode(authType)) { - beginIndex = (HttpAuthUtils.NEGOTIATE + " ").length(); - } - else { - beginIndex = (HttpAuthUtils.BASIC + " ").length(); - } - authHeaderBase64String = authHeader.substring(beginIndex); - // Authorization header must have a payload - if (authHeaderBase64String == null || authHeaderBase64String.isEmpty()) { - throw new HttpAuthenticationException("Authorization header received " + - "from the client does not contain any data."); - } - return authHeaderBase64String; - } - - private boolean isKerberosAuthMode(String authType) { - return authType.equalsIgnoreCase(HiveAuthFactory.AuthTypes.KERBEROS.toString()); - } - - private static String getDoAsQueryParam(String queryString) { - if (LOG.isDebugEnabled()) { - LOG.debug("URL query string:" + queryString); - } - if (queryString == null) { - return null; - } - Map params = javax.servlet.http.HttpUtils.parseQueryString( queryString ); - Set keySet = params.keySet(); - for (String key: keySet) { - if (key.equalsIgnoreCase("doAs")) { - return params.get(key)[0]; - } - } - return null; - } - -} - - diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/server/HiveServer2.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/server/HiveServer2.java deleted file mode 100644 index 95233996cbbcb..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/server/HiveServer2.java +++ /dev/null @@ -1,277 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hive.service.server; - -import java.util.Properties; - -import scala.runtime.AbstractFunction0; -import scala.runtime.BoxedUnit; - -import org.apache.commons.cli.GnuParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.Option; -import org.apache.commons.cli.OptionBuilder; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hive.common.util.HiveStringUtils; -import org.apache.hive.service.CompositeService; -import org.apache.hive.service.cli.CLIService; -import org.apache.hive.service.cli.thrift.ThriftBinaryCLIService; -import org.apache.hive.service.cli.thrift.ThriftCLIService; -import org.apache.hive.service.cli.thrift.ThriftHttpCLIService; - -import org.apache.spark.util.ShutdownHookManager; - -/** - * HiveServer2. - * - */ -public class HiveServer2 extends CompositeService { - private static final Log LOG = LogFactory.getLog(HiveServer2.class); - - private CLIService cliService; - private ThriftCLIService thriftCLIService; - - public HiveServer2() { - super(HiveServer2.class.getSimpleName()); - HiveConf.setLoadHiveServer2Config(true); - } - - @Override - public synchronized void init(HiveConf hiveConf) { - cliService = new CLIService(this); - addService(cliService); - if (isHTTPTransportMode(hiveConf)) { - thriftCLIService = new ThriftHttpCLIService(cliService); - } else { - thriftCLIService = new ThriftBinaryCLIService(cliService); - } - addService(thriftCLIService); - super.init(hiveConf); - - // Add a shutdown hook for catching SIGTERM & SIGINT - // this must be higher than the Hadoop Filesystem priority of 10, - // which the default priority is. 
- // The signature of the callback must match that of a scala () -> Unit - // function - ShutdownHookManager.addShutdownHook( - new AbstractFunction0() { - public BoxedUnit apply() { - try { - LOG.info("Hive Server Shutdown hook invoked"); - stop(); - } catch (Throwable e) { - LOG.warn("Ignoring Exception while stopping Hive Server from shutdown hook", - e); - } - return BoxedUnit.UNIT; - } - }); - } - - public static boolean isHTTPTransportMode(HiveConf hiveConf) { - String transportMode = System.getenv("HIVE_SERVER2_TRANSPORT_MODE"); - if (transportMode == null) { - transportMode = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE); - } - if (transportMode != null && (transportMode.equalsIgnoreCase("http"))) { - return true; - } - return false; - } - - @Override - public synchronized void start() { - super.start(); - } - - @Override - public synchronized void stop() { - LOG.info("Shutting down HiveServer2"); - super.stop(); - } - - private static void startHiveServer2() throws Throwable { - long attempts = 0, maxAttempts = 1; - while (true) { - LOG.info("Starting HiveServer2"); - HiveConf hiveConf = new HiveConf(); - maxAttempts = hiveConf.getLongVar(HiveConf.ConfVars.HIVE_SERVER2_MAX_START_ATTEMPTS); - HiveServer2 server = null; - try { - server = new HiveServer2(); - server.init(hiveConf); - server.start(); - ShimLoader.getHadoopShims().startPauseMonitor(hiveConf); - break; - } catch (Throwable throwable) { - if (server != null) { - try { - server.stop(); - } catch (Throwable t) { - LOG.info("Exception caught when calling stop of HiveServer2 before retrying start", t); - } finally { - server = null; - } - } - if (++attempts >= maxAttempts) { - throw new Error("Max start attempts " + maxAttempts + " exhausted", throwable); - } else { - LOG.warn("Error starting HiveServer2 on attempt " + attempts - + ", will retry in 60 seconds", throwable); - try { - Thread.sleep(60L * 1000L); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - } - } - } - - public static void main(String[] args) { - HiveConf.setLoadHiveServer2Config(true); - ServerOptionsProcessor oproc = new ServerOptionsProcessor("hiveserver2"); - ServerOptionsProcessorResponse oprocResponse = oproc.parse(args); - - HiveStringUtils.startupShutdownMessage(HiveServer2.class, args, LOG); - - // Call the executor which will execute the appropriate command based on the parsed options - oprocResponse.getServerOptionsExecutor().execute(); - } - - /** - * ServerOptionsProcessor. - * Process arguments given to HiveServer2 (-hiveconf property=value) - * Set properties in System properties - * Create an appropriate response object, - * which has executor to execute the appropriate command based on the parsed options. 
- */ - public static class ServerOptionsProcessor { - private final Options options = new Options(); - private org.apache.commons.cli.CommandLine commandLine; - private final String serverName; - private final StringBuilder debugMessage = new StringBuilder(); - - @SuppressWarnings("static-access") - public ServerOptionsProcessor(String serverName) { - this.serverName = serverName; - // -hiveconf x=y - options.addOption(OptionBuilder - .withValueSeparator() - .hasArgs(2) - .withArgName("property=value") - .withLongOpt("hiveconf") - .withDescription("Use value for given property") - .create()); - options.addOption(new Option("H", "help", false, "Print help information")); - } - - public ServerOptionsProcessorResponse parse(String[] argv) { - try { - commandLine = new GnuParser().parse(options, argv); - // Process --hiveconf - // Get hiveconf param values and set the System property values - Properties confProps = commandLine.getOptionProperties("hiveconf"); - for (String propKey : confProps.stringPropertyNames()) { - // save logging message for log4j output latter after log4j initialize properly - debugMessage.append("Setting " + propKey + "=" + confProps.getProperty(propKey) + ";\n"); - System.setProperty(propKey, confProps.getProperty(propKey)); - } - - // Process --help - if (commandLine.hasOption('H')) { - return new ServerOptionsProcessorResponse(new HelpOptionExecutor(serverName, options)); - } - } catch (ParseException e) { - // Error out & exit - we were not able to parse the args successfully - System.err.println("Error starting HiveServer2 with given arguments: "); - System.err.println(e.getMessage()); - System.exit(-1); - } - // Default executor, when no option is specified - return new ServerOptionsProcessorResponse(new StartOptionExecutor()); - } - - StringBuilder getDebugMessage() { - return debugMessage; - } - } - - /** - * The response sent back from {@link ServerOptionsProcessor#parse(String[])} - */ - static class ServerOptionsProcessorResponse { - private final ServerOptionsExecutor serverOptionsExecutor; - - ServerOptionsProcessorResponse(ServerOptionsExecutor serverOptionsExecutor) { - this.serverOptionsExecutor = serverOptionsExecutor; - } - - ServerOptionsExecutor getServerOptionsExecutor() { - return serverOptionsExecutor; - } - } - - /** - * The executor interface for running the appropriate HiveServer2 command based on parsed options - */ - interface ServerOptionsExecutor { - void execute(); - } - - /** - * HelpOptionExecutor: executes the --help option by printing out the usage - */ - static class HelpOptionExecutor implements ServerOptionsExecutor { - private final Options options; - private final String serverName; - - HelpOptionExecutor(String serverName, Options options) { - this.options = options; - this.serverName = serverName; - } - - @Override - public void execute() { - new HelpFormatter().printHelp(serverName, options); - System.exit(0); - } - } - - /** - * StartOptionExecutor: starts HiveServer2. - * This is the default executor, when no option is specified. 
- */ - static class StartOptionExecutor implements ServerOptionsExecutor { - @Override - public void execute() { - try { - startHiveServer2(); - } catch (Throwable t) { - LOG.fatal("Error starting HiveServer2", t); - System.exit(-1); - } - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java b/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java deleted file mode 100644 index 8ee98103f7ef7..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/java/org/apache/hive/service/server/ThreadWithGarbageCleanup.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package org.apache.hive.service.server; - -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.RawStore; - -/** - * A HiveServer2 thread used to construct new server threads. - * In particular, this thread ensures an orderly cleanup, - * when killed by its corresponding ExecutorService. - */ -public class ThreadWithGarbageCleanup extends Thread { - private static final Log LOG = LogFactory.getLog(ThreadWithGarbageCleanup.class); - - Map threadRawStoreMap = - ThreadFactoryWithGarbageCleanup.getThreadRawStoreMap(); - - public ThreadWithGarbageCleanup(Runnable runnable) { - super(runnable); - } - - /** - * Add any Thread specific garbage cleanup code here. - * Currently, it shuts down the RawStore object for this thread if it is not null. - */ - @Override - public void finalize() throws Throwable { - cleanRawStore(); - super.finalize(); - } - - private void cleanRawStore() { - Long threadId = this.getId(); - RawStore threadLocalRawStore = threadRawStoreMap.get(threadId); - if (threadLocalRawStore != null) { - LOG.debug("RawStore: " + threadLocalRawStore + ", for the thread: " + - this.getName() + " will be closed now."); - threadLocalRawStore.shutdown(); - threadRawStoreMap.remove(threadId); - } - } - - /** - * Cache the ThreadLocal RawStore object. Called from the corresponding thread. 
- */ - public void cacheThreadLocalRawStore() { - Long threadId = this.getId(); - RawStore threadLocalRawStore = HiveMetaStore.HMSHandler.getRawStore(); - if (threadLocalRawStore != null && !threadRawStoreMap.containsKey(threadId)) { - LOG.debug("Adding RawStore: " + threadLocalRawStore + ", for the thread: " + - this.getName() + " to threadRawStoreMap for future cleanup."); - threadRawStoreMap.put(threadId, threadLocalRawStore); - } - } -} diff --git a/sql/hive-thriftserver/v1.2/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala b/sql/hive-thriftserver/v1.2/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala deleted file mode 100644 index 9a28dd6a31e6e..0000000000000 --- a/sql/hive-thriftserver/v1.2/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.hive.thriftserver - -import org.apache.commons.logging.LogFactory -import org.apache.hadoop.hive.ql.session.SessionState -import org.apache.hive.service.cli.{RowSet, RowSetFactory, TableSchema, Type} -import org.apache.hive.service.cli.Type._ -import org.apache.hive.service.cli.thrift.TProtocolVersion._ - -/** - * Various utilities for hive-thriftserver used to upgrade the built-in Hive. 
- */ -private[thriftserver] object ThriftserverShimUtils { - - private[thriftserver] object TOperationType { - val GET_TYPE_INFO = org.apache.hive.service.cli.thrift.TOperationType.GET_TYPE_INFO - } - - private[thriftserver] type TProtocolVersion = org.apache.hive.service.cli.thrift.TProtocolVersion - private[thriftserver] type Client = org.apache.hive.service.cli.thrift.TCLIService.Client - private[thriftserver] type TOpenSessionReq = org.apache.hive.service.cli.thrift.TOpenSessionReq - private[thriftserver] type TGetSchemasReq = org.apache.hive.service.cli.thrift.TGetSchemasReq - private[thriftserver] type TGetTablesReq = org.apache.hive.service.cli.thrift.TGetTablesReq - private[thriftserver] type TGetColumnsReq = org.apache.hive.service.cli.thrift.TGetColumnsReq - private[thriftserver] type TGetInfoReq = org.apache.hive.service.cli.thrift.TGetInfoReq - private[thriftserver] type TExecuteStatementReq = - org.apache.hive.service.cli.thrift.TExecuteStatementReq - private[thriftserver] type THandleIdentifier = - org.apache.hive.service.cli.thrift.THandleIdentifier - private[thriftserver] type TOperationType = org.apache.hive.service.cli.thrift.TOperationType - private[thriftserver] type TOperationHandle = org.apache.hive.service.cli.thrift.TOperationHandle - - private[thriftserver] def getConsole: SessionState.LogHelper = { - val LOG = LogFactory.getLog(classOf[SparkSQLCLIDriver]) - new SessionState.LogHelper(LOG) - } - - private[thriftserver] def resultRowSet( - getResultSetSchema: TableSchema, - getProtocolVersion: TProtocolVersion): RowSet = { - RowSetFactory.create(getResultSetSchema, getProtocolVersion) - } - - private[thriftserver] def supportedType(): Seq[Type] = { - Seq(NULL_TYPE, BOOLEAN_TYPE, STRING_TYPE, BINARY_TYPE, - TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE, - FLOAT_TYPE, DOUBLE_TYPE, DECIMAL_TYPE, - DATE_TYPE, TIMESTAMP_TYPE, - ARRAY_TYPE, MAP_TYPE, STRUCT_TYPE) - } - - private[thriftserver] val testedProtocolVersions = Seq( - HIVE_CLI_SERVICE_PROTOCOL_V1, - HIVE_CLI_SERVICE_PROTOCOL_V2, - HIVE_CLI_SERVICE_PROTOCOL_V3, - HIVE_CLI_SERVICE_PROTOCOL_V4, - HIVE_CLI_SERVICE_PROTOCOL_V5, - HIVE_CLI_SERVICE_PROTOCOL_V6, - HIVE_CLI_SERVICE_PROTOCOL_V7, - HIVE_CLI_SERVICE_PROTOCOL_V8) -} diff --git a/sql/hive-thriftserver/v2.3/if/TCLIService.thrift b/sql/hive-thriftserver/v2.3/if/TCLIService.thrift deleted file mode 100644 index 9026cd25df5b3..0000000000000 --- a/sql/hive-thriftserver/v2.3/if/TCLIService.thrift +++ /dev/null @@ -1,1269 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Coding Conventions for this file: -// -// Structs/Enums/Unions -// * Struct, Enum, and Union names begin with a "T", -// and use a capital letter for each new word, with no underscores. 
-// * All fields should be declared as either optional or required. -// -// Functions -// * Function names start with a capital letter and have a capital letter for -// each new word, with no underscores. -// * Each function should take exactly one parameter, named TFunctionNameReq, -// and should return either void or TFunctionNameResp. This convention allows -// incremental updates. -// -// Services -// * Service names begin with the letter "T", use a capital letter for each -// new word (with no underscores), and end with the word "Service". - -namespace java org.apache.hive.service.rpc.thrift -namespace cpp apache.hive.service.rpc.thrift - -// List of protocol versions. A new token should be -// added to the end of this list every time a change is made. -enum TProtocolVersion { - HIVE_CLI_SERVICE_PROTOCOL_V1, - - // V2 adds support for asynchronous execution - HIVE_CLI_SERVICE_PROTOCOL_V2 - - // V3 add varchar type, primitive type qualifiers - HIVE_CLI_SERVICE_PROTOCOL_V3 - - // V4 add decimal precision/scale, char type - HIVE_CLI_SERVICE_PROTOCOL_V4 - - // V5 adds error details when GetOperationStatus returns in error state - HIVE_CLI_SERVICE_PROTOCOL_V5 - - // V6 uses binary type for binary payload (was string) and uses columnar result set - HIVE_CLI_SERVICE_PROTOCOL_V6 - - // V7 adds support for delegation token based connection - HIVE_CLI_SERVICE_PROTOCOL_V7 - - // V8 adds support for interval types - HIVE_CLI_SERVICE_PROTOCOL_V8 - - // V9 adds support for serializing ResultSets in SerDe - HIVE_CLI_SERVICE_PROTOCOL_V9 - - // V10 adds support for in place updates via GetOperationStatus - HIVE_CLI_SERVICE_PROTOCOL_V10 -} - -enum TTypeId { - BOOLEAN_TYPE, - TINYINT_TYPE, - SMALLINT_TYPE, - INT_TYPE, - BIGINT_TYPE, - FLOAT_TYPE, - DOUBLE_TYPE, - STRING_TYPE, - TIMESTAMP_TYPE, - BINARY_TYPE, - ARRAY_TYPE, - MAP_TYPE, - STRUCT_TYPE, - UNION_TYPE, - USER_DEFINED_TYPE, - DECIMAL_TYPE, - NULL_TYPE, - DATE_TYPE, - VARCHAR_TYPE, - CHAR_TYPE, - INTERVAL_YEAR_MONTH_TYPE, - INTERVAL_DAY_TIME_TYPE -} - -const set PRIMITIVE_TYPES = [ - TTypeId.BOOLEAN_TYPE, - TTypeId.TINYINT_TYPE, - TTypeId.SMALLINT_TYPE, - TTypeId.INT_TYPE, - TTypeId.BIGINT_TYPE, - TTypeId.FLOAT_TYPE, - TTypeId.DOUBLE_TYPE, - TTypeId.STRING_TYPE, - TTypeId.TIMESTAMP_TYPE, - TTypeId.BINARY_TYPE, - TTypeId.DECIMAL_TYPE, - TTypeId.NULL_TYPE, - TTypeId.DATE_TYPE, - TTypeId.VARCHAR_TYPE, - TTypeId.CHAR_TYPE, - TTypeId.INTERVAL_YEAR_MONTH_TYPE, - TTypeId.INTERVAL_DAY_TIME_TYPE -] - -const set COMPLEX_TYPES = [ - TTypeId.ARRAY_TYPE - TTypeId.MAP_TYPE - TTypeId.STRUCT_TYPE - TTypeId.UNION_TYPE - TTypeId.USER_DEFINED_TYPE -] - -const set COLLECTION_TYPES = [ - TTypeId.ARRAY_TYPE - TTypeId.MAP_TYPE -] - -const map TYPE_NAMES = { - TTypeId.BOOLEAN_TYPE: "BOOLEAN", - TTypeId.TINYINT_TYPE: "TINYINT", - TTypeId.SMALLINT_TYPE: "SMALLINT", - TTypeId.INT_TYPE: "INT", - TTypeId.BIGINT_TYPE: "BIGINT", - TTypeId.FLOAT_TYPE: "FLOAT", - TTypeId.DOUBLE_TYPE: "DOUBLE", - TTypeId.STRING_TYPE: "STRING", - TTypeId.TIMESTAMP_TYPE: "TIMESTAMP", - TTypeId.BINARY_TYPE: "BINARY", - TTypeId.ARRAY_TYPE: "ARRAY", - TTypeId.MAP_TYPE: "MAP", - TTypeId.STRUCT_TYPE: "STRUCT", - TTypeId.UNION_TYPE: "UNIONTYPE", - TTypeId.DECIMAL_TYPE: "DECIMAL", - TTypeId.NULL_TYPE: "NULL" - TTypeId.DATE_TYPE: "DATE" - TTypeId.VARCHAR_TYPE: "VARCHAR" - TTypeId.CHAR_TYPE: "CHAR" - TTypeId.INTERVAL_YEAR_MONTH_TYPE: "INTERVAL_YEAR_MONTH" - TTypeId.INTERVAL_DAY_TIME_TYPE: "INTERVAL_DAY_TIME" -} - -// Thrift does not support recursively defined types or forward declarations, -// which makes 
it difficult to represent Hive's nested types. -// To get around these limitations TTypeDesc employs a type list that maps -// integer "pointers" to TTypeEntry objects. The following examples show -// how different types are represented using this scheme: -// -// "INT": -// TTypeDesc { -// types = [ -// TTypeEntry.primitive_entry { -// type = INT_TYPE -// } -// ] -// } -// -// "ARRAY<INT>": -// TTypeDesc { -// types = [ -// TTypeEntry.array_entry { -// object_type_ptr = 1 -// }, -// TTypeEntry.primitive_entry { -// type = INT_TYPE -// } -// ] -// } -// -// "MAP<INT,STRING>": -// TTypeDesc { -// types = [ -// TTypeEntry.map_entry { -// key_type_ptr = 1 -// value_type_ptr = 2 -// }, -// TTypeEntry.primitive_entry { -// type = INT_TYPE -// }, -// TTypeEntry.primitive_entry { -// type = STRING_TYPE -// } -// ] -// } - -typedef i32 TTypeEntryPtr - -// Valid TTypeQualifiers key names -const string CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength" - -// Type qualifier key name for decimal -const string PRECISION = "precision" -const string SCALE = "scale" - -union TTypeQualifierValue { - 1: optional i32 i32Value - 2: optional string stringValue -} - -// Type qualifiers for primitive type. -struct TTypeQualifiers { - 1: required map<string, TTypeQualifierValue> qualifiers -} - -// Type entry for a primitive type. -struct TPrimitiveTypeEntry { - // The primitive type token. This must satisfy the condition - // that type is in the PRIMITIVE_TYPES set. - 1: required TTypeId type - 2: optional TTypeQualifiers typeQualifiers -} - -// Type entry for an ARRAY type. -struct TArrayTypeEntry { - 1: required TTypeEntryPtr objectTypePtr -} - -// Type entry for a MAP type. -struct TMapTypeEntry { - 1: required TTypeEntryPtr keyTypePtr - 2: required TTypeEntryPtr valueTypePtr -} - -// Type entry for a STRUCT type. -struct TStructTypeEntry { - 1: required map<string, TTypeEntryPtr> nameToTypePtr -} - -// Type entry for a UNIONTYPE type. -struct TUnionTypeEntry { - 1: required map<string, TTypeEntryPtr> nameToTypePtr -} - -struct TUserDefinedTypeEntry { - // The fully qualified name of the class implementing this type. - 1: required string typeClassName -} - -// We use a union here since Thrift does not support inheritance. -union TTypeEntry { - 1: TPrimitiveTypeEntry primitiveEntry - 2: TArrayTypeEntry arrayEntry - 3: TMapTypeEntry mapEntry - 4: TStructTypeEntry structEntry - 5: TUnionTypeEntry unionEntry - 6: TUserDefinedTypeEntry userDefinedTypeEntry -} - -// Type descriptor for columns. -struct TTypeDesc { - // The "top" type is always the first element of the list. - // If the top type is an ARRAY, MAP, STRUCT, or UNIONTYPE - // type, then subsequent elements represent nested types. - 1: required list<TTypeEntry> types -} - -// A result set column descriptor. -struct TColumnDesc { - // The name of the column - 1: required string columnName - - // The type descriptor for this column - 2: required TTypeDesc typeDesc - - // The ordinal position of this column in the schema - 3: required i32 position - - 4: optional string comment -} - -// Metadata used to describe the schema (column names, types, comments) -// of result sets. -struct TTableSchema { - 1: required list<TColumnDesc> columns -} - -// A Boolean column value. -struct TBoolValue { - // NULL if value is unset. - 1: optional bool value -} - -// A Byte column value. -struct TByteValue { - // NULL if value is unset. - 1: optional byte value -} - -// A signed, 16 bit column value.
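For context on the pointer scheme described above, a minimal sketch (not part of the deleted file) of how a client built against the generated Java classes could assemble the TTypeDesc for MAP<INT,STRING>. It assumes the usual Thrift 0.9.x Java codegen, in which unions expose field-named static factories (TTypeEntry.mapEntry, TTypeEntry.primitiveEntry) and list fields get addTo helpers.

  import org.apache.hive.service.rpc.thrift.TMapTypeEntry;
  import org.apache.hive.service.rpc.thrift.TPrimitiveTypeEntry;
  import org.apache.hive.service.rpc.thrift.TTypeDesc;
  import org.apache.hive.service.rpc.thrift.TTypeEntry;
  import org.apache.hive.service.rpc.thrift.TTypeId;

  public final class MapTypeDescSketch {
    // Builds the type list [map_entry(1, 2), INT, STRING]: entry 0 is the "top"
    // MAP type, and its key/value "pointers" are indexes into the same list.
    public static TTypeDesc mapOfIntToString() {
      TTypeDesc desc = new TTypeDesc();
      desc.addToTypes(TTypeEntry.mapEntry(new TMapTypeEntry(1, 2)));        // top type
      desc.addToTypes(TTypeEntry.primitiveEntry(
          new TPrimitiveTypeEntry(TTypeId.INT_TYPE)));                      // key type (ptr 1)
      desc.addToTypes(TTypeEntry.primitiveEntry(
          new TPrimitiveTypeEntry(TTypeId.STRING_TYPE)));                   // value type (ptr 2)
      return desc;
    }
  }

The flat list avoids recursion in the IDL while still letting arbitrarily nested types be described by index.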
-struct TI16Value { - // NULL if value is unset - 1: optional i16 value -} - -// A signed, 32 bit column value -struct TI32Value { - // NULL if value is unset - 1: optional i32 value -} - -// A signed 64 bit column value -struct TI64Value { - // NULL if value is unset - 1: optional i64 value -} - -// A floating point 64 bit column value -struct TDoubleValue { - // NULL if value is unset - 1: optional double value -} - -struct TStringValue { - // NULL if value is unset - 1: optional string value -} - -// A single column value in a result set. -// Note that Hive's type system is richer than Thrift's, -// so in some cases we have to map multiple Hive types -// to the same Thrift type. On the client-side this is -// disambiguated by looking at the Schema of the -// result set. -union TColumnValue { - 1: TBoolValue boolVal // BOOLEAN - 2: TByteValue byteVal // TINYINT - 3: TI16Value i16Val // SMALLINT - 4: TI32Value i32Val // INT - 5: TI64Value i64Val // BIGINT, TIMESTAMP - 6: TDoubleValue doubleVal // FLOAT, DOUBLE - 7: TStringValue stringVal // STRING, LIST, MAP, STRUCT, UNIONTYPE, BINARY, DECIMAL, NULL, INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME -} - -// Represents a row in a rowset. -struct TRow { - 1: required list<TColumnValue> colVals -} - -struct TBoolColumn { - 1: required list<bool> values - 2: required binary nulls -} - -struct TByteColumn { - 1: required list<byte> values - 2: required binary nulls -} - -struct TI16Column { - 1: required list<i16> values - 2: required binary nulls -} - -struct TI32Column { - 1: required list<i32> values - 2: required binary nulls -} - -struct TI64Column { - 1: required list<i64> values - 2: required binary nulls -} - -struct TDoubleColumn { - 1: required list<double> values - 2: required binary nulls -} - -struct TStringColumn { - 1: required list<string> values - 2: required binary nulls -} - -struct TBinaryColumn { - 1: required list<binary> values - 2: required binary nulls -} - -// Note that Hive's type system is richer than Thrift's, -// so in some cases we have to map multiple Hive types -// to the same Thrift type. On the client-side this is -// disambiguated by looking at the Schema of the -// result set. -union TColumn { - 1: TBoolColumn boolVal // BOOLEAN - 2: TByteColumn byteVal // TINYINT - 3: TI16Column i16Val // SMALLINT - 4: TI32Column i32Val // INT - 5: TI64Column i64Val // BIGINT, TIMESTAMP - 6: TDoubleColumn doubleVal // FLOAT, DOUBLE - 7: TStringColumn stringVal // STRING, LIST, MAP, STRUCT, UNIONTYPE, DECIMAL, NULL - 8: TBinaryColumn binaryVal // BINARY -} - -// Represents a rowset -struct TRowSet { - // The starting row offset of this rowset. - 1: required i64 startRowOffset - 2: required list<TRow> rows - 3: optional list<TColumn> columns - 4: optional binary binaryColumns - 5: optional i32 columnCount -} - -// The return status code contained in each response. -enum TStatusCode { - SUCCESS_STATUS, - SUCCESS_WITH_INFO_STATUS, - STILL_EXECUTING_STATUS, - ERROR_STATUS, - INVALID_HANDLE_STATUS -} - -// The return status of a remote request -struct TStatus { - 1: required TStatusCode statusCode - - // If status is SUCCESS_WITH_INFO, info_msgs may be populated with - // additional diagnostic information. - 2: optional list<string> infoMessages - - // If status is ERROR, then the following fields may be set - 3: optional string sqlState // as defined in the ISO/IEF CLI specification - 4: optional i32 errorCode // internal error code - 5: optional string errorMessage -} - -// The state of an operation (i.e. a query or other -// asynchronous operation that generates a result set) -// on the server.
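The columnar structs above pair a values list with a binary nulls field. A short, hypothetical reader for one column follows (not part of the deleted file): it treats nulls as a bitmask with one bit per row, least-significant bit first, where a set bit marks that position as NULL. That bit layout is an assumption based on how HiveServer2 clients are commonly written, not something this IDL spells out.

  import java.util.List;
  import org.apache.hive.service.rpc.thrift.TRowSet;
  import org.apache.hive.service.rpc.thrift.TStringColumn;

  public final class ColumnReaderSketch {
    // Returns the string at `row`, or null when the corresponding bit in the
    // nulls bitmask is set (assumed LSB-first, one bit per row).
    static String stringAt(TStringColumn col, int row) {
      byte[] nulls = col.getNulls();
      boolean isNull = (nulls[row / 8] & (1 << (row % 8))) != 0;
      return isNull ? null : col.getValues().get(row);
    }

    // Example: walk the first column of a fetched TRowSet, assuming it is a STRING column.
    static void dumpFirstColumn(TRowSet rowSet) {
      TStringColumn col = rowSet.getColumns().get(0).getStringVal();
      List<String> values = col.getValues();
      for (int i = 0; i < values.size(); i++) {
        System.out.println(stringAt(col, i));
      }
    }
  }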
-enum TOperationState { - // The operation has been initialized - INITIALIZED_STATE, - - // The operation is running. In this state the result - // set is not available. - RUNNING_STATE, - - // The operation has completed. When an operation is in - // this state its result set may be fetched. - FINISHED_STATE, - - // The operation was canceled by a client - CANCELED_STATE, - - // The operation was closed by a client - CLOSED_STATE, - - // The operation failed due to an error - ERROR_STATE, - - // The operation is in an unrecognized state - UKNOWN_STATE, - - // The operation is in an pending state - PENDING_STATE, - - // The operation is in an timedout state - TIMEDOUT_STATE, -} - -// A string identifier. This is interpreted literally. -typedef string TIdentifier - -// A search pattern. -// -// Valid search pattern characters: -// '_': Any single character. -// '%': Any sequence of zero or more characters. -// '\': Escape character used to include special characters, -// e.g. '_', '%', '\'. If a '\' precedes a non-special -// character it has no special meaning and is interpreted -// literally. -typedef string TPattern - - -// A search pattern or identifier. Used as input -// parameter for many of the catalog functions. -typedef string TPatternOrIdentifier - -struct THandleIdentifier { - // 16 byte globally unique identifier - // This is the public ID of the handle and - // can be used for reporting. - 1: required binary guid, - - // 16 byte secret generated by the server - // and used to verify that the handle is not - // being hijacked by another user. - 2: required binary secret, -} - -// Client-side handle to persistent -// session information on the server-side. -struct TSessionHandle { - 1: required THandleIdentifier sessionId -} - -// The subtype of an OperationHandle. -enum TOperationType { - EXECUTE_STATEMENT, - GET_TYPE_INFO, - GET_CATALOGS, - GET_SCHEMAS, - GET_TABLES, - GET_TABLE_TYPES, - GET_COLUMNS, - GET_FUNCTIONS, - UNKNOWN, -} - -// Client-side reference to a task running -// asynchronously on the server. -struct TOperationHandle { - 1: required THandleIdentifier operationId - 2: required TOperationType operationType - - // If hasResultSet = TRUE, then this operation - // generates a result set that can be fetched. - // Note that the result set may be empty. - // - // If hasResultSet = FALSE, then this operation - // does not generate a result set, and calling - // GetResultSetMetadata or FetchResults against - // this OperationHandle will generate an error. - 3: required bool hasResultSet - - // For operations that don't generate result sets, - // modifiedRowCount is either: - // - // 1) The number of rows that were modified by - // the DML operation (e.g. number of rows inserted, - // number of rows deleted, etc). - // - // 2) 0 for operations that don't modify or add rows. - // - // 3) < 0 if the operation is capable of modifiying rows, - // but Hive is unable to determine how many rows were - // modified. For example, Hive's LOAD DATA command - // doesn't generate row count information because - // Hive doesn't inspect the data as it is loaded. - // - // modifiedRowCount is unset if the operation generates - // a result set. - 4: optional double modifiedRowCount -} - - -// OpenSession() -// -// Open a session (connection) on the server against -// which operations may be executed. -struct TOpenSessionReq { - // The version of the HiveServer2 protocol that the client is using. 
- 1: required TProtocolVersion client_protocol = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10 - - // Username and password for authentication. - // Depending on the authentication scheme being used, - // this information may instead be provided by a lower - // protocol layer, in which case these fields may be - // left unset. - 2: optional string username - 3: optional string password - - // Configuration overlay which is applied when the session is - // first created. - 4: optional map configuration -} - -struct TOpenSessionResp { - 1: required TStatus status - - // The protocol version that the server is using. - 2: required TProtocolVersion serverProtocolVersion = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10 - - // Session Handle - 3: optional TSessionHandle sessionHandle - - // The configuration settings for this session. - 4: optional map configuration -} - - -// CloseSession() -// -// Closes the specified session and frees any resources -// currently allocated to that session. Any open -// operations in that session will be canceled. -struct TCloseSessionReq { - 1: required TSessionHandle sessionHandle -} - -struct TCloseSessionResp { - 1: required TStatus status -} - - - -enum TGetInfoType { - CLI_MAX_DRIVER_CONNECTIONS = 0, - CLI_MAX_CONCURRENT_ACTIVITIES = 1, - CLI_DATA_SOURCE_NAME = 2, - CLI_FETCH_DIRECTION = 8, - CLI_SERVER_NAME = 13, - CLI_SEARCH_PATTERN_ESCAPE = 14, - CLI_DBMS_NAME = 17, - CLI_DBMS_VER = 18, - CLI_ACCESSIBLE_TABLES = 19, - CLI_ACCESSIBLE_PROCEDURES = 20, - CLI_CURSOR_COMMIT_BEHAVIOR = 23, - CLI_DATA_SOURCE_READ_ONLY = 25, - CLI_DEFAULT_TXN_ISOLATION = 26, - CLI_IDENTIFIER_CASE = 28, - CLI_IDENTIFIER_QUOTE_CHAR = 29, - CLI_MAX_COLUMN_NAME_LEN = 30, - CLI_MAX_CURSOR_NAME_LEN = 31, - CLI_MAX_SCHEMA_NAME_LEN = 32, - CLI_MAX_CATALOG_NAME_LEN = 34, - CLI_MAX_TABLE_NAME_LEN = 35, - CLI_SCROLL_CONCURRENCY = 43, - CLI_TXN_CAPABLE = 46, - CLI_USER_NAME = 47, - CLI_TXN_ISOLATION_OPTION = 72, - CLI_INTEGRITY = 73, - CLI_GETDATA_EXTENSIONS = 81, - CLI_NULL_COLLATION = 85, - CLI_ALTER_TABLE = 86, - CLI_ORDER_BY_COLUMNS_IN_SELECT = 90, - CLI_SPECIAL_CHARACTERS = 94, - CLI_MAX_COLUMNS_IN_GROUP_BY = 97, - CLI_MAX_COLUMNS_IN_INDEX = 98, - CLI_MAX_COLUMNS_IN_ORDER_BY = 99, - CLI_MAX_COLUMNS_IN_SELECT = 100, - CLI_MAX_COLUMNS_IN_TABLE = 101, - CLI_MAX_INDEX_SIZE = 102, - CLI_MAX_ROW_SIZE = 104, - CLI_MAX_STATEMENT_LEN = 105, - CLI_MAX_TABLES_IN_SELECT = 106, - CLI_MAX_USER_NAME_LEN = 107, - CLI_OJ_CAPABILITIES = 115, - - CLI_XOPEN_CLI_YEAR = 10000, - CLI_CURSOR_SENSITIVITY = 10001, - CLI_DESCRIBE_PARAMETER = 10002, - CLI_CATALOG_NAME = 10003, - CLI_COLLATION_SEQ = 10004, - CLI_MAX_IDENTIFIER_LEN = 10005, -} - -union TGetInfoValue { - 1: string stringValue - 2: i16 smallIntValue - 3: i32 integerBitmask - 4: i32 integerFlag - 5: i32 binaryValue - 6: i64 lenValue -} - -// GetInfo() -// -// This function is based on ODBC's CLIGetInfo() function. -// The function returns general information about the data source -// using the same keys as ODBC. -struct TGetInfoReq { - // The sesssion to run this request against - 1: required TSessionHandle sessionHandle - - 2: required TGetInfoType infoType -} - -struct TGetInfoResp { - 1: required TStatus status - - 2: required TGetInfoValue infoValue -} - - -// ExecuteStatement() -// -// Execute a statement. -// The returned OperationHandle can be used to check on the -// status of the statement, and to fetch results once the -// statement has finished executing. 
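Putting the session, statement, and fetch pieces together, a hypothetical client interaction with this service might look as follows. This sketch is not part of the deleted file; the host, port, and plain binary transport are assumptions (deployments frequently front HiveServer2 with SASL or HTTP transports), and error handling is omitted. FetchResults, CloseOperation, and CloseSession are defined later in the IDL.

  import org.apache.hive.service.rpc.thrift.*;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;

  public final class Hs2ClientSketch {
    public static void main(String[] args) throws Exception {
      TSocket transport = new TSocket("localhost", 10000);   // assumed endpoint
      transport.open();
      TCLIService.Client client = new TCLIService.Client(new TBinaryProtocol(transport));

      // OpenSession: client_protocol defaults to V10 as declared above.
      TSessionHandle session = client.OpenSession(new TOpenSessionReq()).getSessionHandle();

      // ExecuteStatement: runAsync defaults to false, so this call blocks until the
      // statement finishes and the OperationHandle is ready for fetching.
      TExecuteStatementResp exec =
          client.ExecuteStatement(new TExecuteStatementReq(session, "SELECT 1"));
      TOperationHandle op = exec.getOperationHandle();

      TFetchResultsResp fetch = client.FetchResults(
          new TFetchResultsReq(op, TFetchOrientation.FETCH_NEXT, 100));
      System.out.println("fetched " + fetch.getResults().getColumnsSize() + " columns");

      client.CloseOperation(new TCloseOperationReq(op));
      client.CloseSession(new TCloseSessionReq(session));
      transport.close();
    }
  }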
-struct TExecuteStatementReq { - // The session to execute the statement against - 1: required TSessionHandle sessionHandle - - // The statement to be executed (DML, DDL, SET, etc) - 2: required string statement - - // Configuration properties that are overlayed on top of the - // the existing session configuration before this statement - // is executed. These properties apply to this statement - // only and will not affect the subsequent state of the Session. - 3: optional map confOverlay - - // Execute asynchronously when runAsync is true - 4: optional bool runAsync = false - - // The number of seconds after which the query will timeout on the server - 5: optional i64 queryTimeout = 0 -} - -struct TExecuteStatementResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - -// GetTypeInfo() -// -// Get information about types supported by the HiveServer instance. -// The information is returned as a result set which can be fetched -// using the OperationHandle provided in the response. -// -// Refer to the documentation for ODBC's CLIGetTypeInfo function for -// the format of the result set. -struct TGetTypeInfoReq { - // The session to run this request against. - 1: required TSessionHandle sessionHandle -} - -struct TGetTypeInfoResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetCatalogs() -// -// Returns the list of catalogs (databases) -// Results are ordered by TABLE_CATALOG -// -// Resultset columns : -// col1 -// name: TABLE_CAT -// type: STRING -// desc: Catalog name. NULL if not applicable. -// -struct TGetCatalogsReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle -} - -struct TGetCatalogsResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetSchemas() -// -// Retrieves the schema names available in this database. -// The results are ordered by TABLE_CATALOG and TABLE_SCHEM. -// col1 -// name: TABLE_SCHEM -// type: STRING -// desc: schema name -// col2 -// name: TABLE_CATALOG -// type: STRING -// desc: catalog name -struct TGetSchemasReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the catalog. Must not contain a search pattern. - 2: optional TIdentifier catalogName - - // schema name or pattern - 3: optional TPatternOrIdentifier schemaName -} - -struct TGetSchemasResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetTables() -// -// Returns a list of tables with catalog, schema, and table -// type information. The information is returned as a result -// set which can be fetched using the OperationHandle -// provided in the response. -// Results are ordered by TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, and TABLE_NAME -// -// Result Set Columns: -// -// col1 -// name: TABLE_CAT -// type: STRING -// desc: Catalog name. NULL if not applicable. -// -// col2 -// name: TABLE_SCHEM -// type: STRING -// desc: Schema name. -// -// col3 -// name: TABLE_NAME -// type: STRING -// desc: Table name. -// -// col4 -// name: TABLE_TYPE -// type: STRING -// desc: The table type, e.g. "TABLE", "VIEW", etc. -// -// col5 -// name: REMARKS -// type: STRING -// desc: Comments about the table -// -struct TGetTablesReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the catalog or a search pattern. - 2: optional TPatternOrIdentifier catalogName - - // Name of the schema or a search pattern. 
- 3: optional TPatternOrIdentifier schemaName - - // Name of the table or a search pattern. - 4: optional TPatternOrIdentifier tableName - - // List of table types to match - // e.g. "TABLE", "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY", - // "LOCAL TEMPORARY", "ALIAS", "SYNONYM", etc. - 5: optional list tableTypes -} - -struct TGetTablesResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetTableTypes() -// -// Returns the table types available in this database. -// The results are ordered by table type. -// -// col1 -// name: TABLE_TYPE -// type: STRING -// desc: Table type name. -struct TGetTableTypesReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle -} - -struct TGetTableTypesResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetColumns() -// -// Returns a list of columns in the specified tables. -// The information is returned as a result set which can be fetched -// using the OperationHandle provided in the response. -// Results are ordered by TABLE_CAT, TABLE_SCHEM, TABLE_NAME, -// and ORDINAL_POSITION. -// -// Result Set Columns are the same as those for the ODBC CLIColumns -// function. -// -struct TGetColumnsReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the catalog. Must not contain a search pattern. - 2: optional TIdentifier catalogName - - // Schema name or search pattern - 3: optional TPatternOrIdentifier schemaName - - // Table name or search pattern - 4: optional TPatternOrIdentifier tableName - - // Column name or search pattern - 5: optional TPatternOrIdentifier columnName -} - -struct TGetColumnsResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - - -// GetFunctions() -// -// Returns a list of functions supported by the data source. The -// behavior of this function matches -// java.sql.DatabaseMetaData.getFunctions() both in terms of -// inputs and outputs. -// -// Result Set Columns: -// -// col1 -// name: FUNCTION_CAT -// type: STRING -// desc: Function catalog (may be null) -// -// col2 -// name: FUNCTION_SCHEM -// type: STRING -// desc: Function schema (may be null) -// -// col3 -// name: FUNCTION_NAME -// type: STRING -// desc: Function name. This is the name used to invoke the function. -// -// col4 -// name: REMARKS -// type: STRING -// desc: Explanatory comment on the function. -// -// col5 -// name: FUNCTION_TYPE -// type: SMALLINT -// desc: Kind of function. One of: -// * functionResultUnknown - Cannot determine if a return value or a table -// will be returned. -// * functionNoTable - Does not a return a table. -// * functionReturnsTable - Returns a table. -// -// col6 -// name: SPECIFIC_NAME -// type: STRING -// desc: The name which uniquely identifies this function within its schema. -// In this case this is the fully qualified class name of the class -// that implements this function. -// -struct TGetFunctionsReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // A catalog name; must match the catalog name as it is stored in the - // database; "" retrieves those without a catalog; null means - // that the catalog name should not be used to narrow the search. - 2: optional TIdentifier catalogName - - // A schema name pattern; must match the schema name as it is stored - // in the database; "" retrieves those without a schema; null means - // that the schema name should not be used to narrow the search. 
- 3: optional TPatternOrIdentifier schemaName - - // A function name pattern; must match the function name as it is stored - // in the database. - 4: required TPatternOrIdentifier functionName -} - -struct TGetFunctionsResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - -struct TGetPrimaryKeysReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the catalog. - 2: optional TIdentifier catalogName - - // Name of the schema. - 3: optional TIdentifier schemaName - - // Name of the table. - 4: optional TIdentifier tableName -} - -struct TGetPrimaryKeysResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - -struct TGetCrossReferenceReq { - // Session to run this request against - 1: required TSessionHandle sessionHandle - - // Name of the parent catalog. - 2: optional TIdentifier parentCatalogName - - // Name of the parent schema. - 3: optional TIdentifier parentSchemaName - - // Name of the parent table. - 4: optional TIdentifier parentTableName - - // Name of the foreign catalog. - 5: optional TIdentifier foreignCatalogName - - // Name of the foreign schema. - 6: optional TIdentifier foreignSchemaName - - // Name of the foreign table. - 7: optional TIdentifier foreignTableName -} - -struct TGetCrossReferenceResp { - 1: required TStatus status - 2: optional TOperationHandle operationHandle -} - -// GetOperationStatus() -// -// Get the status of an operation running on the server. -struct TGetOperationStatusReq { - // Session to run this request against - 1: required TOperationHandle operationHandle - // optional arguments to get progress information - 2: optional bool getProgressUpdate -} - -struct TGetOperationStatusResp { - 1: required TStatus status - 2: optional TOperationState operationState - - // If operationState is ERROR_STATE, then the following fields may be set - // sqlState as defined in the ISO/IEF CLI specification - 3: optional string sqlState - - // Internal error code - 4: optional i32 errorCode - - // Error message - 5: optional string errorMessage - - // List of statuses of sub tasks - 6: optional string taskStatus - - // When was the operation started - 7: optional i64 operationStarted - - // When was the operation completed - 8: optional i64 operationCompleted - - // If the operation has the result - 9: optional bool hasResultSet - - 10: optional TProgressUpdateResp progressUpdateResponse - -} - - -// CancelOperation() -// -// Cancels processing on the specified operation handle and -// frees any resources which were allocated. -struct TCancelOperationReq { - // Operation to cancel - 1: required TOperationHandle operationHandle -} - -struct TCancelOperationResp { - 1: required TStatus status -} - - -// CloseOperation() -// -// Given an operation in the FINISHED, CANCELED, -// or ERROR states, CloseOperation() will free -// all of the resources which were allocated on -// the server to service the operation. -struct TCloseOperationReq { - 1: required TOperationHandle operationHandle -} - -struct TCloseOperationResp { - 1: required TStatus status -} - - -// GetResultSetMetadata() -// -// Retrieves schema information for the specified operation -struct TGetResultSetMetadataReq { - // Operation for which to fetch result set schema information - 1: required TOperationHandle operationHandle -} - -struct TGetResultSetMetadataResp { - 1: required TStatus status - 2: optional TTableSchema schema -} - - -enum TFetchOrientation { - // Get the next rowset. 
The fetch offset is ignored. - FETCH_NEXT, - - // Get the previous rowset. The fetch offset is ignored. - FETCH_PRIOR, - - // Return the rowset at the given fetch offset relative - // to the curren rowset. - // NOT SUPPORTED - FETCH_RELATIVE, - - // Return the rowset at the specified fetch offset. - // NOT SUPPORTED - FETCH_ABSOLUTE, - - // Get the first rowset in the result set. - FETCH_FIRST, - - // Get the last rowset in the result set. - // NOT SUPPORTED - FETCH_LAST -} - -// FetchResults() -// -// Fetch rows from the server corresponding to -// a particular OperationHandle. -struct TFetchResultsReq { - // Operation from which to fetch results. - 1: required TOperationHandle operationHandle - - // The fetch orientation. This must be either - // FETCH_NEXT, FETCH_PRIOR or FETCH_FIRST. Defaults to FETCH_NEXT. - 2: required TFetchOrientation orientation = TFetchOrientation.FETCH_NEXT - - // Max number of rows that should be returned in - // the rowset. - 3: required i64 maxRows - - // The type of a fetch results request. 0 represents Query output. 1 represents Log - 4: optional i16 fetchType = 0 -} - -struct TFetchResultsResp { - 1: required TStatus status - - // TRUE if there are more rows left to fetch from the server. - 2: optional bool hasMoreRows - - // The rowset. This is optional so that we have the - // option in the future of adding alternate formats for - // representing result set data, e.g. delimited strings, - // binary encoded, etc. - 3: optional TRowSet results -} - -// GetDelegationToken() -// Retrieve delegation token for the current user -struct TGetDelegationTokenReq { - // session handle - 1: required TSessionHandle sessionHandle - - // userid for the proxy user - 2: required string owner - - // designated renewer userid - 3: required string renewer -} - -struct TGetDelegationTokenResp { - // status of the request - 1: required TStatus status - - // delegation token string - 2: optional string delegationToken -} - -// CancelDelegationToken() -// Cancel the given delegation token -struct TCancelDelegationTokenReq { - // session handle - 1: required TSessionHandle sessionHandle - - // delegation token to cancel - 2: required string delegationToken -} - -struct TCancelDelegationTokenResp { - // status of the request - 1: required TStatus status -} - -// RenewDelegationToken() -// Renew the given delegation token -struct TRenewDelegationTokenReq { - // session handle - 1: required TSessionHandle sessionHandle - - // delegation token to renew - 2: required string delegationToken -} - -struct TRenewDelegationTokenResp { - // status of the request - 1: required TStatus status -} - -enum TJobExecutionStatus { - IN_PROGRESS, - COMPLETE, - NOT_AVAILABLE -} - -struct TProgressUpdateResp { - 1: required list headerNames - 2: required list> rows - 3: required double progressedPercentage - 4: required TJobExecutionStatus status - 5: required string footerSummary - 6: required i64 startTime -} - -service TCLIService { - - TOpenSessionResp OpenSession(1:TOpenSessionReq req); - - TCloseSessionResp CloseSession(1:TCloseSessionReq req); - - TGetInfoResp GetInfo(1:TGetInfoReq req); - - TExecuteStatementResp ExecuteStatement(1:TExecuteStatementReq req); - - TGetTypeInfoResp GetTypeInfo(1:TGetTypeInfoReq req); - - TGetCatalogsResp GetCatalogs(1:TGetCatalogsReq req); - - TGetSchemasResp GetSchemas(1:TGetSchemasReq req); - - TGetTablesResp GetTables(1:TGetTablesReq req); - - TGetTableTypesResp GetTableTypes(1:TGetTableTypesReq req); - - TGetColumnsResp GetColumns(1:TGetColumnsReq req); - - 
TGetFunctionsResp GetFunctions(1:TGetFunctionsReq req); - - TGetPrimaryKeysResp GetPrimaryKeys(1:TGetPrimaryKeysReq req); - - TGetCrossReferenceResp GetCrossReference(1:TGetCrossReferenceReq req); - - TGetOperationStatusResp GetOperationStatus(1:TGetOperationStatusReq req); - - TCancelOperationResp CancelOperation(1:TCancelOperationReq req); - - TCloseOperationResp CloseOperation(1:TCloseOperationReq req); - - TGetResultSetMetadataResp GetResultSetMetadata(1:TGetResultSetMetadataReq req); - - TFetchResultsResp FetchResults(1:TFetchResultsReq req); - - TGetDelegationTokenResp GetDelegationToken(1:TGetDelegationTokenReq req); - - TCancelDelegationTokenResp CancelDelegationToken(1:TCancelDelegationTokenReq req); - - TRenewDelegationTokenResp RenewDelegationToken(1:TRenewDelegationTokenReq req); -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TArrayTypeEntry.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TArrayTypeEntry.java deleted file mode 100644 index 358e322632144..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TArrayTypeEntry.java +++ /dev/null @@ -1,387 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TArrayTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TArrayTypeEntry"); - - private static final org.apache.thrift.protocol.TField OBJECT_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("objectTypePtr", org.apache.thrift.protocol.TType.I32, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TArrayTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TArrayTypeEntryTupleSchemeFactory()); - } - - private int objectTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OBJECT_TYPE_PTR((short)1, "objectTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OBJECT_TYPE_PTR - return OBJECT_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __OBJECTTYPEPTR_ISSET_ID = 0; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OBJECT_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("objectTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TArrayTypeEntry.class, metaDataMap); - } - - public TArrayTypeEntry() { - } - - public TArrayTypeEntry( - int objectTypePtr) - { - this(); - this.objectTypePtr = objectTypePtr; - setObjectTypePtrIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public TArrayTypeEntry(TArrayTypeEntry other) { - __isset_bitfield = other.__isset_bitfield; - this.objectTypePtr = other.objectTypePtr; - } - - public TArrayTypeEntry deepCopy() { - return new TArrayTypeEntry(this); - } - - @Override - public void clear() { - setObjectTypePtrIsSet(false); - this.objectTypePtr = 0; - } - - public int getObjectTypePtr() { - return this.objectTypePtr; - } - - public void setObjectTypePtr(int objectTypePtr) { - this.objectTypePtr = objectTypePtr; - setObjectTypePtrIsSet(true); - } - - public void unsetObjectTypePtr() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __OBJECTTYPEPTR_ISSET_ID); - } - - /** Returns true if field objectTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetObjectTypePtr() { - return EncodingUtils.testBit(__isset_bitfield, __OBJECTTYPEPTR_ISSET_ID); - } - - public void setObjectTypePtrIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __OBJECTTYPEPTR_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OBJECT_TYPE_PTR: - if (value == null) { - unsetObjectTypePtr(); - } else { - setObjectTypePtr((Integer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OBJECT_TYPE_PTR: - return getObjectTypePtr(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OBJECT_TYPE_PTR: - return isSetObjectTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TArrayTypeEntry) - return this.equals((TArrayTypeEntry)that); - return false; - } - - public boolean equals(TArrayTypeEntry that) { - if (that == null) - return false; - - boolean this_present_objectTypePtr = true; - boolean that_present_objectTypePtr = true; - if (this_present_objectTypePtr || that_present_objectTypePtr) { - if (!(this_present_objectTypePtr && that_present_objectTypePtr)) - return false; - if (this.objectTypePtr != that.objectTypePtr) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_objectTypePtr = true; - list.add(present_objectTypePtr); - if (present_objectTypePtr) - list.add(objectTypePtr); - - return list.hashCode(); - } - - @Override - public int compareTo(TArrayTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetObjectTypePtr()).compareTo(other.isSetObjectTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetObjectTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.objectTypePtr, other.objectTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - 
schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TArrayTypeEntry("); - boolean first = true; - - sb.append("objectTypePtr:"); - sb.append(this.objectTypePtr); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetObjectTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'objectTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TArrayTypeEntryStandardSchemeFactory implements SchemeFactory { - public TArrayTypeEntryStandardScheme getScheme() { - return new TArrayTypeEntryStandardScheme(); - } - } - - private static class TArrayTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OBJECT_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.objectTypePtr = iprot.readI32(); - struct.setObjectTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldBegin(OBJECT_TYPE_PTR_FIELD_DESC); - oprot.writeI32(struct.objectTypePtr); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TArrayTypeEntryTupleSchemeFactory implements SchemeFactory { - public TArrayTypeEntryTupleScheme getScheme() { - return new TArrayTypeEntryTupleScheme(); - } - } - - private static class TArrayTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.objectTypePtr); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TArrayTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - 
struct.objectTypePtr = iprot.readI32(); - struct.setObjectTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBinaryColumn.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBinaryColumn.java deleted file mode 100644 index a869cee007c0b..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBinaryColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TBinaryColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBinaryColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TBinaryColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TBinaryColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TBinaryColumn.class, metaDataMap); - } - - public TBinaryColumn() { - } - - public TBinaryColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TBinaryColumn(TBinaryColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TBinaryColumn deepCopy() { - return new TBinaryColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(ByteBuffer elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TBinaryColumn) - return this.equals((TBinaryColumn)that); - return false; - } - - public boolean equals(TBinaryColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if (present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int compareTo(TBinaryColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void 
read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TBinaryColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.values, sb); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TBinaryColumnStandardSchemeFactory implements SchemeFactory { - public TBinaryColumnStandardScheme getScheme() { - return new TBinaryColumnStandardScheme(); - } - } - - private static class TBinaryColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TBinaryColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list110 = iprot.readListBegin(); - struct.values = new ArrayList(_list110.size); - ByteBuffer _elem111; - for (int _i112 = 0; _i112 < _list110.size; ++_i112) - { - _elem111 = iprot.readBinary(); - struct.values.add(_elem111); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, 
TBinaryColumn struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); - for (ByteBuffer _iter113 : struct.values) - { - oprot.writeBinary(_iter113); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TBinaryColumnTupleSchemeFactory implements SchemeFactory { - public TBinaryColumnTupleScheme getScheme() { - return new TBinaryColumnTupleScheme(); - } - } - - private static class TBinaryColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TBinaryColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (ByteBuffer _iter114 : struct.values) - { - oprot.writeBinary(_iter114); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TBinaryColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list115 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.values = new ArrayList(_list115.size); - ByteBuffer _elem116; - for (int _i117 = 0; _i117 < _list115.size; ++_i117) - { - _elem116 = iprot.readBinary(); - struct.values.add(_elem116); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBoolColumn.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBoolColumn.java deleted file mode 100644 index 9bb636672aa1e..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBoolColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TBoolColumn 
implements org.apache.thrift.TBase<TBoolColumn, TBoolColumn._Fields>, java.io.Serializable, Cloneable, Comparable<TBoolColumn> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBoolColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TBoolColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TBoolColumnTupleSchemeFactory()); - } - - private List<Boolean> values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TBoolColumn.class, metaDataMap); - } - - public TBoolColumn() { - } - - public TBoolColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TBoolColumn(TBoolColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TBoolColumn deepCopy() { - return new TBoolColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(boolean elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TBoolColumn) - return this.equals((TBoolColumn)that); - return false; - } - - public boolean equals(TBoolColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if (present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int compareTo(TBoolColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void 
read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TBoolColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TBoolColumnStandardSchemeFactory implements SchemeFactory { - public TBoolColumnStandardScheme getScheme() { - return new TBoolColumnStandardScheme(); - } - } - - private static class TBoolColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TBoolColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list54 = iprot.readListBegin(); - struct.values = new ArrayList(_list54.size); - boolean _elem55; - for (int _i56 = 0; _i56 < _list54.size; ++_i56) - { - _elem55 = iprot.readBool(); - struct.values.add(_elem55); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TBoolColumn struct) throws org.apache.thrift.TException { - 
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BOOL, struct.values.size())); - for (boolean _iter57 : struct.values) - { - oprot.writeBool(_iter57); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TBoolColumnTupleSchemeFactory implements SchemeFactory { - public TBoolColumnTupleScheme getScheme() { - return new TBoolColumnTupleScheme(); - } - } - - private static class TBoolColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TBoolColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (boolean _iter58 : struct.values) - { - oprot.writeBool(_iter58); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TBoolColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list59 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BOOL, iprot.readI32()); - struct.values = new ArrayList(_list59.size); - boolean _elem60; - for (int _i61 = 0; _i61 < _list59.size; ++_i61) - { - _elem60 = iprot.readBool(); - struct.values.add(_elem60); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBoolValue.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBoolValue.java deleted file mode 100644 index 87b3070a89b11..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TBoolValue.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TBoolValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TBoolValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.BOOL, (short)1); - - private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TBoolValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TBoolValueTupleSchemeFactory()); - } - - private boolean value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TBoolValue.class, metaDataMap); - } - - public TBoolValue() { - } - - /** - * Performs a deep copy on other. 
- */ - public TBoolValue(TBoolValue other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TBoolValue deepCopy() { - return new TBoolValue(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = false; - } - - public boolean isValue() { - return this.value; - } - - public void setValue(boolean value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Boolean)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return isValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TBoolValue) - return this.equals((TBoolValue)that); - return false; - } - - public boolean equals(TBoolValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); - - return list.hashCode(); - } - - @Override - public int compareTo(TBoolValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TBoolValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return 
sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TBoolValueStandardSchemeFactory implements SchemeFactory { - public TBoolValueStandardScheme getScheme() { - return new TBoolValueStandardScheme(); - } - } - - private static class TBoolValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TBoolValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.value = iprot.readBool(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TBoolValue struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeBool(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TBoolValueTupleSchemeFactory implements SchemeFactory { - public TBoolValueTupleScheme getScheme() { - return new TBoolValueTupleScheme(); - } - } - - private static class TBoolValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TBoolValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeBool(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TBoolValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readBool(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TByteColumn.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TByteColumn.java deleted file mode 100644 index 
68b3d3c31eb03..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TByteColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TByteColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TByteColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TByteColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TByteColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TByteColumn.class, metaDataMap); - } - - public TByteColumn() { - } - - public TByteColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TByteColumn(TByteColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TByteColumn deepCopy() { - return new TByteColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(byte elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TByteColumn) - return this.equals((TByteColumn)that); - return false; - } - - public boolean equals(TByteColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if (present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int compareTo(TByteColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void 
read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TByteColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TByteColumnStandardSchemeFactory implements SchemeFactory { - public TByteColumnStandardScheme getScheme() { - return new TByteColumnStandardScheme(); - } - } - - private static class TByteColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TByteColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list62 = iprot.readListBegin(); - struct.values = new ArrayList(_list62.size); - byte _elem63; - for (int _i64 = 0; _i64 < _list62.size; ++_i64) - { - _elem63 = iprot.readByte(); - struct.values.add(_elem63); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TByteColumn struct) throws org.apache.thrift.TException { - 
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BYTE, struct.values.size())); - for (byte _iter65 : struct.values) - { - oprot.writeByte(_iter65); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TByteColumnTupleSchemeFactory implements SchemeFactory { - public TByteColumnTupleScheme getScheme() { - return new TByteColumnTupleScheme(); - } - } - - private static class TByteColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TByteColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (byte _iter66 : struct.values) - { - oprot.writeByte(_iter66); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TByteColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list67 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.BYTE, iprot.readI32()); - struct.values = new ArrayList(_list67.size); - byte _elem68; - for (int _i69 = 0; _i69 < _list67.size; ++_i69) - { - _elem68 = iprot.readByte(); - struct.values.add(_elem68); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TByteValue.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TByteValue.java deleted file mode 100644 index a3d5951335fa7..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TByteValue.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TByteValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TByteValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.BYTE, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TByteValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TByteValueTupleSchemeFactory()); - } - - private byte value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BYTE))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TByteValue.class, metaDataMap); - } - - public TByteValue() { - } - - /** - * Performs a deep copy on other. 
- */ - public TByteValue(TByteValue other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TByteValue deepCopy() { - return new TByteValue(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public byte getValue() { - return this.value; - } - - public void setValue(byte value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Byte)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return getValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TByteValue) - return this.equals((TByteValue)that); - return false; - } - - public boolean equals(TByteValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); - - return list.hashCode(); - } - - @Override - public int compareTo(TByteValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TByteValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - 
- public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TByteValueStandardSchemeFactory implements SchemeFactory { - public TByteValueStandardScheme getScheme() { - return new TByteValueStandardScheme(); - } - } - - private static class TByteValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TByteValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.BYTE) { - struct.value = iprot.readByte(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TByteValue struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeByte(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TByteValueTupleSchemeFactory implements SchemeFactory { - public TByteValueTupleScheme getScheme() { - return new TByteValueTupleScheme(); - } - } - - private static class TByteValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TByteValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeByte(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TByteValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readByte(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCLIService.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCLIService.java deleted file mode 100644 index 6584c24a0142a..0000000000000 --- 
a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCLIService.java +++ /dev/null @@ -1,18138 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCLIService { - - public interface Iface { - - public TOpenSessionResp OpenSession(TOpenSessionReq req) throws org.apache.thrift.TException; - - public TCloseSessionResp CloseSession(TCloseSessionReq req) throws org.apache.thrift.TException; - - public TGetInfoResp GetInfo(TGetInfoReq req) throws org.apache.thrift.TException; - - public TExecuteStatementResp ExecuteStatement(TExecuteStatementReq req) throws org.apache.thrift.TException; - - public TGetTypeInfoResp GetTypeInfo(TGetTypeInfoReq req) throws org.apache.thrift.TException; - - public TGetCatalogsResp GetCatalogs(TGetCatalogsReq req) throws org.apache.thrift.TException; - - public TGetSchemasResp GetSchemas(TGetSchemasReq req) throws org.apache.thrift.TException; - - public TGetTablesResp GetTables(TGetTablesReq req) throws org.apache.thrift.TException; - - public TGetTableTypesResp GetTableTypes(TGetTableTypesReq req) throws org.apache.thrift.TException; - - public TGetColumnsResp GetColumns(TGetColumnsReq req) throws org.apache.thrift.TException; - - public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException; - - public TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException; - - public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException; - - public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException; - - public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException; - - public TCloseOperationResp CloseOperation(TCloseOperationReq req) throws org.apache.thrift.TException; - - public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req) throws org.apache.thrift.TException; - - public TFetchResultsResp FetchResults(TFetchResultsReq req) throws org.apache.thrift.TException; - - public TGetDelegationTokenResp GetDelegationToken(TGetDelegationTokenReq req) throws org.apache.thrift.TException; - - public TCancelDelegationTokenResp CancelDelegationToken(TCancelDelegationTokenReq req) throws org.apache.thrift.TException; - - public TRenewDelegationTokenResp 
RenewDelegationToken(TRenewDelegationTokenReq req) throws org.apache.thrift.TException; - - } - - public interface AsyncIface { - - public void OpenSession(TOpenSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void CloseSession(TCloseSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetInfo(TGetInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void ExecuteStatement(TExecuteStatementReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetTypeInfo(TGetTypeInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetCatalogs(TGetCatalogsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetSchemas(TGetSchemasReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetTables(TGetTablesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetTableTypes(TGetTableTypesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetColumns(TGetColumnsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetFunctions(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetPrimaryKeys(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetCrossReference(TGetCrossReferenceReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetOperationStatus(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void CancelOperation(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void CloseOperation(TCloseOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetResultSetMetadata(TGetResultSetMetadataReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void FetchResults(TFetchResultsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void GetDelegationToken(TGetDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void CancelDelegationToken(TCancelDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - public void RenewDelegationToken(TRenewDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; - - } - - public static class Client extends org.apache.thrift.TServiceClient implements Iface { - public static class Factory implements org.apache.thrift.TServiceClientFactory { - public Factory() {} - public Client 
getClient(org.apache.thrift.protocol.TProtocol prot) { - return new Client(prot); - } - public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { - return new Client(iprot, oprot); - } - } - - public Client(org.apache.thrift.protocol.TProtocol prot) - { - super(prot, prot); - } - - public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { - super(iprot, oprot); - } - - public TOpenSessionResp OpenSession(TOpenSessionReq req) throws org.apache.thrift.TException - { - send_OpenSession(req); - return recv_OpenSession(); - } - - public void send_OpenSession(TOpenSessionReq req) throws org.apache.thrift.TException - { - OpenSession_args args = new OpenSession_args(); - args.setReq(req); - sendBase("OpenSession", args); - } - - public TOpenSessionResp recv_OpenSession() throws org.apache.thrift.TException - { - OpenSession_result result = new OpenSession_result(); - receiveBase(result, "OpenSession"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "OpenSession failed: unknown result"); - } - - public TCloseSessionResp CloseSession(TCloseSessionReq req) throws org.apache.thrift.TException - { - send_CloseSession(req); - return recv_CloseSession(); - } - - public void send_CloseSession(TCloseSessionReq req) throws org.apache.thrift.TException - { - CloseSession_args args = new CloseSession_args(); - args.setReq(req); - sendBase("CloseSession", args); - } - - public TCloseSessionResp recv_CloseSession() throws org.apache.thrift.TException - { - CloseSession_result result = new CloseSession_result(); - receiveBase(result, "CloseSession"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CloseSession failed: unknown result"); - } - - public TGetInfoResp GetInfo(TGetInfoReq req) throws org.apache.thrift.TException - { - send_GetInfo(req); - return recv_GetInfo(); - } - - public void send_GetInfo(TGetInfoReq req) throws org.apache.thrift.TException - { - GetInfo_args args = new GetInfo_args(); - args.setReq(req); - sendBase("GetInfo", args); - } - - public TGetInfoResp recv_GetInfo() throws org.apache.thrift.TException - { - GetInfo_result result = new GetInfo_result(); - receiveBase(result, "GetInfo"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetInfo failed: unknown result"); - } - - public TExecuteStatementResp ExecuteStatement(TExecuteStatementReq req) throws org.apache.thrift.TException - { - send_ExecuteStatement(req); - return recv_ExecuteStatement(); - } - - public void send_ExecuteStatement(TExecuteStatementReq req) throws org.apache.thrift.TException - { - ExecuteStatement_args args = new ExecuteStatement_args(); - args.setReq(req); - sendBase("ExecuteStatement", args); - } - - public TExecuteStatementResp recv_ExecuteStatement() throws org.apache.thrift.TException - { - ExecuteStatement_result result = new ExecuteStatement_result(); - receiveBase(result, "ExecuteStatement"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "ExecuteStatement failed: unknown result"); - } - - public TGetTypeInfoResp 
GetTypeInfo(TGetTypeInfoReq req) throws org.apache.thrift.TException - { - send_GetTypeInfo(req); - return recv_GetTypeInfo(); - } - - public void send_GetTypeInfo(TGetTypeInfoReq req) throws org.apache.thrift.TException - { - GetTypeInfo_args args = new GetTypeInfo_args(); - args.setReq(req); - sendBase("GetTypeInfo", args); - } - - public TGetTypeInfoResp recv_GetTypeInfo() throws org.apache.thrift.TException - { - GetTypeInfo_result result = new GetTypeInfo_result(); - receiveBase(result, "GetTypeInfo"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetTypeInfo failed: unknown result"); - } - - public TGetCatalogsResp GetCatalogs(TGetCatalogsReq req) throws org.apache.thrift.TException - { - send_GetCatalogs(req); - return recv_GetCatalogs(); - } - - public void send_GetCatalogs(TGetCatalogsReq req) throws org.apache.thrift.TException - { - GetCatalogs_args args = new GetCatalogs_args(); - args.setReq(req); - sendBase("GetCatalogs", args); - } - - public TGetCatalogsResp recv_GetCatalogs() throws org.apache.thrift.TException - { - GetCatalogs_result result = new GetCatalogs_result(); - receiveBase(result, "GetCatalogs"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetCatalogs failed: unknown result"); - } - - public TGetSchemasResp GetSchemas(TGetSchemasReq req) throws org.apache.thrift.TException - { - send_GetSchemas(req); - return recv_GetSchemas(); - } - - public void send_GetSchemas(TGetSchemasReq req) throws org.apache.thrift.TException - { - GetSchemas_args args = new GetSchemas_args(); - args.setReq(req); - sendBase("GetSchemas", args); - } - - public TGetSchemasResp recv_GetSchemas() throws org.apache.thrift.TException - { - GetSchemas_result result = new GetSchemas_result(); - receiveBase(result, "GetSchemas"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetSchemas failed: unknown result"); - } - - public TGetTablesResp GetTables(TGetTablesReq req) throws org.apache.thrift.TException - { - send_GetTables(req); - return recv_GetTables(); - } - - public void send_GetTables(TGetTablesReq req) throws org.apache.thrift.TException - { - GetTables_args args = new GetTables_args(); - args.setReq(req); - sendBase("GetTables", args); - } - - public TGetTablesResp recv_GetTables() throws org.apache.thrift.TException - { - GetTables_result result = new GetTables_result(); - receiveBase(result, "GetTables"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetTables failed: unknown result"); - } - - public TGetTableTypesResp GetTableTypes(TGetTableTypesReq req) throws org.apache.thrift.TException - { - send_GetTableTypes(req); - return recv_GetTableTypes(); - } - - public void send_GetTableTypes(TGetTableTypesReq req) throws org.apache.thrift.TException - { - GetTableTypes_args args = new GetTableTypes_args(); - args.setReq(req); - sendBase("GetTableTypes", args); - } - - public TGetTableTypesResp recv_GetTableTypes() throws org.apache.thrift.TException - { - GetTableTypes_result result = new GetTableTypes_result(); - receiveBase(result, "GetTableTypes"); - if (result.isSetSuccess()) { - return 
result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetTableTypes failed: unknown result"); - } - - public TGetColumnsResp GetColumns(TGetColumnsReq req) throws org.apache.thrift.TException - { - send_GetColumns(req); - return recv_GetColumns(); - } - - public void send_GetColumns(TGetColumnsReq req) throws org.apache.thrift.TException - { - GetColumns_args args = new GetColumns_args(); - args.setReq(req); - sendBase("GetColumns", args); - } - - public TGetColumnsResp recv_GetColumns() throws org.apache.thrift.TException - { - GetColumns_result result = new GetColumns_result(); - receiveBase(result, "GetColumns"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetColumns failed: unknown result"); - } - - public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException - { - send_GetFunctions(req); - return recv_GetFunctions(); - } - - public void send_GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException - { - GetFunctions_args args = new GetFunctions_args(); - args.setReq(req); - sendBase("GetFunctions", args); - } - - public TGetFunctionsResp recv_GetFunctions() throws org.apache.thrift.TException - { - GetFunctions_result result = new GetFunctions_result(); - receiveBase(result, "GetFunctions"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetFunctions failed: unknown result"); - } - - public TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException - { - send_GetPrimaryKeys(req); - return recv_GetPrimaryKeys(); - } - - public void send_GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException - { - GetPrimaryKeys_args args = new GetPrimaryKeys_args(); - args.setReq(req); - sendBase("GetPrimaryKeys", args); - } - - public TGetPrimaryKeysResp recv_GetPrimaryKeys() throws org.apache.thrift.TException - { - GetPrimaryKeys_result result = new GetPrimaryKeys_result(); - receiveBase(result, "GetPrimaryKeys"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetPrimaryKeys failed: unknown result"); - } - - public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException - { - send_GetCrossReference(req); - return recv_GetCrossReference(); - } - - public void send_GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException - { - GetCrossReference_args args = new GetCrossReference_args(); - args.setReq(req); - sendBase("GetCrossReference", args); - } - - public TGetCrossReferenceResp recv_GetCrossReference() throws org.apache.thrift.TException - { - GetCrossReference_result result = new GetCrossReference_result(); - receiveBase(result, "GetCrossReference"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetCrossReference failed: unknown result"); - } - - public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException - { - send_GetOperationStatus(req); - return recv_GetOperationStatus(); - } - - public void 
send_GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException - { - GetOperationStatus_args args = new GetOperationStatus_args(); - args.setReq(req); - sendBase("GetOperationStatus", args); - } - - public TGetOperationStatusResp recv_GetOperationStatus() throws org.apache.thrift.TException - { - GetOperationStatus_result result = new GetOperationStatus_result(); - receiveBase(result, "GetOperationStatus"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetOperationStatus failed: unknown result"); - } - - public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException - { - send_CancelOperation(req); - return recv_CancelOperation(); - } - - public void send_CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException - { - CancelOperation_args args = new CancelOperation_args(); - args.setReq(req); - sendBase("CancelOperation", args); - } - - public TCancelOperationResp recv_CancelOperation() throws org.apache.thrift.TException - { - CancelOperation_result result = new CancelOperation_result(); - receiveBase(result, "CancelOperation"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CancelOperation failed: unknown result"); - } - - public TCloseOperationResp CloseOperation(TCloseOperationReq req) throws org.apache.thrift.TException - { - send_CloseOperation(req); - return recv_CloseOperation(); - } - - public void send_CloseOperation(TCloseOperationReq req) throws org.apache.thrift.TException - { - CloseOperation_args args = new CloseOperation_args(); - args.setReq(req); - sendBase("CloseOperation", args); - } - - public TCloseOperationResp recv_CloseOperation() throws org.apache.thrift.TException - { - CloseOperation_result result = new CloseOperation_result(); - receiveBase(result, "CloseOperation"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CloseOperation failed: unknown result"); - } - - public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req) throws org.apache.thrift.TException - { - send_GetResultSetMetadata(req); - return recv_GetResultSetMetadata(); - } - - public void send_GetResultSetMetadata(TGetResultSetMetadataReq req) throws org.apache.thrift.TException - { - GetResultSetMetadata_args args = new GetResultSetMetadata_args(); - args.setReq(req); - sendBase("GetResultSetMetadata", args); - } - - public TGetResultSetMetadataResp recv_GetResultSetMetadata() throws org.apache.thrift.TException - { - GetResultSetMetadata_result result = new GetResultSetMetadata_result(); - receiveBase(result, "GetResultSetMetadata"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetResultSetMetadata failed: unknown result"); - } - - public TFetchResultsResp FetchResults(TFetchResultsReq req) throws org.apache.thrift.TException - { - send_FetchResults(req); - return recv_FetchResults(); - } - - public void send_FetchResults(TFetchResultsReq req) throws org.apache.thrift.TException - { - FetchResults_args args = new FetchResults_args(); - args.setReq(req); - sendBase("FetchResults", args); - } - - public 
TFetchResultsResp recv_FetchResults() throws org.apache.thrift.TException - { - FetchResults_result result = new FetchResults_result(); - receiveBase(result, "FetchResults"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "FetchResults failed: unknown result"); - } - - public TGetDelegationTokenResp GetDelegationToken(TGetDelegationTokenReq req) throws org.apache.thrift.TException - { - send_GetDelegationToken(req); - return recv_GetDelegationToken(); - } - - public void send_GetDelegationToken(TGetDelegationTokenReq req) throws org.apache.thrift.TException - { - GetDelegationToken_args args = new GetDelegationToken_args(); - args.setReq(req); - sendBase("GetDelegationToken", args); - } - - public TGetDelegationTokenResp recv_GetDelegationToken() throws org.apache.thrift.TException - { - GetDelegationToken_result result = new GetDelegationToken_result(); - receiveBase(result, "GetDelegationToken"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetDelegationToken failed: unknown result"); - } - - public TCancelDelegationTokenResp CancelDelegationToken(TCancelDelegationTokenReq req) throws org.apache.thrift.TException - { - send_CancelDelegationToken(req); - return recv_CancelDelegationToken(); - } - - public void send_CancelDelegationToken(TCancelDelegationTokenReq req) throws org.apache.thrift.TException - { - CancelDelegationToken_args args = new CancelDelegationToken_args(); - args.setReq(req); - sendBase("CancelDelegationToken", args); - } - - public TCancelDelegationTokenResp recv_CancelDelegationToken() throws org.apache.thrift.TException - { - CancelDelegationToken_result result = new CancelDelegationToken_result(); - receiveBase(result, "CancelDelegationToken"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "CancelDelegationToken failed: unknown result"); - } - - public TRenewDelegationTokenResp RenewDelegationToken(TRenewDelegationTokenReq req) throws org.apache.thrift.TException - { - send_RenewDelegationToken(req); - return recv_RenewDelegationToken(); - } - - public void send_RenewDelegationToken(TRenewDelegationTokenReq req) throws org.apache.thrift.TException - { - RenewDelegationToken_args args = new RenewDelegationToken_args(); - args.setReq(req); - sendBase("RenewDelegationToken", args); - } - - public TRenewDelegationTokenResp recv_RenewDelegationToken() throws org.apache.thrift.TException - { - RenewDelegationToken_result result = new RenewDelegationToken_result(); - receiveBase(result, "RenewDelegationToken"); - if (result.isSetSuccess()) { - return result.success; - } - throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "RenewDelegationToken failed: unknown result"); - } - - } - public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface { - public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { - private org.apache.thrift.async.TAsyncClientManager clientManager; - private org.apache.thrift.protocol.TProtocolFactory protocolFactory; - public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) { - this.clientManager 
= clientManager; - this.protocolFactory = protocolFactory; - } - public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) { - return new AsyncClient(protocolFactory, clientManager, transport); - } - } - - public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) { - super(protocolFactory, clientManager, transport); - } - - public void OpenSession(TOpenSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - OpenSession_call method_call = new OpenSession_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class OpenSession_call extends org.apache.thrift.async.TAsyncMethodCall { - private TOpenSessionReq req; - public OpenSession_call(TOpenSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("OpenSession", org.apache.thrift.protocol.TMessageType.CALL, 0)); - OpenSession_args args = new OpenSession_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TOpenSessionResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_OpenSession(); - } - } - - public void CloseSession(TCloseSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CloseSession_call method_call = new CloseSession_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CloseSession_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCloseSessionReq req; - public CloseSession_call(TCloseSessionReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CloseSession", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CloseSession_args args = new CloseSession_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCloseSessionResp 
getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CloseSession(); - } - } - - public void GetInfo(TGetInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetInfo_call method_call = new GetInfo_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetInfo_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetInfoReq req; - public GetInfo_call(TGetInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetInfo", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetInfo_args args = new GetInfo_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetInfoResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetInfo(); - } - } - - public void ExecuteStatement(TExecuteStatementReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - ExecuteStatement_call method_call = new ExecuteStatement_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class ExecuteStatement_call extends org.apache.thrift.async.TAsyncMethodCall { - private TExecuteStatementReq req; - public ExecuteStatement_call(TExecuteStatementReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("ExecuteStatement", org.apache.thrift.protocol.TMessageType.CALL, 0)); - ExecuteStatement_args args = new ExecuteStatement_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public 
TExecuteStatementResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_ExecuteStatement(); - } - } - - public void GetTypeInfo(TGetTypeInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetTypeInfo_call method_call = new GetTypeInfo_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetTypeInfo_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetTypeInfoReq req; - public GetTypeInfo_call(TGetTypeInfoReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetTypeInfo", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetTypeInfo_args args = new GetTypeInfo_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetTypeInfoResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetTypeInfo(); - } - } - - public void GetCatalogs(TGetCatalogsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetCatalogs_call method_call = new GetCatalogs_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetCatalogs_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetCatalogsReq req; - public GetCatalogs_call(TGetCatalogsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetCatalogs", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetCatalogs_args args = new GetCatalogs_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - 
public TGetCatalogsResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetCatalogs(); - } - } - - public void GetSchemas(TGetSchemasReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetSchemas_call method_call = new GetSchemas_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetSchemas_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetSchemasReq req; - public GetSchemas_call(TGetSchemasReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetSchemas", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetSchemas_args args = new GetSchemas_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetSchemasResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetSchemas(); - } - } - - public void GetTables(TGetTablesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetTables_call method_call = new GetTables_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetTables_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetTablesReq req; - public GetTables_call(TGetTablesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetTables", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetTables_args args = new GetTables_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetTablesResp getResult() 
throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetTables(); - } - } - - public void GetTableTypes(TGetTableTypesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetTableTypes_call method_call = new GetTableTypes_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetTableTypes_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetTableTypesReq req; - public GetTableTypes_call(TGetTableTypesReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetTableTypes", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetTableTypes_args args = new GetTableTypes_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetTableTypesResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetTableTypes(); - } - } - - public void GetColumns(TGetColumnsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetColumns_call method_call = new GetColumns_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetColumns_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetColumnsReq req; - public GetColumns_call(TGetColumnsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetColumns", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetColumns_args args = new GetColumns_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetColumnsResp 
getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetColumns(); - } - } - - public void GetFunctions(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetFunctions_call method_call = new GetFunctions_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetFunctions_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetFunctionsReq req; - public GetFunctions_call(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetFunctions", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetFunctions_args args = new GetFunctions_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetFunctionsResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetFunctions(); - } - } - - public void GetPrimaryKeys(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetPrimaryKeys_call method_call = new GetPrimaryKeys_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetPrimaryKeys_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetPrimaryKeysReq req; - public GetPrimaryKeys_call(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetPrimaryKeys", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetPrimaryKeys_args args = new GetPrimaryKeys_args(); - args.setReq(req); - args.write(prot); - 
prot.writeMessageEnd(); - } - - public TGetPrimaryKeysResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetPrimaryKeys(); - } - } - - public void GetCrossReference(TGetCrossReferenceReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetCrossReference_call method_call = new GetCrossReference_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetCrossReference_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetCrossReferenceReq req; - public GetCrossReference_call(TGetCrossReferenceReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetCrossReference", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetCrossReference_args args = new GetCrossReference_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetCrossReferenceResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetCrossReference(); - } - } - - public void GetOperationStatus(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetOperationStatus_call method_call = new GetOperationStatus_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetOperationStatus_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetOperationStatusReq req; - public GetOperationStatus_call(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetOperationStatus", 
org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetOperationStatus_args args = new GetOperationStatus_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetOperationStatusResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetOperationStatus(); - } - } - - public void CancelOperation(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CancelOperation_call method_call = new CancelOperation_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CancelOperation_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCancelOperationReq req; - public CancelOperation_call(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CancelOperation", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CancelOperation_args args = new CancelOperation_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCancelOperationResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CancelOperation(); - } - } - - public void CloseOperation(TCloseOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CloseOperation_call method_call = new CloseOperation_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CloseOperation_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCloseOperationReq req; - public CloseOperation_call(TCloseOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws 
org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CloseOperation", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CloseOperation_args args = new CloseOperation_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCloseOperationResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CloseOperation(); - } - } - - public void GetResultSetMetadata(TGetResultSetMetadataReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetResultSetMetadata_call method_call = new GetResultSetMetadata_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetResultSetMetadata_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetResultSetMetadataReq req; - public GetResultSetMetadata_call(TGetResultSetMetadataReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetResultSetMetadata", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetResultSetMetadata_args args = new GetResultSetMetadata_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetResultSetMetadataResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetResultSetMetadata(); - } - } - - public void FetchResults(TFetchResultsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - FetchResults_call method_call = new FetchResults_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class FetchResults_call extends org.apache.thrift.async.TAsyncMethodCall { - private TFetchResultsReq req; - public FetchResults_call(TFetchResultsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, 
protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("FetchResults", org.apache.thrift.protocol.TMessageType.CALL, 0)); - FetchResults_args args = new FetchResults_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TFetchResultsResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_FetchResults(); - } - } - - public void GetDelegationToken(TGetDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - GetDelegationToken_call method_call = new GetDelegationToken_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class GetDelegationToken_call extends org.apache.thrift.async.TAsyncMethodCall { - private TGetDelegationTokenReq req; - public GetDelegationToken_call(TGetDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetDelegationToken", org.apache.thrift.protocol.TMessageType.CALL, 0)); - GetDelegationToken_args args = new GetDelegationToken_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TGetDelegationTokenResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_GetDelegationToken(); - } - } - - public void CancelDelegationToken(TCancelDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - CancelDelegationToken_call method_call = new CancelDelegationToken_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class CancelDelegationToken_call extends org.apache.thrift.async.TAsyncMethodCall { - private TCancelDelegationTokenReq req; - public CancelDelegationToken_call(TCancelDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, 
org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("CancelDelegationToken", org.apache.thrift.protocol.TMessageType.CALL, 0)); - CancelDelegationToken_args args = new CancelDelegationToken_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TCancelDelegationTokenResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_CancelDelegationToken(); - } - } - - public void RenewDelegationToken(TRenewDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { - checkReady(); - RenewDelegationToken_call method_call = new RenewDelegationToken_call(req, resultHandler, this, ___protocolFactory, ___transport); - this.___currentMethod = method_call; - ___manager.call(method_call); - } - - public static class RenewDelegationToken_call extends org.apache.thrift.async.TAsyncMethodCall { - private TRenewDelegationTokenReq req; - public RenewDelegationToken_call(TRenewDelegationTokenReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { - super(client, protocolFactory, transport, resultHandler, false); - this.req = req; - } - - public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { - prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("RenewDelegationToken", org.apache.thrift.protocol.TMessageType.CALL, 0)); - RenewDelegationToken_args args = new RenewDelegationToken_args(); - args.setReq(req); - args.write(prot); - prot.writeMessageEnd(); - } - - public TRenewDelegationTokenResp getResult() throws org.apache.thrift.TException { - if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { - throw new IllegalStateException("Method call not finished!"); - } - org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); - org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); - return (new Client(prot)).recv_RenewDelegationToken(); - } - } - - } - - public static class Processor extends org.apache.thrift.TBaseProcessor implements org.apache.thrift.TProcessor { - private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName()); - public Processor(I iface) { - super(iface, getProcessMap(new HashMap>())); - } - - protected Processor(I iface, Map> processMap) { - super(iface, getProcessMap(processMap)); - } - - private static Map> getProcessMap(Map> processMap) { - 
processMap.put("OpenSession", new OpenSession()); - processMap.put("CloseSession", new CloseSession()); - processMap.put("GetInfo", new GetInfo()); - processMap.put("ExecuteStatement", new ExecuteStatement()); - processMap.put("GetTypeInfo", new GetTypeInfo()); - processMap.put("GetCatalogs", new GetCatalogs()); - processMap.put("GetSchemas", new GetSchemas()); - processMap.put("GetTables", new GetTables()); - processMap.put("GetTableTypes", new GetTableTypes()); - processMap.put("GetColumns", new GetColumns()); - processMap.put("GetFunctions", new GetFunctions()); - processMap.put("GetPrimaryKeys", new GetPrimaryKeys()); - processMap.put("GetCrossReference", new GetCrossReference()); - processMap.put("GetOperationStatus", new GetOperationStatus()); - processMap.put("CancelOperation", new CancelOperation()); - processMap.put("CloseOperation", new CloseOperation()); - processMap.put("GetResultSetMetadata", new GetResultSetMetadata()); - processMap.put("FetchResults", new FetchResults()); - processMap.put("GetDelegationToken", new GetDelegationToken()); - processMap.put("CancelDelegationToken", new CancelDelegationToken()); - processMap.put("RenewDelegationToken", new RenewDelegationToken()); - return processMap; - } - - public static class OpenSession extends org.apache.thrift.ProcessFunction { - public OpenSession() { - super("OpenSession"); - } - - public OpenSession_args getEmptyArgsInstance() { - return new OpenSession_args(); - } - - protected boolean isOneway() { - return false; - } - - public OpenSession_result getResult(I iface, OpenSession_args args) throws org.apache.thrift.TException { - OpenSession_result result = new OpenSession_result(); - result.success = iface.OpenSession(args.req); - return result; - } - } - - public static class CloseSession extends org.apache.thrift.ProcessFunction { - public CloseSession() { - super("CloseSession"); - } - - public CloseSession_args getEmptyArgsInstance() { - return new CloseSession_args(); - } - - protected boolean isOneway() { - return false; - } - - public CloseSession_result getResult(I iface, CloseSession_args args) throws org.apache.thrift.TException { - CloseSession_result result = new CloseSession_result(); - result.success = iface.CloseSession(args.req); - return result; - } - } - - public static class GetInfo extends org.apache.thrift.ProcessFunction { - public GetInfo() { - super("GetInfo"); - } - - public GetInfo_args getEmptyArgsInstance() { - return new GetInfo_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetInfo_result getResult(I iface, GetInfo_args args) throws org.apache.thrift.TException { - GetInfo_result result = new GetInfo_result(); - result.success = iface.GetInfo(args.req); - return result; - } - } - - public static class ExecuteStatement extends org.apache.thrift.ProcessFunction { - public ExecuteStatement() { - super("ExecuteStatement"); - } - - public ExecuteStatement_args getEmptyArgsInstance() { - return new ExecuteStatement_args(); - } - - protected boolean isOneway() { - return false; - } - - public ExecuteStatement_result getResult(I iface, ExecuteStatement_args args) throws org.apache.thrift.TException { - ExecuteStatement_result result = new ExecuteStatement_result(); - result.success = iface.ExecuteStatement(args.req); - return result; - } - } - - public static class GetTypeInfo extends org.apache.thrift.ProcessFunction { - public GetTypeInfo() { - super("GetTypeInfo"); - } - - public GetTypeInfo_args getEmptyArgsInstance() { - return new GetTypeInfo_args(); - } - - 
protected boolean isOneway() { - return false; - } - - public GetTypeInfo_result getResult(I iface, GetTypeInfo_args args) throws org.apache.thrift.TException { - GetTypeInfo_result result = new GetTypeInfo_result(); - result.success = iface.GetTypeInfo(args.req); - return result; - } - } - - public static class GetCatalogs extends org.apache.thrift.ProcessFunction { - public GetCatalogs() { - super("GetCatalogs"); - } - - public GetCatalogs_args getEmptyArgsInstance() { - return new GetCatalogs_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetCatalogs_result getResult(I iface, GetCatalogs_args args) throws org.apache.thrift.TException { - GetCatalogs_result result = new GetCatalogs_result(); - result.success = iface.GetCatalogs(args.req); - return result; - } - } - - public static class GetSchemas extends org.apache.thrift.ProcessFunction { - public GetSchemas() { - super("GetSchemas"); - } - - public GetSchemas_args getEmptyArgsInstance() { - return new GetSchemas_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetSchemas_result getResult(I iface, GetSchemas_args args) throws org.apache.thrift.TException { - GetSchemas_result result = new GetSchemas_result(); - result.success = iface.GetSchemas(args.req); - return result; - } - } - - public static class GetTables extends org.apache.thrift.ProcessFunction { - public GetTables() { - super("GetTables"); - } - - public GetTables_args getEmptyArgsInstance() { - return new GetTables_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetTables_result getResult(I iface, GetTables_args args) throws org.apache.thrift.TException { - GetTables_result result = new GetTables_result(); - result.success = iface.GetTables(args.req); - return result; - } - } - - public static class GetTableTypes extends org.apache.thrift.ProcessFunction { - public GetTableTypes() { - super("GetTableTypes"); - } - - public GetTableTypes_args getEmptyArgsInstance() { - return new GetTableTypes_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetTableTypes_result getResult(I iface, GetTableTypes_args args) throws org.apache.thrift.TException { - GetTableTypes_result result = new GetTableTypes_result(); - result.success = iface.GetTableTypes(args.req); - return result; - } - } - - public static class GetColumns extends org.apache.thrift.ProcessFunction { - public GetColumns() { - super("GetColumns"); - } - - public GetColumns_args getEmptyArgsInstance() { - return new GetColumns_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetColumns_result getResult(I iface, GetColumns_args args) throws org.apache.thrift.TException { - GetColumns_result result = new GetColumns_result(); - result.success = iface.GetColumns(args.req); - return result; - } - } - - public static class GetFunctions extends org.apache.thrift.ProcessFunction { - public GetFunctions() { - super("GetFunctions"); - } - - public GetFunctions_args getEmptyArgsInstance() { - return new GetFunctions_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetFunctions_result getResult(I iface, GetFunctions_args args) throws org.apache.thrift.TException { - GetFunctions_result result = new GetFunctions_result(); - result.success = iface.GetFunctions(args.req); - return result; - } - } - - public static class GetPrimaryKeys extends org.apache.thrift.ProcessFunction { - public GetPrimaryKeys() { - super("GetPrimaryKeys"); - } - - public GetPrimaryKeys_args 
getEmptyArgsInstance() { - return new GetPrimaryKeys_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetPrimaryKeys_result getResult(I iface, GetPrimaryKeys_args args) throws org.apache.thrift.TException { - GetPrimaryKeys_result result = new GetPrimaryKeys_result(); - result.success = iface.GetPrimaryKeys(args.req); - return result; - } - } - - public static class GetCrossReference extends org.apache.thrift.ProcessFunction { - public GetCrossReference() { - super("GetCrossReference"); - } - - public GetCrossReference_args getEmptyArgsInstance() { - return new GetCrossReference_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetCrossReference_result getResult(I iface, GetCrossReference_args args) throws org.apache.thrift.TException { - GetCrossReference_result result = new GetCrossReference_result(); - result.success = iface.GetCrossReference(args.req); - return result; - } - } - - public static class GetOperationStatus extends org.apache.thrift.ProcessFunction { - public GetOperationStatus() { - super("GetOperationStatus"); - } - - public GetOperationStatus_args getEmptyArgsInstance() { - return new GetOperationStatus_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetOperationStatus_result getResult(I iface, GetOperationStatus_args args) throws org.apache.thrift.TException { - GetOperationStatus_result result = new GetOperationStatus_result(); - result.success = iface.GetOperationStatus(args.req); - return result; - } - } - - public static class CancelOperation extends org.apache.thrift.ProcessFunction { - public CancelOperation() { - super("CancelOperation"); - } - - public CancelOperation_args getEmptyArgsInstance() { - return new CancelOperation_args(); - } - - protected boolean isOneway() { - return false; - } - - public CancelOperation_result getResult(I iface, CancelOperation_args args) throws org.apache.thrift.TException { - CancelOperation_result result = new CancelOperation_result(); - result.success = iface.CancelOperation(args.req); - return result; - } - } - - public static class CloseOperation extends org.apache.thrift.ProcessFunction { - public CloseOperation() { - super("CloseOperation"); - } - - public CloseOperation_args getEmptyArgsInstance() { - return new CloseOperation_args(); - } - - protected boolean isOneway() { - return false; - } - - public CloseOperation_result getResult(I iface, CloseOperation_args args) throws org.apache.thrift.TException { - CloseOperation_result result = new CloseOperation_result(); - result.success = iface.CloseOperation(args.req); - return result; - } - } - - public static class GetResultSetMetadata extends org.apache.thrift.ProcessFunction { - public GetResultSetMetadata() { - super("GetResultSetMetadata"); - } - - public GetResultSetMetadata_args getEmptyArgsInstance() { - return new GetResultSetMetadata_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetResultSetMetadata_result getResult(I iface, GetResultSetMetadata_args args) throws org.apache.thrift.TException { - GetResultSetMetadata_result result = new GetResultSetMetadata_result(); - result.success = iface.GetResultSetMetadata(args.req); - return result; - } - } - - public static class FetchResults extends org.apache.thrift.ProcessFunction { - public FetchResults() { - super("FetchResults"); - } - - public FetchResults_args getEmptyArgsInstance() { - return new FetchResults_args(); - } - - protected boolean isOneway() { - return false; - } - - public 
FetchResults_result getResult(I iface, FetchResults_args args) throws org.apache.thrift.TException { - FetchResults_result result = new FetchResults_result(); - result.success = iface.FetchResults(args.req); - return result; - } - } - - public static class GetDelegationToken extends org.apache.thrift.ProcessFunction { - public GetDelegationToken() { - super("GetDelegationToken"); - } - - public GetDelegationToken_args getEmptyArgsInstance() { - return new GetDelegationToken_args(); - } - - protected boolean isOneway() { - return false; - } - - public GetDelegationToken_result getResult(I iface, GetDelegationToken_args args) throws org.apache.thrift.TException { - GetDelegationToken_result result = new GetDelegationToken_result(); - result.success = iface.GetDelegationToken(args.req); - return result; - } - } - - public static class CancelDelegationToken extends org.apache.thrift.ProcessFunction { - public CancelDelegationToken() { - super("CancelDelegationToken"); - } - - public CancelDelegationToken_args getEmptyArgsInstance() { - return new CancelDelegationToken_args(); - } - - protected boolean isOneway() { - return false; - } - - public CancelDelegationToken_result getResult(I iface, CancelDelegationToken_args args) throws org.apache.thrift.TException { - CancelDelegationToken_result result = new CancelDelegationToken_result(); - result.success = iface.CancelDelegationToken(args.req); - return result; - } - } - - public static class RenewDelegationToken extends org.apache.thrift.ProcessFunction { - public RenewDelegationToken() { - super("RenewDelegationToken"); - } - - public RenewDelegationToken_args getEmptyArgsInstance() { - return new RenewDelegationToken_args(); - } - - protected boolean isOneway() { - return false; - } - - public RenewDelegationToken_result getResult(I iface, RenewDelegationToken_args args) throws org.apache.thrift.TException { - RenewDelegationToken_result result = new RenewDelegationToken_result(); - result.success = iface.RenewDelegationToken(args.req); - return result; - } - } - - } - - public static class AsyncProcessor extends org.apache.thrift.TBaseAsyncProcessor { - private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName()); - public AsyncProcessor(I iface) { - super(iface, getProcessMap(new HashMap>())); - } - - protected AsyncProcessor(I iface, Map> processMap) { - super(iface, getProcessMap(processMap)); - } - - private static Map> getProcessMap(Map> processMap) { - processMap.put("OpenSession", new OpenSession()); - processMap.put("CloseSession", new CloseSession()); - processMap.put("GetInfo", new GetInfo()); - processMap.put("ExecuteStatement", new ExecuteStatement()); - processMap.put("GetTypeInfo", new GetTypeInfo()); - processMap.put("GetCatalogs", new GetCatalogs()); - processMap.put("GetSchemas", new GetSchemas()); - processMap.put("GetTables", new GetTables()); - processMap.put("GetTableTypes", new GetTableTypes()); - processMap.put("GetColumns", new GetColumns()); - processMap.put("GetFunctions", new GetFunctions()); - processMap.put("GetPrimaryKeys", new GetPrimaryKeys()); - processMap.put("GetCrossReference", new GetCrossReference()); - processMap.put("GetOperationStatus", new GetOperationStatus()); - processMap.put("CancelOperation", new CancelOperation()); - processMap.put("CloseOperation", new CloseOperation()); - processMap.put("GetResultSetMetadata", new GetResultSetMetadata()); - processMap.put("FetchResults", new FetchResults()); - processMap.put("GetDelegationToken", new GetDelegationToken()); - 
processMap.put("CancelDelegationToken", new CancelDelegationToken()); - processMap.put("RenewDelegationToken", new RenewDelegationToken()); - return processMap; - } - - public static class OpenSession extends org.apache.thrift.AsyncProcessFunction { - public OpenSession() { - super("OpenSession"); - } - - public OpenSession_args getEmptyArgsInstance() { - return new OpenSession_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TOpenSessionResp o) { - OpenSession_result result = new OpenSession_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - OpenSession_result result = new OpenSession_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, OpenSession_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.OpenSession(args.req,resultHandler); - } - } - - public static class CloseSession extends org.apache.thrift.AsyncProcessFunction { - public CloseSession() { - super("CloseSession"); - } - - public CloseSession_args getEmptyArgsInstance() { - return new CloseSession_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TCloseSessionResp o) { - CloseSession_result result = new CloseSession_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - CloseSession_result result = new CloseSession_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, CloseSession_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.CloseSession(args.req,resultHandler); - } - } - - public static class GetInfo extends org.apache.thrift.AsyncProcessFunction { - public GetInfo() { - super("GetInfo"); - } - - 
public GetInfo_args getEmptyArgsInstance() { - return new GetInfo_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetInfoResp o) { - GetInfo_result result = new GetInfo_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetInfo_result result = new GetInfo_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetInfo_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetInfo(args.req,resultHandler); - } - } - - public static class ExecuteStatement extends org.apache.thrift.AsyncProcessFunction { - public ExecuteStatement() { - super("ExecuteStatement"); - } - - public ExecuteStatement_args getEmptyArgsInstance() { - return new ExecuteStatement_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TExecuteStatementResp o) { - ExecuteStatement_result result = new ExecuteStatement_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - ExecuteStatement_result result = new ExecuteStatement_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, ExecuteStatement_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.ExecuteStatement(args.req,resultHandler); - } - } - - public static class GetTypeInfo extends org.apache.thrift.AsyncProcessFunction { - public GetTypeInfo() { - super("GetTypeInfo"); - } - - public GetTypeInfo_args getEmptyArgsInstance() { - return new GetTypeInfo_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new 
AsyncMethodCallback() { - public void onComplete(TGetTypeInfoResp o) { - GetTypeInfo_result result = new GetTypeInfo_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetTypeInfo_result result = new GetTypeInfo_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetTypeInfo_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetTypeInfo(args.req,resultHandler); - } - } - - public static class GetCatalogs extends org.apache.thrift.AsyncProcessFunction { - public GetCatalogs() { - super("GetCatalogs"); - } - - public GetCatalogs_args getEmptyArgsInstance() { - return new GetCatalogs_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetCatalogsResp o) { - GetCatalogs_result result = new GetCatalogs_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetCatalogs_result result = new GetCatalogs_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetCatalogs_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetCatalogs(args.req,resultHandler); - } - } - - public static class GetSchemas extends org.apache.thrift.AsyncProcessFunction { - public GetSchemas() { - super("GetSchemas"); - } - - public GetSchemas_args getEmptyArgsInstance() { - return new GetSchemas_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetSchemasResp o) { - GetSchemas_result result = new GetSchemas_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing 
to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetSchemas_result result = new GetSchemas_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetSchemas_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetSchemas(args.req,resultHandler); - } - } - - public static class GetTables extends org.apache.thrift.AsyncProcessFunction { - public GetTables() { - super("GetTables"); - } - - public GetTables_args getEmptyArgsInstance() { - return new GetTables_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetTablesResp o) { - GetTables_result result = new GetTables_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetTables_result result = new GetTables_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetTables_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetTables(args.req,resultHandler); - } - } - - public static class GetTableTypes extends org.apache.thrift.AsyncProcessFunction { - public GetTableTypes() { - super("GetTableTypes"); - } - - public GetTableTypes_args getEmptyArgsInstance() { - return new GetTableTypes_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetTableTypesResp o) { - GetTableTypes_result result = new GetTableTypes_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetTableTypes_result result = new GetTableTypes_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg 
= (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetTableTypes_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetTableTypes(args.req,resultHandler); - } - } - - public static class GetColumns extends org.apache.thrift.AsyncProcessFunction { - public GetColumns() { - super("GetColumns"); - } - - public GetColumns_args getEmptyArgsInstance() { - return new GetColumns_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetColumnsResp o) { - GetColumns_result result = new GetColumns_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetColumns_result result = new GetColumns_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetColumns_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetColumns(args.req,resultHandler); - } - } - - public static class GetFunctions extends org.apache.thrift.AsyncProcessFunction { - public GetFunctions() { - super("GetFunctions"); - } - - public GetFunctions_args getEmptyArgsInstance() { - return new GetFunctions_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetFunctionsResp o) { - GetFunctions_result result = new GetFunctions_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetFunctions_result result = new GetFunctions_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - 
} - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetFunctions_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetFunctions(args.req,resultHandler); - } - } - - public static class GetPrimaryKeys extends org.apache.thrift.AsyncProcessFunction { - public GetPrimaryKeys() { - super("GetPrimaryKeys"); - } - - public GetPrimaryKeys_args getEmptyArgsInstance() { - return new GetPrimaryKeys_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetPrimaryKeysResp o) { - GetPrimaryKeys_result result = new GetPrimaryKeys_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetPrimaryKeys_result result = new GetPrimaryKeys_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetPrimaryKeys_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetPrimaryKeys(args.req,resultHandler); - } - } - - public static class GetCrossReference extends org.apache.thrift.AsyncProcessFunction { - public GetCrossReference() { - super("GetCrossReference"); - } - - public GetCrossReference_args getEmptyArgsInstance() { - return new GetCrossReference_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetCrossReferenceResp o) { - GetCrossReference_result result = new GetCrossReference_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetCrossReference_result result = new GetCrossReference_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetCrossReference_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) 
throws TException { - iface.GetCrossReference(args.req,resultHandler); - } - } - - public static class GetOperationStatus extends org.apache.thrift.AsyncProcessFunction { - public GetOperationStatus() { - super("GetOperationStatus"); - } - - public GetOperationStatus_args getEmptyArgsInstance() { - return new GetOperationStatus_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetOperationStatusResp o) { - GetOperationStatus_result result = new GetOperationStatus_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetOperationStatus_result result = new GetOperationStatus_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetOperationStatus_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetOperationStatus(args.req,resultHandler); - } - } - - public static class CancelOperation extends org.apache.thrift.AsyncProcessFunction { - public CancelOperation() { - super("CancelOperation"); - } - - public CancelOperation_args getEmptyArgsInstance() { - return new CancelOperation_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TCancelOperationResp o) { - CancelOperation_result result = new CancelOperation_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - CancelOperation_result result = new CancelOperation_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, CancelOperation_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.CancelOperation(args.req,resultHandler); - } - } - - public static class CloseOperation extends org.apache.thrift.AsyncProcessFunction { - 
public CloseOperation() { - super("CloseOperation"); - } - - public CloseOperation_args getEmptyArgsInstance() { - return new CloseOperation_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TCloseOperationResp o) { - CloseOperation_result result = new CloseOperation_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - CloseOperation_result result = new CloseOperation_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, CloseOperation_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.CloseOperation(args.req,resultHandler); - } - } - - public static class GetResultSetMetadata extends org.apache.thrift.AsyncProcessFunction { - public GetResultSetMetadata() { - super("GetResultSetMetadata"); - } - - public GetResultSetMetadata_args getEmptyArgsInstance() { - return new GetResultSetMetadata_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetResultSetMetadataResp o) { - GetResultSetMetadata_result result = new GetResultSetMetadata_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetResultSetMetadata_result result = new GetResultSetMetadata_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetResultSetMetadata_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetResultSetMetadata(args.req,resultHandler); - } - } - - public static class FetchResults extends org.apache.thrift.AsyncProcessFunction { - public FetchResults() { - super("FetchResults"); - } - - public FetchResults_args getEmptyArgsInstance() { - return new FetchResults_args(); - } - - public 
AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TFetchResultsResp o) { - FetchResults_result result = new FetchResults_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - FetchResults_result result = new FetchResults_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, FetchResults_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.FetchResults(args.req,resultHandler); - } - } - - public static class GetDelegationToken extends org.apache.thrift.AsyncProcessFunction { - public GetDelegationToken() { - super("GetDelegationToken"); - } - - public GetDelegationToken_args getEmptyArgsInstance() { - return new GetDelegationToken_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TGetDelegationTokenResp o) { - GetDelegationToken_result result = new GetDelegationToken_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - GetDelegationToken_result result = new GetDelegationToken_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, GetDelegationToken_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.GetDelegationToken(args.req,resultHandler); - } - } - - public static class CancelDelegationToken extends org.apache.thrift.AsyncProcessFunction { - public CancelDelegationToken() { - super("CancelDelegationToken"); - } - - public CancelDelegationToken_args getEmptyArgsInstance() { - return new CancelDelegationToken_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = 
this; - return new AsyncMethodCallback() { - public void onComplete(TCancelDelegationTokenResp o) { - CancelDelegationToken_result result = new CancelDelegationToken_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - CancelDelegationToken_result result = new CancelDelegationToken_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, CancelDelegationToken_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.CancelDelegationToken(args.req,resultHandler); - } - } - - public static class RenewDelegationToken extends org.apache.thrift.AsyncProcessFunction { - public RenewDelegationToken() { - super("RenewDelegationToken"); - } - - public RenewDelegationToken_args getEmptyArgsInstance() { - return new RenewDelegationToken_args(); - } - - public AsyncMethodCallback getResultHandler(final AbstractNonblockingServer.AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(TRenewDelegationTokenResp o) { - RenewDelegationToken_result result = new RenewDelegationToken_result(); - result.success = o; - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - RenewDelegationToken_result result = new RenewDelegationToken_result(); - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, RenewDelegationToken_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.RenewDelegationToken(args.req,resultHandler); - } - } - - } - - public static class OpenSession_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenSession_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { 
- schemes.put(StandardScheme.class, new OpenSession_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new OpenSession_argsTupleSchemeFactory()); - } - - private TOpenSessionReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOpenSessionReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenSession_args.class, metaDataMap); - } - - public OpenSession_args() { - } - - public OpenSession_args( - TOpenSessionReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public OpenSession_args(OpenSession_args other) { - if (other.isSetReq()) { - this.req = new TOpenSessionReq(other.req); - } - } - - public OpenSession_args deepCopy() { - return new OpenSession_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TOpenSessionReq getReq() { - return this.req; - } - - public void setReq(TOpenSessionReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TOpenSessionReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof OpenSession_args) - return this.equals((OpenSession_args)that); - return false; - } - - public boolean equals(OpenSession_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(OpenSession_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("OpenSession_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != 
null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class OpenSession_argsStandardSchemeFactory implements SchemeFactory { - public OpenSession_argsStandardScheme getScheme() { - return new OpenSession_argsStandardScheme(); - } - } - - private static class OpenSession_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, OpenSession_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TOpenSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, OpenSession_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class OpenSession_argsTupleSchemeFactory implements SchemeFactory { - public OpenSession_argsTupleScheme getScheme() { - return new OpenSession_argsTupleScheme(); - } - } - - private static class OpenSession_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, OpenSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, OpenSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TOpenSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class OpenSession_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenSession_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", 
org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new OpenSession_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new OpenSession_resultTupleSchemeFactory()); - } - - private TOpenSessionResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOpenSessionResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenSession_result.class, metaDataMap); - } - - public OpenSession_result() { - } - - public OpenSession_result( - TOpenSessionResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public OpenSession_result(OpenSession_result other) { - if (other.isSetSuccess()) { - this.success = new TOpenSessionResp(other.success); - } - } - - public OpenSession_result deepCopy() { - return new OpenSession_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TOpenSessionResp getSuccess() { - return this.success; - } - - public void setSuccess(TOpenSessionResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TOpenSessionResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof OpenSession_result) - return this.equals((OpenSession_result)that); - return false; - } - - public boolean equals(OpenSession_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(OpenSession_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("OpenSession_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - 
} - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class OpenSession_resultStandardSchemeFactory implements SchemeFactory { - public OpenSession_resultStandardScheme getScheme() { - return new OpenSession_resultStandardScheme(); - } - } - - private static class OpenSession_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, OpenSession_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TOpenSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, OpenSession_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class OpenSession_resultTupleSchemeFactory implements SchemeFactory { - public OpenSession_resultTupleScheme getScheme() { - return new OpenSession_resultTupleScheme(); - } - } - - private static class OpenSession_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, OpenSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, OpenSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TOpenSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class CloseSession_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - 
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseSession_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseSession_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseSession_argsTupleSchemeFactory()); - } - - private TCloseSessionReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseSessionReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseSession_args.class, metaDataMap); - } - - public CloseSession_args() { - } - - public CloseSession_args( - TCloseSessionReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public CloseSession_args(CloseSession_args other) { - if (other.isSetReq()) { - this.req = new TCloseSessionReq(other.req); - } - } - - public CloseSession_args deepCopy() { - return new CloseSession_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCloseSessionReq getReq() { - return this.req; - } - - public void setReq(TCloseSessionReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCloseSessionReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseSession_args) - return this.equals((CloseSession_args)that); - return false; - } - - public boolean equals(CloseSession_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(CloseSession_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseSession_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity 
- if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseSession_argsStandardSchemeFactory implements SchemeFactory { - public CloseSession_argsStandardScheme getScheme() { - return new CloseSession_argsStandardScheme(); - } - } - - private static class CloseSession_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseSession_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCloseSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseSession_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CloseSession_argsTupleSchemeFactory implements SchemeFactory { - public CloseSession_argsTupleScheme getScheme() { - return new CloseSession_argsTupleScheme(); - } - } - - private static class CloseSession_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseSession_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCloseSessionReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CloseSession_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseSession_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", 
org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseSession_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseSession_resultTupleSchemeFactory()); - } - - private TCloseSessionResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseSessionResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseSession_result.class, metaDataMap); - } - - public CloseSession_result() { - } - - public CloseSession_result( - TCloseSessionResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public CloseSession_result(CloseSession_result other) { - if (other.isSetSuccess()) { - this.success = new TCloseSessionResp(other.success); - } - } - - public CloseSession_result deepCopy() { - return new CloseSession_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCloseSessionResp getSuccess() { - return this.success; - } - - public void setSuccess(TCloseSessionResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCloseSessionResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseSession_result) - return this.equals((CloseSession_result)that); - return false; - } - - public boolean equals(CloseSession_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(CloseSession_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseSession_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - 
sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseSession_resultStandardSchemeFactory implements SchemeFactory { - public CloseSession_resultStandardScheme getScheme() { - return new CloseSession_resultStandardScheme(); - } - } - - private static class CloseSession_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseSession_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCloseSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseSession_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CloseSession_resultTupleSchemeFactory implements SchemeFactory { - public CloseSession_resultTupleScheme getScheme() { - return new CloseSession_resultTupleScheme(); - } - } - - private static class CloseSession_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseSession_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TCloseSessionResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetInfo_args implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetInfo_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetInfo_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetInfo_argsTupleSchemeFactory()); - } - - private TGetInfoReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetInfoReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetInfo_args.class, metaDataMap); - } - - public GetInfo_args() { - } - - public GetInfo_args( - TGetInfoReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetInfo_args(GetInfo_args other) { - if (other.isSetReq()) { - this.req = new TGetInfoReq(other.req); - } - } - - public GetInfo_args deepCopy() { - return new GetInfo_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetInfoReq getReq() { - return this.req; - } - - public void setReq(TGetInfoReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetInfoReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetInfo_args) - return this.equals((GetInfo_args)that); - return false; - } - - public boolean equals(GetInfo_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetInfo_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetInfo_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void 
writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetInfo_argsStandardSchemeFactory implements SchemeFactory { - public GetInfo_argsStandardScheme getScheme() { - return new GetInfo_argsStandardScheme(); - } - } - - private static class GetInfo_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetInfo_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetInfo_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetInfo_argsTupleSchemeFactory implements SchemeFactory { - public GetInfo_argsTupleScheme getScheme() { - return new GetInfo_argsTupleScheme(); - } - } - - private static class GetInfo_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetInfo_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetInfo_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { 
- schemes.put(StandardScheme.class, new GetInfo_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetInfo_resultTupleSchemeFactory()); - } - - private TGetInfoResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetInfoResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetInfo_result.class, metaDataMap); - } - - public GetInfo_result() { - } - - public GetInfo_result( - TGetInfoResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetInfo_result(GetInfo_result other) { - if (other.isSetSuccess()) { - this.success = new TGetInfoResp(other.success); - } - } - - public GetInfo_result deepCopy() { - return new GetInfo_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetInfoResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetInfoResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetInfoResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetInfo_result) - return this.equals((GetInfo_result)that); - return false; - } - - public boolean equals(GetInfo_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetInfo_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetInfo_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return 
sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetInfo_resultStandardSchemeFactory implements SchemeFactory { - public GetInfo_resultStandardScheme getScheme() { - return new GetInfo_resultStandardScheme(); - } - } - - private static class GetInfo_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetInfo_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetInfo_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetInfo_resultTupleSchemeFactory implements SchemeFactory { - public GetInfo_resultTupleScheme getScheme() { - return new GetInfo_resultTupleScheme(); - } - } - - private static class GetInfo_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class ExecuteStatement_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("ExecuteStatement_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new ExecuteStatement_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new ExecuteStatement_argsTupleSchemeFactory()); - } - - private TExecuteStatementReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TExecuteStatementReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ExecuteStatement_args.class, metaDataMap); - } - - public ExecuteStatement_args() { - } - - public ExecuteStatement_args( - TExecuteStatementReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public ExecuteStatement_args(ExecuteStatement_args other) { - if (other.isSetReq()) { - this.req = new TExecuteStatementReq(other.req); - } - } - - public ExecuteStatement_args deepCopy() { - return new ExecuteStatement_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TExecuteStatementReq getReq() { - return this.req; - } - - public void setReq(TExecuteStatementReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TExecuteStatementReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof ExecuteStatement_args) - return this.equals((ExecuteStatement_args)that); - return false; - } - - public boolean equals(ExecuteStatement_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(ExecuteStatement_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("ExecuteStatement_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for 
required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class ExecuteStatement_argsStandardSchemeFactory implements SchemeFactory { - public ExecuteStatement_argsStandardScheme getScheme() { - return new ExecuteStatement_argsStandardScheme(); - } - } - - private static class ExecuteStatement_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TExecuteStatementReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class ExecuteStatement_argsTupleSchemeFactory implements SchemeFactory { - public ExecuteStatement_argsTupleScheme getScheme() { - return new ExecuteStatement_argsTupleScheme(); - } - } - - private static class ExecuteStatement_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TExecuteStatementReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class ExecuteStatement_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ExecuteStatement_result"); - - private static 
final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new ExecuteStatement_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new ExecuteStatement_resultTupleSchemeFactory()); - } - - private TExecuteStatementResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TExecuteStatementResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ExecuteStatement_result.class, metaDataMap); - } - - public ExecuteStatement_result() { - } - - public ExecuteStatement_result( - TExecuteStatementResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public ExecuteStatement_result(ExecuteStatement_result other) { - if (other.isSetSuccess()) { - this.success = new TExecuteStatementResp(other.success); - } - } - - public ExecuteStatement_result deepCopy() { - return new ExecuteStatement_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TExecuteStatementResp getSuccess() { - return this.success; - } - - public void setSuccess(TExecuteStatementResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TExecuteStatementResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof ExecuteStatement_result) - return this.equals((ExecuteStatement_result)that); - return false; - } - - public boolean equals(ExecuteStatement_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(ExecuteStatement_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("ExecuteStatement_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == 
null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class ExecuteStatement_resultStandardSchemeFactory implements SchemeFactory { - public ExecuteStatement_resultStandardScheme getScheme() { - return new ExecuteStatement_resultStandardScheme(); - } - } - - private static class ExecuteStatement_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, ExecuteStatement_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TExecuteStatementResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, ExecuteStatement_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class ExecuteStatement_resultTupleSchemeFactory implements SchemeFactory { - public ExecuteStatement_resultTupleScheme getScheme() { - return new ExecuteStatement_resultTupleScheme(); - } - } - - private static class ExecuteStatement_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, ExecuteStatement_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TExecuteStatementResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - 
} - } - - } - - public static class GetTypeInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTypeInfo_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTypeInfo_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTypeInfo_argsTupleSchemeFactory()); - } - - private TGetTypeInfoReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTypeInfoReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTypeInfo_args.class, metaDataMap); - } - - public GetTypeInfo_args() { - } - - public GetTypeInfo_args( - TGetTypeInfoReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetTypeInfo_args(GetTypeInfo_args other) { - if (other.isSetReq()) { - this.req = new TGetTypeInfoReq(other.req); - } - } - - public GetTypeInfo_args deepCopy() { - return new GetTypeInfo_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetTypeInfoReq getReq() { - return this.req; - } - - public void setReq(TGetTypeInfoReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetTypeInfoReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTypeInfo_args) - return this.equals((GetTypeInfo_args)that); - return false; - } - - public boolean equals(GetTypeInfo_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetTypeInfo_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTypeInfo_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != 
null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTypeInfo_argsStandardSchemeFactory implements SchemeFactory { - public GetTypeInfo_argsStandardScheme getScheme() { - return new GetTypeInfo_argsStandardScheme(); - } - } - - private static class GetTypeInfo_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetTypeInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTypeInfo_argsTupleSchemeFactory implements SchemeFactory { - public GetTypeInfo_argsTupleScheme getScheme() { - return new GetTypeInfo_argsTupleScheme(); - } - } - - private static class GetTypeInfo_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetTypeInfoReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetTypeInfo_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTypeInfo_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", 
org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTypeInfo_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTypeInfo_resultTupleSchemeFactory()); - } - - private TGetTypeInfoResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTypeInfoResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTypeInfo_result.class, metaDataMap); - } - - public GetTypeInfo_result() { - } - - public GetTypeInfo_result( - TGetTypeInfoResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetTypeInfo_result(GetTypeInfo_result other) { - if (other.isSetSuccess()) { - this.success = new TGetTypeInfoResp(other.success); - } - } - - public GetTypeInfo_result deepCopy() { - return new GetTypeInfo_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetTypeInfoResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetTypeInfoResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetTypeInfoResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTypeInfo_result) - return this.equals((GetTypeInfo_result)that); - return false; - } - - public boolean equals(GetTypeInfo_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetTypeInfo_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTypeInfo_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - 
} - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTypeInfo_resultStandardSchemeFactory implements SchemeFactory { - public GetTypeInfo_resultStandardScheme getScheme() { - return new GetTypeInfo_resultStandardScheme(); - } - } - - private static class GetTypeInfo_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetTypeInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTypeInfo_resultTupleSchemeFactory implements SchemeFactory { - public GetTypeInfo_resultTupleScheme getScheme() { - return new GetTypeInfo_resultTupleScheme(); - } - } - - private static class GetTypeInfo_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTypeInfo_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetTypeInfoResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetCatalogs_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - 
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogs_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetCatalogs_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetCatalogs_argsTupleSchemeFactory()); - } - - private TGetCatalogsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCatalogsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogs_args.class, metaDataMap); - } - - public GetCatalogs_args() { - } - - public GetCatalogs_args( - TGetCatalogsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetCatalogs_args(GetCatalogs_args other) { - if (other.isSetReq()) { - this.req = new TGetCatalogsReq(other.req); - } - } - - public GetCatalogs_args deepCopy() { - return new GetCatalogs_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetCatalogsReq getReq() { - return this.req; - } - - public void setReq(TGetCatalogsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetCatalogsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetCatalogs_args) - return this.equals((GetCatalogs_args)that); - return false; - } - - public boolean equals(GetCatalogs_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetCatalogs_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetCatalogs_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != 
null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetCatalogs_argsStandardSchemeFactory implements SchemeFactory { - public GetCatalogs_argsStandardScheme getScheme() { - return new GetCatalogs_argsStandardScheme(); - } - } - - private static class GetCatalogs_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogs_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetCatalogsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogs_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetCatalogs_argsTupleSchemeFactory implements SchemeFactory { - public GetCatalogs_argsTupleScheme getScheme() { - return new GetCatalogs_argsTupleScheme(); - } - } - - private static class GetCatalogs_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetCatalogsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetCatalogs_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogs_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", 
org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetCatalogs_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetCatalogs_resultTupleSchemeFactory()); - } - - private TGetCatalogsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCatalogsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogs_result.class, metaDataMap); - } - - public GetCatalogs_result() { - } - - public GetCatalogs_result( - TGetCatalogsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetCatalogs_result(GetCatalogs_result other) { - if (other.isSetSuccess()) { - this.success = new TGetCatalogsResp(other.success); - } - } - - public GetCatalogs_result deepCopy() { - return new GetCatalogs_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetCatalogsResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetCatalogsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetCatalogsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetCatalogs_result) - return this.equals((GetCatalogs_result)that); - return false; - } - - public boolean equals(GetCatalogs_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetCatalogs_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetCatalogs_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - 
} - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetCatalogs_resultStandardSchemeFactory implements SchemeFactory { - public GetCatalogs_resultStandardScheme getScheme() { - return new GetCatalogs_resultStandardScheme(); - } - } - - private static class GetCatalogs_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogs_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetCatalogsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogs_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetCatalogs_resultTupleSchemeFactory implements SchemeFactory { - public GetCatalogs_resultTupleScheme getScheme() { - return new GetCatalogs_resultTupleScheme(); - } - } - - private static class GetCatalogs_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogs_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetCatalogsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetSchemas_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - 
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetSchemas_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetSchemas_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetSchemas_argsTupleSchemeFactory()); - } - - private TGetSchemasReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetSchemasReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetSchemas_args.class, metaDataMap); - } - - public GetSchemas_args() { - } - - public GetSchemas_args( - TGetSchemasReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetSchemas_args(GetSchemas_args other) { - if (other.isSetReq()) { - this.req = new TGetSchemasReq(other.req); - } - } - - public GetSchemas_args deepCopy() { - return new GetSchemas_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetSchemasReq getReq() { - return this.req; - } - - public void setReq(TGetSchemasReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetSchemasReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetSchemas_args) - return this.equals((GetSchemas_args)that); - return false; - } - - public boolean equals(GetSchemas_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetSchemas_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetSchemas_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - 
req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetSchemas_argsStandardSchemeFactory implements SchemeFactory { - public GetSchemas_argsStandardScheme getScheme() { - return new GetSchemas_argsStandardScheme(); - } - } - - private static class GetSchemas_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetSchemas_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetSchemasReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetSchemas_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetSchemas_argsTupleSchemeFactory implements SchemeFactory { - public GetSchemas_argsTupleScheme getScheme() { - return new GetSchemas_argsTupleScheme(); - } - } - - private static class GetSchemas_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetSchemas_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetSchemas_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetSchemasReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetSchemas_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetSchemas_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - 
private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetSchemas_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetSchemas_resultTupleSchemeFactory()); - } - - private TGetSchemasResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetSchemasResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetSchemas_result.class, metaDataMap); - } - - public GetSchemas_result() { - } - - public GetSchemas_result( - TGetSchemasResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetSchemas_result(GetSchemas_result other) { - if (other.isSetSuccess()) { - this.success = new TGetSchemasResp(other.success); - } - } - - public GetSchemas_result deepCopy() { - return new GetSchemas_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetSchemasResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetSchemasResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetSchemasResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetSchemas_result) - return this.equals((GetSchemas_result)that); - return false; - } - - public boolean equals(GetSchemas_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetSchemas_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetSchemas_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = 
false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetSchemas_resultStandardSchemeFactory implements SchemeFactory { - public GetSchemas_resultStandardScheme getScheme() { - return new GetSchemas_resultStandardScheme(); - } - } - - private static class GetSchemas_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetSchemas_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetSchemasResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetSchemas_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetSchemas_resultTupleSchemeFactory implements SchemeFactory { - public GetSchemas_resultTupleScheme getScheme() { - return new GetSchemas_resultTupleScheme(); - } - } - - private static class GetSchemas_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetSchemas_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetSchemas_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetSchemasResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetTables_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTables_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTables_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTables_argsTupleSchemeFactory()); - } - - private TGetTablesReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTablesReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTables_args.class, metaDataMap); - } - - public GetTables_args() { - } - - public GetTables_args( - TGetTablesReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetTables_args(GetTables_args other) { - if (other.isSetReq()) { - this.req = new TGetTablesReq(other.req); - } - } - - public GetTables_args deepCopy() { - return new GetTables_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetTablesReq getReq() { - return this.req; - } - - public void setReq(TGetTablesReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetTablesReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTables_args) - return this.equals((GetTables_args)that); - return false; - } - - public boolean equals(GetTables_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetTables_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTables_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); 
- } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTables_argsStandardSchemeFactory implements SchemeFactory { - public GetTables_argsStandardScheme getScheme() { - return new GetTables_argsStandardScheme(); - } - } - - private static class GetTables_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTables_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetTablesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTables_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTables_argsTupleSchemeFactory implements SchemeFactory { - public GetTables_argsTupleScheme getScheme() { - return new GetTables_argsTupleScheme(); - } - } - - private static class GetTables_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTables_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTables_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetTablesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetTables_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTables_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, 
SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTables_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTables_resultTupleSchemeFactory()); - } - - private TGetTablesResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTablesResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTables_result.class, metaDataMap); - } - - public GetTables_result() { - } - - public GetTables_result( - TGetTablesResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetTables_result(GetTables_result other) { - if (other.isSetSuccess()) { - this.success = new TGetTablesResp(other.success); - } - } - - public GetTables_result deepCopy() { - return new GetTables_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetTablesResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetTablesResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetTablesResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTables_result) - return this.equals((GetTables_result)that); - return false; - } - - public boolean equals(GetTables_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetTables_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTables_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - 
sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTables_resultStandardSchemeFactory implements SchemeFactory { - public GetTables_resultStandardScheme getScheme() { - return new GetTables_resultStandardScheme(); - } - } - - private static class GetTables_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTables_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetTablesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTables_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTables_resultTupleSchemeFactory implements SchemeFactory { - public GetTables_resultTupleScheme getScheme() { - return new GetTables_resultTupleScheme(); - } - } - - private static class GetTables_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTables_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTables_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetTablesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetTableTypes_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableTypes_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTableTypes_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTableTypes_argsTupleSchemeFactory()); - } - - private TGetTableTypesReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTableTypesReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableTypes_args.class, metaDataMap); - } - - public GetTableTypes_args() { - } - - public GetTableTypes_args( - TGetTableTypesReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetTableTypes_args(GetTableTypes_args other) { - if (other.isSetReq()) { - this.req = new TGetTableTypesReq(other.req); - } - } - - public GetTableTypes_args deepCopy() { - return new GetTableTypes_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetTableTypesReq getReq() { - return this.req; - } - - public void setReq(TGetTableTypesReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetTableTypesReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTableTypes_args) - return this.equals((GetTableTypes_args)that); - return false; - } - - public boolean equals(GetTableTypes_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetTableTypes_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTableTypes_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for 
sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTableTypes_argsStandardSchemeFactory implements SchemeFactory { - public GetTableTypes_argsStandardScheme getScheme() { - return new GetTableTypes_argsStandardScheme(); - } - } - - private static class GetTableTypes_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableTypes_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetTableTypesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableTypes_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTableTypes_argsTupleSchemeFactory implements SchemeFactory { - public GetTableTypes_argsTupleScheme getScheme() { - return new GetTableTypes_argsTupleScheme(); - } - } - - private static class GetTableTypes_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetTableTypesReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetTableTypes_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableTypes_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetTableTypes_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetTableTypes_resultTupleSchemeFactory()); - } - - private TGetTableTypesResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetTableTypesResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableTypes_result.class, metaDataMap); - } - - public GetTableTypes_result() { - } - - public GetTableTypes_result( - TGetTableTypesResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetTableTypes_result(GetTableTypes_result other) { - if (other.isSetSuccess()) { - this.success = new TGetTableTypesResp(other.success); - } - } - - public GetTableTypes_result deepCopy() { - return new GetTableTypes_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetTableTypesResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetTableTypesResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetTableTypesResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetTableTypes_result) - return this.equals((GetTableTypes_result)that); - return false; - } - - public boolean equals(GetTableTypes_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetTableTypes_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetTableTypes_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - 
sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetTableTypes_resultStandardSchemeFactory implements SchemeFactory { - public GetTableTypes_resultStandardScheme getScheme() { - return new GetTableTypes_resultStandardScheme(); - } - } - - private static class GetTableTypes_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableTypes_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetTableTypesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableTypes_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetTableTypes_resultTupleSchemeFactory implements SchemeFactory { - public GetTableTypes_resultTupleScheme getScheme() { - return new GetTableTypes_resultTupleScheme(); - } - } - - private static class GetTableTypes_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetTableTypes_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetTableTypesResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetColumns_args implements 
org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetColumns_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetColumns_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetColumns_argsTupleSchemeFactory()); - } - - private TGetColumnsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetColumnsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetColumns_args.class, metaDataMap); - } - - public GetColumns_args() { - } - - public GetColumns_args( - TGetColumnsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetColumns_args(GetColumns_args other) { - if (other.isSetReq()) { - this.req = new TGetColumnsReq(other.req); - } - } - - public GetColumns_args deepCopy() { - return new GetColumns_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetColumnsReq getReq() { - return this.req; - } - - public void setReq(TGetColumnsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetColumnsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetColumns_args) - return this.equals((GetColumns_args)that); - return false; - } - - public boolean equals(GetColumns_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetColumns_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetColumns_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - 
req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetColumns_argsStandardSchemeFactory implements SchemeFactory { - public GetColumns_argsStandardScheme getScheme() { - return new GetColumns_argsStandardScheme(); - } - } - - private static class GetColumns_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetColumns_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetColumnsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetColumns_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetColumns_argsTupleSchemeFactory implements SchemeFactory { - public GetColumns_argsTupleScheme getScheme() { - return new GetColumns_argsTupleScheme(); - } - } - - private static class GetColumns_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetColumns_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetColumns_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetColumnsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetColumns_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetColumns_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - 
private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetColumns_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetColumns_resultTupleSchemeFactory()); - } - - private TGetColumnsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetColumnsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetColumns_result.class, metaDataMap); - } - - public GetColumns_result() { - } - - public GetColumns_result( - TGetColumnsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetColumns_result(GetColumns_result other) { - if (other.isSetSuccess()) { - this.success = new TGetColumnsResp(other.success); - } - } - - public GetColumns_result deepCopy() { - return new GetColumns_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetColumnsResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetColumnsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetColumnsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetColumns_result) - return this.equals((GetColumns_result)that); - return false; - } - - public boolean equals(GetColumns_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetColumns_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetColumns_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = 
false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetColumns_resultStandardSchemeFactory implements SchemeFactory { - public GetColumns_resultStandardScheme getScheme() { - return new GetColumns_resultStandardScheme(); - } - } - - private static class GetColumns_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetColumns_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetColumnsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetColumns_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetColumns_resultTupleSchemeFactory implements SchemeFactory { - public GetColumns_resultTupleScheme getScheme() { - return new GetColumns_resultTupleScheme(); - } - } - - private static class GetColumns_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetColumns_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetColumns_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetColumnsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetFunctions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFunctions_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetFunctions_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetFunctions_argsTupleSchemeFactory()); - } - - private TGetFunctionsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetFunctionsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFunctions_args.class, metaDataMap); - } - - public GetFunctions_args() { - } - - public GetFunctions_args( - TGetFunctionsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetFunctions_args(GetFunctions_args other) { - if (other.isSetReq()) { - this.req = new TGetFunctionsReq(other.req); - } - } - - public GetFunctions_args deepCopy() { - return new GetFunctions_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetFunctionsReq getReq() { - return this.req; - } - - public void setReq(TGetFunctionsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetFunctionsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetFunctions_args) - return this.equals((GetFunctions_args)that); - return false; - } - - public boolean equals(GetFunctions_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetFunctions_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetFunctions_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity 
- if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetFunctions_argsStandardSchemeFactory implements SchemeFactory { - public GetFunctions_argsStandardScheme getScheme() { - return new GetFunctions_argsStandardScheme(); - } - } - - private static class GetFunctions_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetFunctions_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetFunctionsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetFunctions_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetFunctions_argsTupleSchemeFactory implements SchemeFactory { - public GetFunctions_argsTupleScheme getScheme() { - return new GetFunctions_argsTupleScheme(); - } - } - - private static class GetFunctions_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetFunctions_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetFunctions_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetFunctionsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetFunctions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFunctions_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", 
org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetFunctions_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetFunctions_resultTupleSchemeFactory()); - } - - private TGetFunctionsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetFunctionsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFunctions_result.class, metaDataMap); - } - - public GetFunctions_result() { - } - - public GetFunctions_result( - TGetFunctionsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetFunctions_result(GetFunctions_result other) { - if (other.isSetSuccess()) { - this.success = new TGetFunctionsResp(other.success); - } - } - - public GetFunctions_result deepCopy() { - return new GetFunctions_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetFunctionsResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetFunctionsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetFunctionsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetFunctions_result) - return this.equals((GetFunctions_result)that); - return false; - } - - public boolean equals(GetFunctions_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetFunctions_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetFunctions_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - 
sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetFunctions_resultStandardSchemeFactory implements SchemeFactory { - public GetFunctions_resultStandardScheme getScheme() { - return new GetFunctions_resultStandardScheme(); - } - } - - private static class GetFunctions_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetFunctions_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetFunctionsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetFunctions_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetFunctions_resultTupleSchemeFactory implements SchemeFactory { - public GetFunctions_resultTupleScheme getScheme() { - return new GetFunctions_resultTupleScheme(); - } - } - - private static class GetFunctions_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetFunctions_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetFunctions_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetFunctionsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetPrimaryKeys_args implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrimaryKeys_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetPrimaryKeys_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetPrimaryKeys_argsTupleSchemeFactory()); - } - - private TGetPrimaryKeysReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetPrimaryKeysReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPrimaryKeys_args.class, metaDataMap); - } - - public GetPrimaryKeys_args() { - } - - public GetPrimaryKeys_args( - TGetPrimaryKeysReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetPrimaryKeys_args(GetPrimaryKeys_args other) { - if (other.isSetReq()) { - this.req = new TGetPrimaryKeysReq(other.req); - } - } - - public GetPrimaryKeys_args deepCopy() { - return new GetPrimaryKeys_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetPrimaryKeysReq getReq() { - return this.req; - } - - public void setReq(TGetPrimaryKeysReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetPrimaryKeysReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetPrimaryKeys_args) - return this.equals((GetPrimaryKeys_args)that); - return false; - } - - public boolean equals(GetPrimaryKeys_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetPrimaryKeys_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetPrimaryKeys_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // 
check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetPrimaryKeys_argsStandardSchemeFactory implements SchemeFactory { - public GetPrimaryKeys_argsStandardScheme getScheme() { - return new GetPrimaryKeys_argsStandardScheme(); - } - } - - private static class GetPrimaryKeys_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetPrimaryKeysReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetPrimaryKeys_argsTupleSchemeFactory implements SchemeFactory { - public GetPrimaryKeys_argsTupleScheme getScheme() { - return new GetPrimaryKeys_argsTupleScheme(); - } - } - - private static class GetPrimaryKeys_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetPrimaryKeysReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetPrimaryKeys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrimaryKeys_result"); - - private static final org.apache.thrift.protocol.TField 
SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetPrimaryKeys_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetPrimaryKeys_resultTupleSchemeFactory()); - } - - private TGetPrimaryKeysResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetPrimaryKeysResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPrimaryKeys_result.class, metaDataMap); - } - - public GetPrimaryKeys_result() { - } - - public GetPrimaryKeys_result( - TGetPrimaryKeysResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetPrimaryKeys_result(GetPrimaryKeys_result other) { - if (other.isSetSuccess()) { - this.success = new TGetPrimaryKeysResp(other.success); - } - } - - public GetPrimaryKeys_result deepCopy() { - return new GetPrimaryKeys_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetPrimaryKeysResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetPrimaryKeysResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetPrimaryKeysResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetPrimaryKeys_result) - return this.equals((GetPrimaryKeys_result)that); - return false; - } - - public boolean equals(GetPrimaryKeys_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetPrimaryKeys_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetPrimaryKeys_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); 
- } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetPrimaryKeys_resultStandardSchemeFactory implements SchemeFactory { - public GetPrimaryKeys_resultStandardScheme getScheme() { - return new GetPrimaryKeys_resultStandardScheme(); - } - } - - private static class GetPrimaryKeys_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetPrimaryKeysResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetPrimaryKeys_resultTupleSchemeFactory implements SchemeFactory { - public GetPrimaryKeys_resultTupleScheme getScheme() { - return new GetPrimaryKeys_resultTupleScheme(); - } - } - - private static class GetPrimaryKeys_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetPrimaryKeysResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class 
GetCrossReference_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCrossReference_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetCrossReference_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetCrossReference_argsTupleSchemeFactory()); - } - - private TGetCrossReferenceReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCrossReferenceReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCrossReference_args.class, metaDataMap); - } - - public GetCrossReference_args() { - } - - public GetCrossReference_args( - TGetCrossReferenceReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetCrossReference_args(GetCrossReference_args other) { - if (other.isSetReq()) { - this.req = new TGetCrossReferenceReq(other.req); - } - } - - public GetCrossReference_args deepCopy() { - return new GetCrossReference_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetCrossReferenceReq getReq() { - return this.req; - } - - public void setReq(TGetCrossReferenceReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetCrossReferenceReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetCrossReference_args) - return this.equals((GetCrossReference_args)that); - return false; - } - - public boolean equals(GetCrossReference_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetCrossReference_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetCrossReference_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - 
// check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetCrossReference_argsStandardSchemeFactory implements SchemeFactory { - public GetCrossReference_argsStandardScheme getScheme() { - return new GetCrossReference_argsStandardScheme(); - } - } - - private static class GetCrossReference_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetCrossReference_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetCrossReferenceReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetCrossReference_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetCrossReference_argsTupleSchemeFactory implements SchemeFactory { - public GetCrossReference_argsTupleScheme getScheme() { - return new GetCrossReference_argsTupleScheme(); - } - } - - private static class GetCrossReference_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetCrossReferenceReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetCrossReference_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("GetCrossReference_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetCrossReference_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetCrossReference_resultTupleSchemeFactory()); - } - - private TGetCrossReferenceResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCrossReferenceResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCrossReference_result.class, metaDataMap); - } - - public GetCrossReference_result() { - } - - public GetCrossReference_result( - TGetCrossReferenceResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetCrossReference_result(GetCrossReference_result other) { - if (other.isSetSuccess()) { - this.success = new TGetCrossReferenceResp(other.success); - } - } - - public GetCrossReference_result deepCopy() { - return new GetCrossReference_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetCrossReferenceResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetCrossReferenceResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetCrossReferenceResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetCrossReference_result) - return this.equals((GetCrossReference_result)that); - return false; - } - - public boolean equals(GetCrossReference_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetCrossReference_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetCrossReference_result("); - boolean first = true; - - sb.append("success:"); - if 
(this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetCrossReference_resultStandardSchemeFactory implements SchemeFactory { - public GetCrossReference_resultStandardScheme getScheme() { - return new GetCrossReference_resultStandardScheme(); - } - } - - private static class GetCrossReference_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetCrossReference_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetCrossReferenceResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetCrossReference_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetCrossReference_resultTupleSchemeFactory implements SchemeFactory { - public GetCrossReference_resultTupleScheme getScheme() { - return new GetCrossReference_resultTupleScheme(); - } - } - - private static class GetCrossReference_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetCrossReferenceResp(); - struct.success.read(iprot); - 
struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetOperationStatus_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOperationStatus_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetOperationStatus_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetOperationStatus_argsTupleSchemeFactory()); - } - - private TGetOperationStatusReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetOperationStatusReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOperationStatus_args.class, metaDataMap); - } - - public GetOperationStatus_args() { - } - - public GetOperationStatus_args( - TGetOperationStatusReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetOperationStatus_args(GetOperationStatus_args other) { - if (other.isSetReq()) { - this.req = new TGetOperationStatusReq(other.req); - } - } - - public GetOperationStatus_args deepCopy() { - return new GetOperationStatus_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetOperationStatusReq getReq() { - return this.req; - } - - public void setReq(TGetOperationStatusReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetOperationStatusReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetOperationStatus_args) - return this.equals((GetOperationStatus_args)that); - return false; - } - - public boolean equals(GetOperationStatus_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetOperationStatus_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetOperationStatus_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws 
org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetOperationStatus_argsStandardSchemeFactory implements SchemeFactory { - public GetOperationStatus_argsStandardScheme getScheme() { - return new GetOperationStatus_argsStandardScheme(); - } - } - - private static class GetOperationStatus_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetOperationStatusReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetOperationStatus_argsTupleSchemeFactory implements SchemeFactory { - public GetOperationStatus_argsTupleScheme getScheme() { - return new GetOperationStatus_argsTupleScheme(); - } - } - - private static class GetOperationStatus_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetOperationStatusReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetOperationStatus_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("GetOperationStatus_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetOperationStatus_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetOperationStatus_resultTupleSchemeFactory()); - } - - private TGetOperationStatusResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetOperationStatusResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOperationStatus_result.class, metaDataMap); - } - - public GetOperationStatus_result() { - } - - public GetOperationStatus_result( - TGetOperationStatusResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetOperationStatus_result(GetOperationStatus_result other) { - if (other.isSetSuccess()) { - this.success = new TGetOperationStatusResp(other.success); - } - } - - public GetOperationStatus_result deepCopy() { - return new GetOperationStatus_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetOperationStatusResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetOperationStatusResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetOperationStatusResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetOperationStatus_result) - return this.equals((GetOperationStatus_result)that); - return false; - } - - public boolean equals(GetOperationStatus_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetOperationStatus_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetOperationStatus_result("); - boolean first = true; - - sb.append("success:"); 
- if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetOperationStatus_resultStandardSchemeFactory implements SchemeFactory { - public GetOperationStatus_resultStandardScheme getScheme() { - return new GetOperationStatus_resultStandardScheme(); - } - } - - private static class GetOperationStatus_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetOperationStatusResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetOperationStatus_resultTupleSchemeFactory implements SchemeFactory { - public GetOperationStatus_resultTupleScheme getScheme() { - return new GetOperationStatus_resultTupleScheme(); - } - } - - private static class GetOperationStatus_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetOperationStatus_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetOperationStatusResp(); - 
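// (descriptive note, not part of the generated file) In the tuple scheme the writer first emits a
// BitSet of "isSet" flags and then serializes only the fields that are actually set; the
// readBitSet(1) call above recovered that one-bit header, and bit 0 being set means a serialized
// `success` struct follows on the wire, so it is deserialized into the freshly allocated instance here.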
struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class CancelOperation_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelOperation_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelOperation_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelOperation_argsTupleSchemeFactory()); - } - - private TCancelOperationReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelOperationReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelOperation_args.class, metaDataMap); - } - - public CancelOperation_args() { - } - - public CancelOperation_args( - TCancelOperationReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public CancelOperation_args(CancelOperation_args other) { - if (other.isSetReq()) { - this.req = new TCancelOperationReq(other.req); - } - } - - public CancelOperation_args deepCopy() { - return new CancelOperation_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCancelOperationReq getReq() { - return this.req; - } - - public void setReq(TCancelOperationReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCancelOperationReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelOperation_args) - return this.equals((CancelOperation_args)that); - return false; - } - - public boolean equals(CancelOperation_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(CancelOperation_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelOperation_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required 
fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelOperation_argsStandardSchemeFactory implements SchemeFactory { - public CancelOperation_argsStandardScheme getScheme() { - return new CancelOperation_argsStandardScheme(); - } - } - - private static class CancelOperation_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelOperation_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCancelOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CancelOperation_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CancelOperation_argsTupleSchemeFactory implements SchemeFactory { - public CancelOperation_argsTupleScheme getScheme() { - return new CancelOperation_argsTupleScheme(); - } - } - - private static class CancelOperation_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCancelOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CancelOperation_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelOperation_result"); - - private static final 
org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelOperation_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelOperation_resultTupleSchemeFactory()); - } - - private TCancelOperationResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelOperationResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelOperation_result.class, metaDataMap); - } - - public CancelOperation_result() { - } - - public CancelOperation_result( - TCancelOperationResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public CancelOperation_result(CancelOperation_result other) { - if (other.isSetSuccess()) { - this.success = new TCancelOperationResp(other.success); - } - } - - public CancelOperation_result deepCopy() { - return new CancelOperation_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCancelOperationResp getSuccess() { - return this.success; - } - - public void setSuccess(TCancelOperationResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCancelOperationResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelOperation_result) - return this.equals((CancelOperation_result)that); - return false; - } - - public boolean equals(CancelOperation_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(CancelOperation_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelOperation_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - 
sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelOperation_resultStandardSchemeFactory implements SchemeFactory { - public CancelOperation_resultStandardScheme getScheme() { - return new CancelOperation_resultStandardScheme(); - } - } - - private static class CancelOperation_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelOperation_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCancelOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CancelOperation_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CancelOperation_resultTupleSchemeFactory implements SchemeFactory { - public CancelOperation_resultTupleScheme getScheme() { - return new CancelOperation_resultTupleScheme(); - } - } - - private static class CancelOperation_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TCancelOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public 
static class CloseOperation_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseOperation_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseOperation_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseOperation_argsTupleSchemeFactory()); - } - - private TCloseOperationReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseOperationReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseOperation_args.class, metaDataMap); - } - - public CloseOperation_args() { - } - - public CloseOperation_args( - TCloseOperationReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public CloseOperation_args(CloseOperation_args other) { - if (other.isSetReq()) { - this.req = new TCloseOperationReq(other.req); - } - } - - public CloseOperation_args deepCopy() { - return new CloseOperation_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCloseOperationReq getReq() { - return this.req; - } - - public void setReq(TCloseOperationReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCloseOperationReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseOperation_args) - return this.equals((CloseOperation_args)that); - return false; - } - - public boolean equals(CloseOperation_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(CloseOperation_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseOperation_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // 
check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseOperation_argsStandardSchemeFactory implements SchemeFactory { - public CloseOperation_argsStandardScheme getScheme() { - return new CloseOperation_argsStandardScheme(); - } - } - - private static class CloseOperation_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseOperation_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCloseOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseOperation_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CloseOperation_argsTupleSchemeFactory implements SchemeFactory { - public CloseOperation_argsTupleScheme getScheme() { - return new CloseOperation_argsTupleScheme(); - } - } - - private static class CloseOperation_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseOperation_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCloseOperationReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CloseOperation_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CloseOperation_result"); - - private static final org.apache.thrift.protocol.TField 
SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CloseOperation_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CloseOperation_resultTupleSchemeFactory()); - } - - private TCloseOperationResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCloseOperationResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CloseOperation_result.class, metaDataMap); - } - - public CloseOperation_result() { - } - - public CloseOperation_result( - TCloseOperationResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public CloseOperation_result(CloseOperation_result other) { - if (other.isSetSuccess()) { - this.success = new TCloseOperationResp(other.success); - } - } - - public CloseOperation_result deepCopy() { - return new CloseOperation_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCloseOperationResp getSuccess() { - return this.success; - } - - public void setSuccess(TCloseOperationResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCloseOperationResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CloseOperation_result) - return this.equals((CloseOperation_result)that); - return false; - } - - public boolean equals(CloseOperation_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(CloseOperation_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CloseOperation_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); 
- } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CloseOperation_resultStandardSchemeFactory implements SchemeFactory { - public CloseOperation_resultStandardScheme getScheme() { - return new CloseOperation_resultStandardScheme(); - } - } - - private static class CloseOperation_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CloseOperation_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCloseOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CloseOperation_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CloseOperation_resultTupleSchemeFactory implements SchemeFactory { - public CloseOperation_resultTupleScheme getScheme() { - return new CloseOperation_resultTupleScheme(); - } - } - - private static class CloseOperation_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CloseOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CloseOperation_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TCloseOperationResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class 
GetResultSetMetadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetResultSetMetadata_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetResultSetMetadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetResultSetMetadata_argsTupleSchemeFactory()); - } - - private TGetResultSetMetadataReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetResultSetMetadataReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetResultSetMetadata_args.class, metaDataMap); - } - - public GetResultSetMetadata_args() { - } - - public GetResultSetMetadata_args( - TGetResultSetMetadataReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetResultSetMetadata_args(GetResultSetMetadata_args other) { - if (other.isSetReq()) { - this.req = new TGetResultSetMetadataReq(other.req); - } - } - - public GetResultSetMetadata_args deepCopy() { - return new GetResultSetMetadata_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetResultSetMetadataReq getReq() { - return this.req; - } - - public void setReq(TGetResultSetMetadataReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetResultSetMetadataReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetResultSetMetadata_args) - return this.equals((GetResultSetMetadata_args)that); - return false; - } - - public boolean equals(GetResultSetMetadata_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetResultSetMetadata_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetResultSetMetadata_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() 
throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetResultSetMetadata_argsStandardSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_argsStandardScheme getScheme() { - return new GetResultSetMetadata_argsStandardScheme(); - } - } - - private static class GetResultSetMetadata_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetResultSetMetadataReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetResultSetMetadata_argsTupleSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_argsTupleScheme getScheme() { - return new GetResultSetMetadata_argsTupleScheme(); - } - } - - private static class GetResultSetMetadata_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetResultSetMetadataReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetResultSetMetadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetResultSetMetadata_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetResultSetMetadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetResultSetMetadata_resultTupleSchemeFactory()); - } - - private TGetResultSetMetadataResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetResultSetMetadataResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetResultSetMetadata_result.class, metaDataMap); - } - - public GetResultSetMetadata_result() { - } - - public GetResultSetMetadata_result( - TGetResultSetMetadataResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetResultSetMetadata_result(GetResultSetMetadata_result other) { - if (other.isSetSuccess()) { - this.success = new TGetResultSetMetadataResp(other.success); - } - } - - public GetResultSetMetadata_result deepCopy() { - return new GetResultSetMetadata_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetResultSetMetadataResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetResultSetMetadataResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetResultSetMetadataResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetResultSetMetadata_result) - return this.equals((GetResultSetMetadata_result)that); - return false; - } - - public boolean equals(GetResultSetMetadata_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetResultSetMetadata_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetResultSetMetadata_result("); - boolean first = true; 
- - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetResultSetMetadata_resultStandardSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_resultStandardScheme getScheme() { - return new GetResultSetMetadata_resultStandardScheme(); - } - } - - private static class GetResultSetMetadata_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetResultSetMetadataResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetResultSetMetadata_resultTupleSchemeFactory implements SchemeFactory { - public GetResultSetMetadata_resultTupleScheme getScheme() { - return new GetResultSetMetadata_resultTupleScheme(); - } - } - - private static class GetResultSetMetadata_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetResultSetMetadata_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - 
struct.success = new TGetResultSetMetadataResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class FetchResults_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FetchResults_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new FetchResults_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new FetchResults_argsTupleSchemeFactory()); - } - - private TFetchResultsReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFetchResultsReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FetchResults_args.class, metaDataMap); - } - - public FetchResults_args() { - } - - public FetchResults_args( - TFetchResultsReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public FetchResults_args(FetchResults_args other) { - if (other.isSetReq()) { - this.req = new TFetchResultsReq(other.req); - } - } - - public FetchResults_args deepCopy() { - return new FetchResults_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TFetchResultsReq getReq() { - return this.req; - } - - public void setReq(TFetchResultsReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TFetchResultsReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof FetchResults_args) - return this.equals((FetchResults_args)that); - return false; - } - - public boolean equals(FetchResults_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(FetchResults_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("FetchResults_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity 
- if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class FetchResults_argsStandardSchemeFactory implements SchemeFactory { - public FetchResults_argsStandardScheme getScheme() { - return new FetchResults_argsStandardScheme(); - } - } - - private static class FetchResults_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, FetchResults_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TFetchResultsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, FetchResults_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class FetchResults_argsTupleSchemeFactory implements SchemeFactory { - public FetchResults_argsTupleScheme getScheme() { - return new FetchResults_argsTupleScheme(); - } - } - - private static class FetchResults_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, FetchResults_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, FetchResults_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TFetchResultsReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class FetchResults_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FetchResults_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", 
org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new FetchResults_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new FetchResults_resultTupleSchemeFactory()); - } - - private TFetchResultsResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TFetchResultsResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FetchResults_result.class, metaDataMap); - } - - public FetchResults_result() { - } - - public FetchResults_result( - TFetchResultsResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public FetchResults_result(FetchResults_result other) { - if (other.isSetSuccess()) { - this.success = new TFetchResultsResp(other.success); - } - } - - public FetchResults_result deepCopy() { - return new FetchResults_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TFetchResultsResp getSuccess() { - return this.success; - } - - public void setSuccess(TFetchResultsResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TFetchResultsResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof FetchResults_result) - return this.equals((FetchResults_result)that); - return false; - } - - public boolean equals(FetchResults_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(FetchResults_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("FetchResults_result("); - boolean first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - 
sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class FetchResults_resultStandardSchemeFactory implements SchemeFactory { - public FetchResults_resultStandardScheme getScheme() { - return new FetchResults_resultStandardScheme(); - } - } - - private static class FetchResults_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, FetchResults_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TFetchResultsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, FetchResults_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class FetchResults_resultTupleSchemeFactory implements SchemeFactory { - public FetchResults_resultTupleScheme getScheme() { - return new FetchResults_resultTupleScheme(); - } - } - - private static class FetchResults_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, FetchResults_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, FetchResults_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TFetchResultsResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class GetDelegationToken_args implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetDelegationToken_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetDelegationToken_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetDelegationToken_argsTupleSchemeFactory()); - } - - private TGetDelegationTokenReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetDelegationTokenReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetDelegationToken_args.class, metaDataMap); - } - - public GetDelegationToken_args() { - } - - public GetDelegationToken_args( - TGetDelegationTokenReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public GetDelegationToken_args(GetDelegationToken_args other) { - if (other.isSetReq()) { - this.req = new TGetDelegationTokenReq(other.req); - } - } - - public GetDelegationToken_args deepCopy() { - return new GetDelegationToken_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TGetDelegationTokenReq getReq() { - return this.req; - } - - public void setReq(TGetDelegationTokenReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TGetDelegationTokenReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetDelegationToken_args) - return this.equals((GetDelegationToken_args)that); - return false; - } - - public boolean equals(GetDelegationToken_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(GetDelegationToken_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetDelegationToken_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws 
org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetDelegationToken_argsStandardSchemeFactory implements SchemeFactory { - public GetDelegationToken_argsStandardScheme getScheme() { - return new GetDelegationToken_argsStandardScheme(); - } - } - - private static class GetDelegationToken_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TGetDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetDelegationToken_argsTupleSchemeFactory implements SchemeFactory { - public GetDelegationToken_argsTupleScheme getScheme() { - return new GetDelegationToken_argsTupleScheme(); - } - } - - private static class GetDelegationToken_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TGetDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class GetDelegationToken_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("GetDelegationToken_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new GetDelegationToken_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new GetDelegationToken_resultTupleSchemeFactory()); - } - - private TGetDelegationTokenResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetDelegationTokenResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetDelegationToken_result.class, metaDataMap); - } - - public GetDelegationToken_result() { - } - - public GetDelegationToken_result( - TGetDelegationTokenResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public GetDelegationToken_result(GetDelegationToken_result other) { - if (other.isSetSuccess()) { - this.success = new TGetDelegationTokenResp(other.success); - } - } - - public GetDelegationToken_result deepCopy() { - return new GetDelegationToken_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TGetDelegationTokenResp getSuccess() { - return this.success; - } - - public void setSuccess(TGetDelegationTokenResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TGetDelegationTokenResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof GetDelegationToken_result) - return this.equals((GetDelegationToken_result)that); - return false; - } - - public boolean equals(GetDelegationToken_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(GetDelegationToken_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("GetDelegationToken_result("); - boolean first = true; - - sb.append("success:"); 
- if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class GetDelegationToken_resultStandardSchemeFactory implements SchemeFactory { - public GetDelegationToken_resultStandardScheme getScheme() { - return new GetDelegationToken_resultStandardScheme(); - } - } - - private static class GetDelegationToken_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TGetDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class GetDelegationToken_resultTupleSchemeFactory implements SchemeFactory { - public GetDelegationToken_resultTupleScheme getScheme() { - return new GetDelegationToken_resultTupleScheme(); - } - } - - private static class GetDelegationToken_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, GetDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new TGetDelegationTokenResp(); - 
struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class CancelDelegationToken_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelDelegationToken_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelDelegationToken_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelDelegationToken_argsTupleSchemeFactory()); - } - - private TCancelDelegationTokenReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelDelegationTokenReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelDelegationToken_args.class, metaDataMap); - } - - public CancelDelegationToken_args() { - } - - public CancelDelegationToken_args( - TCancelDelegationTokenReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public CancelDelegationToken_args(CancelDelegationToken_args other) { - if (other.isSetReq()) { - this.req = new TCancelDelegationTokenReq(other.req); - } - } - - public CancelDelegationToken_args deepCopy() { - return new CancelDelegationToken_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TCancelDelegationTokenReq getReq() { - return this.req; - } - - public void setReq(TCancelDelegationTokenReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TCancelDelegationTokenReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelDelegationToken_args) - return this.equals((CancelDelegationToken_args)that); - return false; - } - - public boolean equals(CancelDelegationToken_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(CancelDelegationToken_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelDelegationToken_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public 
void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelDelegationToken_argsStandardSchemeFactory implements SchemeFactory { - public CancelDelegationToken_argsStandardScheme getScheme() { - return new CancelDelegationToken_argsStandardScheme(); - } - } - - private static class CancelDelegationToken_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TCancelDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CancelDelegationToken_argsTupleSchemeFactory implements SchemeFactory { - public CancelDelegationToken_argsTupleScheme getScheme() { - return new CancelDelegationToken_argsTupleScheme(); - } - } - - private static class CancelDelegationToken_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TCancelDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class CancelDelegationToken_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private 
static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CancelDelegationToken_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new CancelDelegationToken_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new CancelDelegationToken_resultTupleSchemeFactory()); - } - - private TCancelDelegationTokenResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TCancelDelegationTokenResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CancelDelegationToken_result.class, metaDataMap); - } - - public CancelDelegationToken_result() { - } - - public CancelDelegationToken_result( - TCancelDelegationTokenResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public CancelDelegationToken_result(CancelDelegationToken_result other) { - if (other.isSetSuccess()) { - this.success = new TCancelDelegationTokenResp(other.success); - } - } - - public CancelDelegationToken_result deepCopy() { - return new CancelDelegationToken_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TCancelDelegationTokenResp getSuccess() { - return this.success; - } - - public void setSuccess(TCancelDelegationTokenResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TCancelDelegationTokenResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof CancelDelegationToken_result) - return this.equals((CancelDelegationToken_result)that); - return false; - } - - public boolean equals(CancelDelegationToken_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(CancelDelegationToken_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("CancelDelegationToken_result("); - boolean 
first = true; - - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class CancelDelegationToken_resultStandardSchemeFactory implements SchemeFactory { - public CancelDelegationToken_resultStandardScheme getScheme() { - return new CancelDelegationToken_resultStandardScheme(); - } - } - - private static class CancelDelegationToken_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TCancelDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class CancelDelegationToken_resultTupleSchemeFactory implements SchemeFactory { - public CancelDelegationToken_resultTupleScheme getScheme() { - return new CancelDelegationToken_resultTupleScheme(); - } - } - - private static class CancelDelegationToken_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, CancelDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if 
(incoming.get(0)) { - struct.success = new TCancelDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - - public static class RenewDelegationToken_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RenewDelegationToken_args"); - - private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new RenewDelegationToken_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new RenewDelegationToken_argsTupleSchemeFactory()); - } - - private TRenewDelegationTokenReq req; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - REQ((short)1, "req"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // REQ - return REQ; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRenewDelegationTokenReq.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RenewDelegationToken_args.class, metaDataMap); - } - - public RenewDelegationToken_args() { - } - - public RenewDelegationToken_args( - TRenewDelegationTokenReq req) - { - this(); - this.req = req; - } - - /** - * Performs a deep copy on other. 
- */ - public RenewDelegationToken_args(RenewDelegationToken_args other) { - if (other.isSetReq()) { - this.req = new TRenewDelegationTokenReq(other.req); - } - } - - public RenewDelegationToken_args deepCopy() { - return new RenewDelegationToken_args(this); - } - - @Override - public void clear() { - this.req = null; - } - - public TRenewDelegationTokenReq getReq() { - return this.req; - } - - public void setReq(TRenewDelegationTokenReq req) { - this.req = req; - } - - public void unsetReq() { - this.req = null; - } - - /** Returns true if field req is set (has been assigned a value) and false otherwise */ - public boolean isSetReq() { - return this.req != null; - } - - public void setReqIsSet(boolean value) { - if (!value) { - this.req = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case REQ: - if (value == null) { - unsetReq(); - } else { - setReq((TRenewDelegationTokenReq)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case REQ: - return getReq(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case REQ: - return isSetReq(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof RenewDelegationToken_args) - return this.equals((RenewDelegationToken_args)that); - return false; - } - - public boolean equals(RenewDelegationToken_args that) { - if (that == null) - return false; - - boolean this_present_req = true && this.isSetReq(); - boolean that_present_req = true && that.isSetReq(); - if (this_present_req || that_present_req) { - if (!(this_present_req && that_present_req)) - return false; - if (!this.req.equals(that.req)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_req = true && (isSetReq()); - list.add(present_req); - if (present_req) - list.add(req); - - return list.hashCode(); - } - - @Override - public int compareTo(RenewDelegationToken_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetReq()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("RenewDelegationToken_args("); - boolean first = true; - - sb.append("req:"); - if (this.req == null) { - sb.append("null"); - } else { - sb.append(this.req); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() 
throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (req != null) { - req.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class RenewDelegationToken_argsStandardSchemeFactory implements SchemeFactory { - public RenewDelegationToken_argsStandardScheme getScheme() { - return new RenewDelegationToken_argsStandardScheme(); - } - } - - private static class RenewDelegationToken_argsStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // REQ - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new TRenewDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.req != null) { - oprot.writeFieldBegin(REQ_FIELD_DESC); - struct.req.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class RenewDelegationToken_argsTupleSchemeFactory implements SchemeFactory { - public RenewDelegationToken_argsTupleScheme getScheme() { - return new RenewDelegationToken_argsTupleScheme(); - } - } - - private static class RenewDelegationToken_argsTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetReq()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetReq()) { - struct.req.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.req = new TRenewDelegationTokenReq(); - struct.req.read(iprot); - struct.setReqIsSet(true); - } - } - } - - } - - public static class RenewDelegationToken_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RenewDelegationToken_result"); - - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new RenewDelegationToken_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new RenewDelegationToken_resultTupleSchemeFactory()); - } - - private TRenewDelegationTokenResp success; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRenewDelegationTokenResp.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RenewDelegationToken_result.class, metaDataMap); - } - - public RenewDelegationToken_result() { - } - - public RenewDelegationToken_result( - TRenewDelegationTokenResp success) - { - this(); - this.success = success; - } - - /** - * Performs a deep copy on other. 
- */ - public RenewDelegationToken_result(RenewDelegationToken_result other) { - if (other.isSetSuccess()) { - this.success = new TRenewDelegationTokenResp(other.success); - } - } - - public RenewDelegationToken_result deepCopy() { - return new RenewDelegationToken_result(this); - } - - @Override - public void clear() { - this.success = null; - } - - public TRenewDelegationTokenResp getSuccess() { - return this.success; - } - - public void setSuccess(TRenewDelegationTokenResp success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((TRenewDelegationTokenResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SUCCESS: - return getSuccess(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SUCCESS: - return isSetSuccess(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof RenewDelegationToken_result) - return this.equals((RenewDelegationToken_result)that); - return false; - } - - public boolean equals(RenewDelegationToken_result that) { - if (that == null) - return false; - - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - - return list.hashCode(); - } - - @Override - public int compareTo(RenewDelegationToken_result other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("RenewDelegationToken_result("); - boolean first = true; 
- - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - if (success != null) { - success.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class RenewDelegationToken_resultStandardSchemeFactory implements SchemeFactory { - public RenewDelegationToken_resultStandardScheme getScheme() { - return new RenewDelegationToken_resultStandardScheme(); - } - } - - private static class RenewDelegationToken_resultStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TRenewDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class RenewDelegationToken_resultTupleSchemeFactory implements SchemeFactory { - public RenewDelegationToken_resultTupleScheme getScheme() { - return new RenewDelegationToken_resultTupleScheme(); - } - } - - private static class RenewDelegationToken_resultTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, RenewDelegationToken_result struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - 
struct.success = new TRenewDelegationTokenResp(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } - } - } - - } - -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCLIServiceConstants.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCLIServiceConstants.java deleted file mode 100644 index 930bed731ed2a..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCLIServiceConstants.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -public class TCLIServiceConstants { - - public static final Set PRIMITIVE_TYPES = new HashSet(); - static { - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.BOOLEAN_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.TINYINT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.SMALLINT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.INT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.BIGINT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.FLOAT_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.DOUBLE_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.STRING_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.TIMESTAMP_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.BINARY_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.DECIMAL_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.NULL_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.DATE_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.VARCHAR_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.CHAR_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.INTERVAL_YEAR_MONTH_TYPE); - PRIMITIVE_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.INTERVAL_DAY_TIME_TYPE); - } - - public static final Set COMPLEX_TYPES = new HashSet(); - static { - COMPLEX_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.ARRAY_TYPE); - COMPLEX_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.MAP_TYPE); - COMPLEX_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.STRUCT_TYPE); - 
COMPLEX_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.UNION_TYPE); - COMPLEX_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.USER_DEFINED_TYPE); - } - - public static final Set COLLECTION_TYPES = new HashSet(); - static { - COLLECTION_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.ARRAY_TYPE); - COLLECTION_TYPES.add(org.apache.hive.service.rpc.thrift.TTypeId.MAP_TYPE); - } - - public static final Map TYPE_NAMES = new HashMap(); - static { - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.BOOLEAN_TYPE, "BOOLEAN"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.TINYINT_TYPE, "TINYINT"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.SMALLINT_TYPE, "SMALLINT"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.INT_TYPE, "INT"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.BIGINT_TYPE, "BIGINT"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.FLOAT_TYPE, "FLOAT"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.DOUBLE_TYPE, "DOUBLE"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.STRING_TYPE, "STRING"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.TIMESTAMP_TYPE, "TIMESTAMP"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.BINARY_TYPE, "BINARY"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.ARRAY_TYPE, "ARRAY"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.MAP_TYPE, "MAP"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.STRUCT_TYPE, "STRUCT"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.UNION_TYPE, "UNIONTYPE"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.DECIMAL_TYPE, "DECIMAL"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.NULL_TYPE, "NULL"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.DATE_TYPE, "DATE"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.VARCHAR_TYPE, "VARCHAR"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.CHAR_TYPE, "CHAR"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.INTERVAL_YEAR_MONTH_TYPE, "INTERVAL_YEAR_MONTH"); - TYPE_NAMES.put(org.apache.hive.service.rpc.thrift.TTypeId.INTERVAL_DAY_TIME_TYPE, "INTERVAL_DAY_TIME"); - } - - public static final String CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength"; - - public static final String PRECISION = "precision"; - - public static final String SCALE = "scale"; - -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelDelegationTokenReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelDelegationTokenReq.java deleted file mode 100644 index a7d4e7de1f60d..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelDelegationTokenReq.java +++ /dev/null @@ -1,495 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import 
org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCancelDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelDelegationTokenReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField DELEGATION_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("delegationToken", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelDelegationTokenReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCancelDelegationTokenReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String delegationToken; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - DELEGATION_TOKEN((short)2, "delegationToken"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // DELEGATION_TOKEN - return DELEGATION_TOKEN; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.DELEGATION_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("delegationToken", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelDelegationTokenReq.class, metaDataMap); - } - - public TCancelDelegationTokenReq() { - } - - public TCancelDelegationTokenReq( - TSessionHandle sessionHandle, - String delegationToken) - { - this(); - this.sessionHandle = sessionHandle; - this.delegationToken = delegationToken; - } - - /** - * Performs a deep copy on other. - */ - public TCancelDelegationTokenReq(TCancelDelegationTokenReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetDelegationToken()) { - this.delegationToken = other.delegationToken; - } - } - - public TCancelDelegationTokenReq deepCopy() { - return new TCancelDelegationTokenReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.delegationToken = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getDelegationToken() { - return this.delegationToken; - } - - public void setDelegationToken(String delegationToken) { - this.delegationToken = delegationToken; - } - - public void unsetDelegationToken() { - this.delegationToken = null; - } - - /** Returns true if field delegationToken is set (has been assigned a value) and false otherwise */ - public boolean isSetDelegationToken() { - return this.delegationToken != null; - } - - public void setDelegationTokenIsSet(boolean value) { - if (!value) { - this.delegationToken = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case DELEGATION_TOKEN: - if (value == null) { - unsetDelegationToken(); - } else { - setDelegationToken((String)value); 
- } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case DELEGATION_TOKEN: - return getDelegationToken(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case DELEGATION_TOKEN: - return isSetDelegationToken(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelDelegationTokenReq) - return this.equals((TCancelDelegationTokenReq)that); - return false; - } - - public boolean equals(TCancelDelegationTokenReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_delegationToken = true && this.isSetDelegationToken(); - boolean that_present_delegationToken = true && that.isSetDelegationToken(); - if (this_present_delegationToken || that_present_delegationToken) { - if (!(this_present_delegationToken && that_present_delegationToken)) - return false; - if (!this.delegationToken.equals(that.delegationToken)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_delegationToken = true && (isSetDelegationToken()); - list.add(present_delegationToken); - if (present_delegationToken) - list.add(delegationToken); - - return list.hashCode(); - } - - @Override - public int compareTo(TCancelDelegationTokenReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDelegationToken()).compareTo(other.isSetDelegationToken()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDelegationToken()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delegationToken, other.delegationToken); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - 
@Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelDelegationTokenReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("delegationToken:"); - if (this.delegationToken == null) { - sb.append("null"); - } else { - sb.append(this.delegationToken); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetDelegationToken()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'delegationToken' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelDelegationTokenReqStandardSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenReqStandardScheme getScheme() { - return new TCancelDelegationTokenReqStandardScheme(); - } - } - - private static class TCancelDelegationTokenReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // DELEGATION_TOKEN - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.delegationToken != null) { - 
oprot.writeFieldBegin(DELEGATION_TOKEN_FIELD_DESC); - oprot.writeString(struct.delegationToken); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelDelegationTokenReqTupleSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenReqTupleScheme getScheme() { - return new TCancelDelegationTokenReqTupleScheme(); - } - } - - private static class TCancelDelegationTokenReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.delegationToken); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelDelegationTokenResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelDelegationTokenResp.java deleted file mode 100644 index 611e92ca2af30..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelDelegationTokenResp.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCancelDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelDelegationTokenResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelDelegationTokenRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new 
TCancelDelegationTokenRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelDelegationTokenResp.class, metaDataMap); - } - - public TCancelDelegationTokenResp() { - } - - public TCancelDelegationTokenResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TCancelDelegationTokenResp(TCancelDelegationTokenResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCancelDelegationTokenResp deepCopy() { - return new TCancelDelegationTokenResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelDelegationTokenResp) - return this.equals((TCancelDelegationTokenResp)that); - return false; - } - - public boolean equals(TCancelDelegationTokenResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - return list.hashCode(); - } - - @Override - public int compareTo(TCancelDelegationTokenResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelDelegationTokenResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first 
= false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelDelegationTokenRespStandardSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenRespStandardScheme getScheme() { - return new TCancelDelegationTokenRespStandardScheme(); - } - } - - private static class TCancelDelegationTokenRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelDelegationTokenRespTupleSchemeFactory implements SchemeFactory { - public TCancelDelegationTokenRespTupleScheme getScheme() { - return new TCancelDelegationTokenRespTupleScheme(); - } - } - - private static class TCancelDelegationTokenRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelOperationReq.java 
b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelOperationReq.java deleted file mode 100644 index 4076c573fafb7..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelOperationReq.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCancelOperationReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelOperationReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelOperationReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCancelOperationReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelOperationReq.class, metaDataMap); - } - - public TCancelOperationReq() { - } - - public TCancelOperationReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. - */ - public TCancelOperationReq(TCancelOperationReq other) { - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TCancelOperationReq deepCopy() { - return new TCancelOperationReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelOperationReq) - return this.equals((TCancelOperationReq)that); - return false; - } - - public boolean equals(TCancelOperationReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && 
that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TCancelOperationReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelOperationReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelOperationReqStandardSchemeFactory implements SchemeFactory { - public TCancelOperationReqStandardScheme getScheme() { - return new TCancelOperationReqStandardScheme(); - } - } - - private static class TCancelOperationReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelOperationReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelOperationReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelOperationReqTupleSchemeFactory implements SchemeFactory { - public TCancelOperationReqTupleScheme getScheme() { - return new TCancelOperationReqTupleScheme(); - } - } - - private static class TCancelOperationReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelOperationReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelOperationReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelOperationResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelOperationResp.java deleted file mode 100644 index 7bcc765c85daa..0000000000000 --- 
a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCancelOperationResp.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCancelOperationResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCancelOperationResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCancelOperationRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCancelOperationRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCancelOperationResp.class, metaDataMap); - } - - public TCancelOperationResp() { - } - - public TCancelOperationResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TCancelOperationResp(TCancelOperationResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCancelOperationResp deepCopy() { - return new TCancelOperationResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCancelOperationResp) - return this.equals((TCancelOperationResp)that); - return false; - } - - public boolean equals(TCancelOperationResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - return list.hashCode(); - } - - @Override - public int 
compareTo(TCancelOperationResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCancelOperationResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCancelOperationRespStandardSchemeFactory implements SchemeFactory { - public TCancelOperationRespStandardScheme getScheme() { - return new TCancelOperationRespStandardScheme(); - } - } - - private static class TCancelOperationRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCancelOperationResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCancelOperationResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status 
!= null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCancelOperationRespTupleSchemeFactory implements SchemeFactory { - public TCancelOperationRespTupleScheme getScheme() { - return new TCancelOperationRespTupleScheme(); - } - } - - private static class TCancelOperationRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCancelOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCancelOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseOperationReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseOperationReq.java deleted file mode 100644 index 47a6b8329c05b..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseOperationReq.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCloseOperationReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseOperationReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseOperationReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCloseOperationReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseOperationReq.class, metaDataMap); - } - - public TCloseOperationReq() { - } - - public TCloseOperationReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TCloseOperationReq(TCloseOperationReq other) { - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TCloseOperationReq deepCopy() { - return new TCloseOperationReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseOperationReq) - return this.equals((TCloseOperationReq)that); - return false; - } - - public boolean equals(TCloseOperationReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TCloseOperationReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) 
throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseOperationReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseOperationReqStandardSchemeFactory implements SchemeFactory { - public TCloseOperationReqStandardScheme getScheme() { - return new TCloseOperationReqStandardScheme(); - } - } - - private static class TCloseOperationReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseOperationReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseOperationReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseOperationReqTupleSchemeFactory implements SchemeFactory { - public TCloseOperationReqTupleScheme getScheme() { - return new TCloseOperationReqTupleScheme(); - } - } - - private static class TCloseOperationReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseOperationReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TCloseOperationReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseOperationResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseOperationResp.java deleted file mode 100644 index 0860a2b1c5bac..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseOperationResp.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCloseOperationResp implements org.apache.thrift.TBase<TCloseOperationResp, TCloseOperationResp._Fields>, java.io.Serializable, Cloneable, Comparable<TCloseOperationResp> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseOperationResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseOperationRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCloseOperationRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found.
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseOperationResp.class, metaDataMap); - } - - public TCloseOperationResp() { - } - - public TCloseOperationResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TCloseOperationResp(TCloseOperationResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCloseOperationResp deepCopy() { - return new TCloseOperationResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseOperationResp) - return this.equals((TCloseOperationResp)that); - return false; - } - - public boolean equals(TCloseOperationResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return 
false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - return list.hashCode(); - } - - @Override - public int compareTo(TCloseOperationResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseOperationResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseOperationRespStandardSchemeFactory implements SchemeFactory { - public TCloseOperationRespStandardScheme getScheme() { - return new TCloseOperationRespStandardScheme(); - } - } - - private static class TCloseOperationRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseOperationResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - 
iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseOperationResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseOperationRespTupleSchemeFactory implements SchemeFactory { - public TCloseOperationRespTupleScheme getScheme() { - return new TCloseOperationRespTupleScheme(); - } - } - - private static class TCloseOperationRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCloseOperationResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseSessionReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseSessionReq.java deleted file mode 100644 index 43ee87f487a67..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseSessionReq.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCloseSessionReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseSessionReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseSessionReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new 
TCloseSessionReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseSessionReq.class, metaDataMap); - } - - public TCloseSessionReq() { - } - - public TCloseSessionReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TCloseSessionReq(TCloseSessionReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TCloseSessionReq deepCopy() { - return new TCloseSessionReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseSessionReq) - return this.equals((TCloseSessionReq)that); - return false; - } - - public boolean equals(TCloseSessionReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TCloseSessionReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - 
@Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseSessionReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseSessionReqStandardSchemeFactory implements SchemeFactory { - public TCloseSessionReqStandardScheme getScheme() { - return new TCloseSessionReqStandardScheme(); - } - } - - private static class TCloseSessionReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseSessionReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseSessionReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseSessionReqTupleSchemeFactory implements SchemeFactory { - public TCloseSessionReqTupleScheme getScheme() { - return new TCloseSessionReqTupleScheme(); - } - } - - private static class TCloseSessionReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseSessionReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCloseSessionReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - 
struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseSessionResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseSessionResp.java deleted file mode 100644 index 38f82ac8d3cd2..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TCloseSessionResp.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TCloseSessionResp implements org.apache.thrift.TBase<TCloseSessionResp, TCloseSessionResp._Fields>, java.io.Serializable, Cloneable, Comparable<TCloseSessionResp> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TCloseSessionResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TCloseSessionRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TCloseSessionRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found.
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TCloseSessionResp.class, metaDataMap); - } - - public TCloseSessionResp() { - } - - public TCloseSessionResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TCloseSessionResp(TCloseSessionResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TCloseSessionResp deepCopy() { - return new TCloseSessionResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TCloseSessionResp) - return this.equals((TCloseSessionResp)that); - return false; - } - - public boolean equals(TCloseSessionResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - return list.hashCode(); - } - - @Override - public int compareTo(TCloseSessionResp other) { - if 
(!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TCloseSessionResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TCloseSessionRespStandardSchemeFactory implements SchemeFactory { - public TCloseSessionRespStandardScheme getScheme() { - return new TCloseSessionRespStandardScheme(); - } - } - - private static class TCloseSessionRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TCloseSessionResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TCloseSessionResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - 
struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TCloseSessionRespTupleSchemeFactory implements SchemeFactory { - public TCloseSessionRespTupleScheme getScheme() { - return new TCloseSessionRespTupleScheme(); - } - } - - private static class TCloseSessionRespTupleScheme extends TupleScheme<TCloseSessionResp> { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TCloseSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TCloseSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumn.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumn.java deleted file mode 100644 index dd79482200961..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumn.java +++ /dev/null @@ -1,736 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -public class TColumn extends org.apache.thrift.TUnion<TColumn, TColumn._Fields> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumn"); - private static final org.apache.thrift.protocol.TField BOOL_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("boolVal", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField BYTE_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("byteVal", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField I16_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i16Val", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField I32_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i32Val", org.apache.thrift.protocol.TType.STRUCT, (short)4); - private static final org.apache.thrift.protocol.TField I64_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i64Val", org.apache.thrift.protocol.TType.STRUCT, (short)5); - private static final
org.apache.thrift.protocol.TField DOUBLE_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("doubleVal", org.apache.thrift.protocol.TType.STRUCT, (short)6); - private static final org.apache.thrift.protocol.TField STRING_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("stringVal", org.apache.thrift.protocol.TType.STRUCT, (short)7); - private static final org.apache.thrift.protocol.TField BINARY_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("binaryVal", org.apache.thrift.protocol.TType.STRUCT, (short)8); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - BOOL_VAL((short)1, "boolVal"), - BYTE_VAL((short)2, "byteVal"), - I16_VAL((short)3, "i16Val"), - I32_VAL((short)4, "i32Val"), - I64_VAL((short)5, "i64Val"), - DOUBLE_VAL((short)6, "doubleVal"), - STRING_VAL((short)7, "stringVal"), - BINARY_VAL((short)8, "binaryVal"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // BOOL_VAL - return BOOL_VAL; - case 2: // BYTE_VAL - return BYTE_VAL; - case 3: // I16_VAL - return I16_VAL; - case 4: // I32_VAL - return I32_VAL; - case 5: // I64_VAL - return I64_VAL; - case 6: // DOUBLE_VAL - return DOUBLE_VAL; - case 7: // STRING_VAL - return STRING_VAL; - case 8: // BINARY_VAL - return BINARY_VAL; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.BOOL_VAL, new org.apache.thrift.meta_data.FieldMetaData("boolVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TBoolColumn.class))); - tmpMap.put(_Fields.BYTE_VAL, new org.apache.thrift.meta_data.FieldMetaData("byteVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TByteColumn.class))); - tmpMap.put(_Fields.I16_VAL, new org.apache.thrift.meta_data.FieldMetaData("i16Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI16Column.class))); - tmpMap.put(_Fields.I32_VAL, new org.apache.thrift.meta_data.FieldMetaData("i32Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI32Column.class))); - tmpMap.put(_Fields.I64_VAL, new org.apache.thrift.meta_data.FieldMetaData("i64Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI64Column.class))); - tmpMap.put(_Fields.DOUBLE_VAL, new org.apache.thrift.meta_data.FieldMetaData("doubleVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDoubleColumn.class))); - tmpMap.put(_Fields.STRING_VAL, new org.apache.thrift.meta_data.FieldMetaData("stringVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStringColumn.class))); - tmpMap.put(_Fields.BINARY_VAL, new org.apache.thrift.meta_data.FieldMetaData("binaryVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TBinaryColumn.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumn.class, metaDataMap); - } - - public TColumn() { - super(); - } - - public TColumn(TColumn._Fields setField, Object value) { - super(setField, value); - } - - public TColumn(TColumn other) { - super(other); - } - public TColumn deepCopy() { - return new TColumn(this); - } - - public static TColumn boolVal(TBoolColumn value) { - TColumn x = new TColumn(); - x.setBoolVal(value); - return x; - } - - public static TColumn byteVal(TByteColumn value) { - TColumn x = new TColumn(); - x.setByteVal(value); - return x; - } - - public static TColumn i16Val(TI16Column value) { - TColumn x = new TColumn(); - x.setI16Val(value); - return x; - } - - public static TColumn i32Val(TI32Column value) { - TColumn x = new TColumn(); - x.setI32Val(value); - return x; - } - - public static TColumn 
i64Val(TI64Column value) { - TColumn x = new TColumn(); - x.setI64Val(value); - return x; - } - - public static TColumn doubleVal(TDoubleColumn value) { - TColumn x = new TColumn(); - x.setDoubleVal(value); - return x; - } - - public static TColumn stringVal(TStringColumn value) { - TColumn x = new TColumn(); - x.setStringVal(value); - return x; - } - - public static TColumn binaryVal(TBinaryColumn value) { - TColumn x = new TColumn(); - x.setBinaryVal(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case BOOL_VAL: - if (value instanceof TBoolColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TBoolColumn for field 'boolVal', but got " + value.getClass().getSimpleName()); - case BYTE_VAL: - if (value instanceof TByteColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TByteColumn for field 'byteVal', but got " + value.getClass().getSimpleName()); - case I16_VAL: - if (value instanceof TI16Column) { - break; - } - throw new ClassCastException("Was expecting value of type TI16Column for field 'i16Val', but got " + value.getClass().getSimpleName()); - case I32_VAL: - if (value instanceof TI32Column) { - break; - } - throw new ClassCastException("Was expecting value of type TI32Column for field 'i32Val', but got " + value.getClass().getSimpleName()); - case I64_VAL: - if (value instanceof TI64Column) { - break; - } - throw new ClassCastException("Was expecting value of type TI64Column for field 'i64Val', but got " + value.getClass().getSimpleName()); - case DOUBLE_VAL: - if (value instanceof TDoubleColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TDoubleColumn for field 'doubleVal', but got " + value.getClass().getSimpleName()); - case STRING_VAL: - if (value instanceof TStringColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TStringColumn for field 'stringVal', but got " + value.getClass().getSimpleName()); - case BINARY_VAL: - if (value instanceof TBinaryColumn) { - break; - } - throw new ClassCastException("Was expecting value of type TBinaryColumn for field 'binaryVal', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - if (field.type == BOOL_VAL_FIELD_DESC.type) { - TBoolColumn boolVal; - boolVal = new TBoolColumn(); - boolVal.read(iprot); - return boolVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BYTE_VAL: - if (field.type == BYTE_VAL_FIELD_DESC.type) { - TByteColumn byteVal; - byteVal = new TByteColumn(); - byteVal.read(iprot); - return byteVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I16_VAL: - if (field.type == I16_VAL_FIELD_DESC.type) { - TI16Column i16Val; - i16Val = new TI16Column(); - i16Val.read(iprot); - return i16Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I32_VAL: - if (field.type == I32_VAL_FIELD_DESC.type) { - TI32Column i32Val; - i32Val = new TI32Column(); - i32Val.read(iprot); 
- return i32Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I64_VAL: - if (field.type == I64_VAL_FIELD_DESC.type) { - TI64Column i64Val; - i64Val = new TI64Column(); - i64Val.read(iprot); - return i64Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case DOUBLE_VAL: - if (field.type == DOUBLE_VAL_FIELD_DESC.type) { - TDoubleColumn doubleVal; - doubleVal = new TDoubleColumn(); - doubleVal.read(iprot); - return doubleVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRING_VAL: - if (field.type == STRING_VAL_FIELD_DESC.type) { - TStringColumn stringVal; - stringVal = new TStringColumn(); - stringVal.read(iprot); - return stringVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BINARY_VAL: - if (field.type == BINARY_VAL_FIELD_DESC.type) { - TBinaryColumn binaryVal; - binaryVal = new TBinaryColumn(); - binaryVal.read(iprot); - return binaryVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolColumn boolVal = (TBoolColumn)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteColumn byteVal = (TByteColumn)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Column i16Val = (TI16Column)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Column i32Val = (TI32Column)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Column i64Val = (TI64Column)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleColumn doubleVal = (TDoubleColumn)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringColumn stringVal = (TStringColumn)value_; - stringVal.write(oprot); - return; - case BINARY_VAL: - TBinaryColumn binaryVal = (TBinaryColumn)value_; - binaryVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - TBoolColumn boolVal; - boolVal = new TBoolColumn(); - boolVal.read(iprot); - return boolVal; - case BYTE_VAL: - TByteColumn byteVal; - byteVal = new TByteColumn(); - byteVal.read(iprot); - return byteVal; - case I16_VAL: - TI16Column i16Val; - i16Val = new TI16Column(); - i16Val.read(iprot); - return i16Val; - case I32_VAL: - TI32Column i32Val; - i32Val = new TI32Column(); - i32Val.read(iprot); - return i32Val; - case I64_VAL: - TI64Column i64Val; - i64Val = new TI64Column(); - i64Val.read(iprot); - return i64Val; - case DOUBLE_VAL: - TDoubleColumn doubleVal; - doubleVal = new TDoubleColumn(); - doubleVal.read(iprot); - return doubleVal; - case STRING_VAL: - TStringColumn stringVal; - stringVal = new TStringColumn(); - stringVal.read(iprot); - return stringVal; - case BINARY_VAL: - TBinaryColumn binaryVal; - 
binaryVal = new TBinaryColumn(); - binaryVal.read(iprot); - return binaryVal; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolColumn boolVal = (TBoolColumn)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteColumn byteVal = (TByteColumn)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Column i16Val = (TI16Column)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Column i32Val = (TI32Column)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Column i64Val = (TI64Column)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleColumn doubleVal = (TDoubleColumn)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringColumn stringVal = (TStringColumn)value_; - stringVal.write(oprot); - return; - case BINARY_VAL: - TBinaryColumn binaryVal = (TBinaryColumn)value_; - binaryVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case BOOL_VAL: - return BOOL_VAL_FIELD_DESC; - case BYTE_VAL: - return BYTE_VAL_FIELD_DESC; - case I16_VAL: - return I16_VAL_FIELD_DESC; - case I32_VAL: - return I32_VAL_FIELD_DESC; - case I64_VAL: - return I64_VAL_FIELD_DESC; - case DOUBLE_VAL: - return DOUBLE_VAL_FIELD_DESC; - case STRING_VAL: - return STRING_VAL_FIELD_DESC; - case BINARY_VAL: - return BINARY_VAL_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public TBoolColumn getBoolVal() { - if (getSetField() == _Fields.BOOL_VAL) { - return (TBoolColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'boolVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBoolVal(TBoolColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BOOL_VAL; - value_ = value; - } - - public TByteColumn getByteVal() { - if (getSetField() == _Fields.BYTE_VAL) { - return (TByteColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'byteVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setByteVal(TByteColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BYTE_VAL; - value_ = value; - } - - public TI16Column getI16Val() { - if (getSetField() == _Fields.I16_VAL) { - return (TI16Column)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i16Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI16Val(TI16Column value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I16_VAL; - value_ = value; - } - - public TI32Column getI32Val() { - if 
(getSetField() == _Fields.I32_VAL) { - return (TI32Column)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i32Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI32Val(TI32Column value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I32_VAL; - value_ = value; - } - - public TI64Column getI64Val() { - if (getSetField() == _Fields.I64_VAL) { - return (TI64Column)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i64Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI64Val(TI64Column value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I64_VAL; - value_ = value; - } - - public TDoubleColumn getDoubleVal() { - if (getSetField() == _Fields.DOUBLE_VAL) { - return (TDoubleColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'doubleVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setDoubleVal(TDoubleColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.DOUBLE_VAL; - value_ = value; - } - - public TStringColumn getStringVal() { - if (getSetField() == _Fields.STRING_VAL) { - return (TStringColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'stringVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringVal(TStringColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRING_VAL; - value_ = value; - } - - public TBinaryColumn getBinaryVal() { - if (getSetField() == _Fields.BINARY_VAL) { - return (TBinaryColumn)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'binaryVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBinaryVal(TBinaryColumn value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BINARY_VAL; - value_ = value; - } - - public boolean isSetBoolVal() { - return setField_ == _Fields.BOOL_VAL; - } - - - public boolean isSetByteVal() { - return setField_ == _Fields.BYTE_VAL; - } - - - public boolean isSetI16Val() { - return setField_ == _Fields.I16_VAL; - } - - - public boolean isSetI32Val() { - return setField_ == _Fields.I32_VAL; - } - - - public boolean isSetI64Val() { - return setField_ == _Fields.I64_VAL; - } - - - public boolean isSetDoubleVal() { - return setField_ == _Fields.DOUBLE_VAL; - } - - - public boolean isSetStringVal() { - return setField_ == _Fields.STRING_VAL; - } - - - public boolean isSetBinaryVal() { - return setField_ == _Fields.BINARY_VAL; - } - - - public boolean equals(Object other) { - if (other instanceof TColumn) { - return equals((TColumn)other); - } else { - return false; - } - } - - public boolean equals(TColumn other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TColumn other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - List list = new ArrayList(); - list.add(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = 
getSetField(); - if (setField != null) { - list.add(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - list.add(value); - } - } - return list.hashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumnDesc.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumnDesc.java deleted file mode 100644 index 31472c8f54b94..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumnDesc.java +++ /dev/null @@ -1,704 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TColumnDesc implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnDesc"); - - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TYPE_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("typeDesc", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField POSITION_FIELD_DESC = new org.apache.thrift.protocol.TField("position", org.apache.thrift.protocol.TType.I32, (short)3); - private static final org.apache.thrift.protocol.TField COMMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("comment", org.apache.thrift.protocol.TType.STRING, (short)4); - - private static final Map, SchemeFactory> 
schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TColumnDescStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TColumnDescTupleSchemeFactory()); - } - - private String columnName; // required - private TTypeDesc typeDesc; // required - private int position; // required - private String comment; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - COLUMN_NAME((short)1, "columnName"), - TYPE_DESC((short)2, "typeDesc"), - POSITION((short)3, "position"), - COMMENT((short)4, "comment"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // COLUMN_NAME - return COLUMN_NAME; - case 2: // TYPE_DESC - return TYPE_DESC; - case 3: // POSITION - return POSITION; - case 4: // COMMENT - return COMMENT; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __POSITION_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.COMMENT}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TYPE_DESC, new org.apache.thrift.meta_data.FieldMetaData("typeDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeDesc.class))); - tmpMap.put(_Fields.POSITION, new org.apache.thrift.meta_data.FieldMetaData("position", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.COMMENT, new org.apache.thrift.meta_data.FieldMetaData("comment", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnDesc.class, 
metaDataMap); - } - - public TColumnDesc() { - } - - public TColumnDesc( - String columnName, - TTypeDesc typeDesc, - int position) - { - this(); - this.columnName = columnName; - this.typeDesc = typeDesc; - this.position = position; - setPositionIsSet(true); - } - - /** - * Performs a deep copy on other. - */ - public TColumnDesc(TColumnDesc other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetColumnName()) { - this.columnName = other.columnName; - } - if (other.isSetTypeDesc()) { - this.typeDesc = new TTypeDesc(other.typeDesc); - } - this.position = other.position; - if (other.isSetComment()) { - this.comment = other.comment; - } - } - - public TColumnDesc deepCopy() { - return new TColumnDesc(this); - } - - @Override - public void clear() { - this.columnName = null; - this.typeDesc = null; - setPositionIsSet(false); - this.position = 0; - this.comment = null; - } - - public String getColumnName() { - return this.columnName; - } - - public void setColumnName(String columnName) { - this.columnName = columnName; - } - - public void unsetColumnName() { - this.columnName = null; - } - - /** Returns true if field columnName is set (has been assigned a value) and false otherwise */ - public boolean isSetColumnName() { - return this.columnName != null; - } - - public void setColumnNameIsSet(boolean value) { - if (!value) { - this.columnName = null; - } - } - - public TTypeDesc getTypeDesc() { - return this.typeDesc; - } - - public void setTypeDesc(TTypeDesc typeDesc) { - this.typeDesc = typeDesc; - } - - public void unsetTypeDesc() { - this.typeDesc = null; - } - - /** Returns true if field typeDesc is set (has been assigned a value) and false otherwise */ - public boolean isSetTypeDesc() { - return this.typeDesc != null; - } - - public void setTypeDescIsSet(boolean value) { - if (!value) { - this.typeDesc = null; - } - } - - public int getPosition() { - return this.position; - } - - public void setPosition(int position) { - this.position = position; - setPositionIsSet(true); - } - - public void unsetPosition() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __POSITION_ISSET_ID); - } - - /** Returns true if field position is set (has been assigned a value) and false otherwise */ - public boolean isSetPosition() { - return EncodingUtils.testBit(__isset_bitfield, __POSITION_ISSET_ID); - } - - public void setPositionIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __POSITION_ISSET_ID, value); - } - - public String getComment() { - return this.comment; - } - - public void setComment(String comment) { - this.comment = comment; - } - - public void unsetComment() { - this.comment = null; - } - - /** Returns true if field comment is set (has been assigned a value) and false otherwise */ - public boolean isSetComment() { - return this.comment != null; - } - - public void setCommentIsSet(boolean value) { - if (!value) { - this.comment = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case COLUMN_NAME: - if (value == null) { - unsetColumnName(); - } else { - setColumnName((String)value); - } - break; - - case TYPE_DESC: - if (value == null) { - unsetTypeDesc(); - } else { - setTypeDesc((TTypeDesc)value); - } - break; - - case POSITION: - if (value == null) { - unsetPosition(); - } else { - setPosition((Integer)value); - } - break; - - case COMMENT: - if (value == null) { - unsetComment(); - } else { - setComment((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields 
field) { - switch (field) { - case COLUMN_NAME: - return getColumnName(); - - case TYPE_DESC: - return getTypeDesc(); - - case POSITION: - return getPosition(); - - case COMMENT: - return getComment(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case COLUMN_NAME: - return isSetColumnName(); - case TYPE_DESC: - return isSetTypeDesc(); - case POSITION: - return isSetPosition(); - case COMMENT: - return isSetComment(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TColumnDesc) - return this.equals((TColumnDesc)that); - return false; - } - - public boolean equals(TColumnDesc that) { - if (that == null) - return false; - - boolean this_present_columnName = true && this.isSetColumnName(); - boolean that_present_columnName = true && that.isSetColumnName(); - if (this_present_columnName || that_present_columnName) { - if (!(this_present_columnName && that_present_columnName)) - return false; - if (!this.columnName.equals(that.columnName)) - return false; - } - - boolean this_present_typeDesc = true && this.isSetTypeDesc(); - boolean that_present_typeDesc = true && that.isSetTypeDesc(); - if (this_present_typeDesc || that_present_typeDesc) { - if (!(this_present_typeDesc && that_present_typeDesc)) - return false; - if (!this.typeDesc.equals(that.typeDesc)) - return false; - } - - boolean this_present_position = true; - boolean that_present_position = true; - if (this_present_position || that_present_position) { - if (!(this_present_position && that_present_position)) - return false; - if (this.position != that.position) - return false; - } - - boolean this_present_comment = true && this.isSetComment(); - boolean that_present_comment = true && that.isSetComment(); - if (this_present_comment || that_present_comment) { - if (!(this_present_comment && that_present_comment)) - return false; - if (!this.comment.equals(that.comment)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_columnName = true && (isSetColumnName()); - list.add(present_columnName); - if (present_columnName) - list.add(columnName); - - boolean present_typeDesc = true && (isSetTypeDesc()); - list.add(present_typeDesc); - if (present_typeDesc) - list.add(typeDesc); - - boolean present_position = true; - list.add(present_position); - if (present_position) - list.add(position); - - boolean present_comment = true && (isSetComment()); - list.add(present_comment); - if (present_comment) - list.add(comment); - - return list.hashCode(); - } - - @Override - public int compareTo(TColumnDesc other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetColumnName()).compareTo(other.isSetColumnName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumnName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnName, other.columnName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTypeDesc()).compareTo(other.isSetTypeDesc()); - if (lastComparison != 0) { - return lastComparison; - } - 
if (isSetTypeDesc()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.typeDesc, other.typeDesc); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetPosition()).compareTo(other.isSetPosition()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetPosition()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.position, other.position); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetComment()).compareTo(other.isSetComment()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetComment()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.comment, other.comment); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TColumnDesc("); - boolean first = true; - - sb.append("columnName:"); - if (this.columnName == null) { - sb.append("null"); - } else { - sb.append(this.columnName); - } - first = false; - if (!first) sb.append(", "); - sb.append("typeDesc:"); - if (this.typeDesc == null) { - sb.append("null"); - } else { - sb.append(this.typeDesc); - } - first = false; - if (!first) sb.append(", "); - sb.append("position:"); - sb.append(this.position); - first = false; - if (isSetComment()) { - if (!first) sb.append(", "); - sb.append("comment:"); - if (this.comment == null) { - sb.append("null"); - } else { - sb.append(this.comment); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetColumnName()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'columnName' is unset! Struct:" + toString()); - } - - if (!isSetTypeDesc()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'typeDesc' is unset! Struct:" + toString()); - } - - if (!isSetPosition()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'position' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (typeDesc != null) { - typeDesc.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TColumnDescStandardSchemeFactory implements SchemeFactory { - public TColumnDescStandardScheme getScheme() { - return new TColumnDescStandardScheme(); - } - } - - private static class TColumnDescStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TColumnDesc struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // COLUMN_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TYPE_DESC - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.typeDesc = new TTypeDesc(); - struct.typeDesc.read(iprot); - struct.setTypeDescIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // POSITION - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.position = iprot.readI32(); - struct.setPositionIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // COMMENT - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.comment = iprot.readString(); - struct.setCommentIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TColumnDesc struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.columnName != null) { - oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC); - oprot.writeString(struct.columnName); - oprot.writeFieldEnd(); - } - if (struct.typeDesc != null) { - oprot.writeFieldBegin(TYPE_DESC_FIELD_DESC); - struct.typeDesc.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(POSITION_FIELD_DESC); - oprot.writeI32(struct.position); - oprot.writeFieldEnd(); - if (struct.comment != null) { - if (struct.isSetComment()) { - oprot.writeFieldBegin(COMMENT_FIELD_DESC); - oprot.writeString(struct.comment); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TColumnDescTupleSchemeFactory implements SchemeFactory { - public TColumnDescTupleScheme getScheme() { - return new TColumnDescTupleScheme(); - } - } - - private static class TColumnDescTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TColumnDesc struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeString(struct.columnName); - struct.typeDesc.write(oprot); - oprot.writeI32(struct.position); - BitSet optionals = new BitSet(); - if (struct.isSetComment()) { - optionals.set(0); - } - 
oprot.writeBitSet(optionals, 1); - if (struct.isSetComment()) { - oprot.writeString(struct.comment); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TColumnDesc struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - struct.typeDesc = new TTypeDesc(); - struct.typeDesc.read(iprot); - struct.setTypeDescIsSet(true); - struct.position = iprot.readI32(); - struct.setPositionIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.comment = iprot.readString(); - struct.setCommentIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumnValue.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumnValue.java deleted file mode 100644 index d1cc8e919bc0c..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TColumnValue.java +++ /dev/null @@ -1,675 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -public class TColumnValue extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TColumnValue"); - private static final org.apache.thrift.protocol.TField BOOL_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("boolVal", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField BYTE_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("byteVal", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField I16_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i16Val", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField I32_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i32Val", org.apache.thrift.protocol.TType.STRUCT, (short)4); - private static final org.apache.thrift.protocol.TField I64_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("i64Val", org.apache.thrift.protocol.TType.STRUCT, (short)5); - private static final org.apache.thrift.protocol.TField DOUBLE_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("doubleVal", org.apache.thrift.protocol.TType.STRUCT, (short)6); - private static final 
org.apache.thrift.protocol.TField STRING_VAL_FIELD_DESC = new org.apache.thrift.protocol.TField("stringVal", org.apache.thrift.protocol.TType.STRUCT, (short)7); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - BOOL_VAL((short)1, "boolVal"), - BYTE_VAL((short)2, "byteVal"), - I16_VAL((short)3, "i16Val"), - I32_VAL((short)4, "i32Val"), - I64_VAL((short)5, "i64Val"), - DOUBLE_VAL((short)6, "doubleVal"), - STRING_VAL((short)7, "stringVal"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // BOOL_VAL - return BOOL_VAL; - case 2: // BYTE_VAL - return BYTE_VAL; - case 3: // I16_VAL - return I16_VAL; - case 4: // I32_VAL - return I32_VAL; - case 5: // I64_VAL - return I64_VAL; - case 6: // DOUBLE_VAL - return DOUBLE_VAL; - case 7: // STRING_VAL - return STRING_VAL; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.BOOL_VAL, new org.apache.thrift.meta_data.FieldMetaData("boolVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TBoolValue.class))); - tmpMap.put(_Fields.BYTE_VAL, new org.apache.thrift.meta_data.FieldMetaData("byteVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TByteValue.class))); - tmpMap.put(_Fields.I16_VAL, new org.apache.thrift.meta_data.FieldMetaData("i16Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI16Value.class))); - tmpMap.put(_Fields.I32_VAL, new org.apache.thrift.meta_data.FieldMetaData("i32Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI32Value.class))); - tmpMap.put(_Fields.I64_VAL, new org.apache.thrift.meta_data.FieldMetaData("i64Val", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TI64Value.class))); - tmpMap.put(_Fields.DOUBLE_VAL, 
new org.apache.thrift.meta_data.FieldMetaData("doubleVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TDoubleValue.class))); - tmpMap.put(_Fields.STRING_VAL, new org.apache.thrift.meta_data.FieldMetaData("stringVal", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStringValue.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TColumnValue.class, metaDataMap); - } - - public TColumnValue() { - super(); - } - - public TColumnValue(TColumnValue._Fields setField, Object value) { - super(setField, value); - } - - public TColumnValue(TColumnValue other) { - super(other); - } - public TColumnValue deepCopy() { - return new TColumnValue(this); - } - - public static TColumnValue boolVal(TBoolValue value) { - TColumnValue x = new TColumnValue(); - x.setBoolVal(value); - return x; - } - - public static TColumnValue byteVal(TByteValue value) { - TColumnValue x = new TColumnValue(); - x.setByteVal(value); - return x; - } - - public static TColumnValue i16Val(TI16Value value) { - TColumnValue x = new TColumnValue(); - x.setI16Val(value); - return x; - } - - public static TColumnValue i32Val(TI32Value value) { - TColumnValue x = new TColumnValue(); - x.setI32Val(value); - return x; - } - - public static TColumnValue i64Val(TI64Value value) { - TColumnValue x = new TColumnValue(); - x.setI64Val(value); - return x; - } - - public static TColumnValue doubleVal(TDoubleValue value) { - TColumnValue x = new TColumnValue(); - x.setDoubleVal(value); - return x; - } - - public static TColumnValue stringVal(TStringValue value) { - TColumnValue x = new TColumnValue(); - x.setStringVal(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case BOOL_VAL: - if (value instanceof TBoolValue) { - break; - } - throw new ClassCastException("Was expecting value of type TBoolValue for field 'boolVal', but got " + value.getClass().getSimpleName()); - case BYTE_VAL: - if (value instanceof TByteValue) { - break; - } - throw new ClassCastException("Was expecting value of type TByteValue for field 'byteVal', but got " + value.getClass().getSimpleName()); - case I16_VAL: - if (value instanceof TI16Value) { - break; - } - throw new ClassCastException("Was expecting value of type TI16Value for field 'i16Val', but got " + value.getClass().getSimpleName()); - case I32_VAL: - if (value instanceof TI32Value) { - break; - } - throw new ClassCastException("Was expecting value of type TI32Value for field 'i32Val', but got " + value.getClass().getSimpleName()); - case I64_VAL: - if (value instanceof TI64Value) { - break; - } - throw new ClassCastException("Was expecting value of type TI64Value for field 'i64Val', but got " + value.getClass().getSimpleName()); - case DOUBLE_VAL: - if (value instanceof TDoubleValue) { - break; - } - throw new ClassCastException("Was expecting value of type TDoubleValue for field 'doubleVal', but got " + value.getClass().getSimpleName()); - case STRING_VAL: - if (value instanceof TStringValue) { - break; - } - throw new ClassCastException("Was expecting value of type TStringValue for field 'stringVal', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - 
protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - if (field.type == BOOL_VAL_FIELD_DESC.type) { - TBoolValue boolVal; - boolVal = new TBoolValue(); - boolVal.read(iprot); - return boolVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BYTE_VAL: - if (field.type == BYTE_VAL_FIELD_DESC.type) { - TByteValue byteVal; - byteVal = new TByteValue(); - byteVal.read(iprot); - return byteVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I16_VAL: - if (field.type == I16_VAL_FIELD_DESC.type) { - TI16Value i16Val; - i16Val = new TI16Value(); - i16Val.read(iprot); - return i16Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I32_VAL: - if (field.type == I32_VAL_FIELD_DESC.type) { - TI32Value i32Val; - i32Val = new TI32Value(); - i32Val.read(iprot); - return i32Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case I64_VAL: - if (field.type == I64_VAL_FIELD_DESC.type) { - TI64Value i64Val; - i64Val = new TI64Value(); - i64Val.read(iprot); - return i64Val; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case DOUBLE_VAL: - if (field.type == DOUBLE_VAL_FIELD_DESC.type) { - TDoubleValue doubleVal; - doubleVal = new TDoubleValue(); - doubleVal.read(iprot); - return doubleVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRING_VAL: - if (field.type == STRING_VAL_FIELD_DESC.type) { - TStringValue stringVal; - stringVal = new TStringValue(); - stringVal.read(iprot); - return stringVal; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolValue boolVal = (TBoolValue)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteValue byteVal = (TByteValue)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Value i16Val = (TI16Value)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Value i32Val = (TI32Value)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Value i64Val = (TI64Value)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleValue doubleVal = (TDoubleValue)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringValue stringVal = (TStringValue)value_; - stringVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case BOOL_VAL: - TBoolValue boolVal; - boolVal = new TBoolValue(); - 
boolVal.read(iprot); - return boolVal; - case BYTE_VAL: - TByteValue byteVal; - byteVal = new TByteValue(); - byteVal.read(iprot); - return byteVal; - case I16_VAL: - TI16Value i16Val; - i16Val = new TI16Value(); - i16Val.read(iprot); - return i16Val; - case I32_VAL: - TI32Value i32Val; - i32Val = new TI32Value(); - i32Val.read(iprot); - return i32Val; - case I64_VAL: - TI64Value i64Val; - i64Val = new TI64Value(); - i64Val.read(iprot); - return i64Val; - case DOUBLE_VAL: - TDoubleValue doubleVal; - doubleVal = new TDoubleValue(); - doubleVal.read(iprot); - return doubleVal; - case STRING_VAL: - TStringValue stringVal; - stringVal = new TStringValue(); - stringVal.read(iprot); - return stringVal; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case BOOL_VAL: - TBoolValue boolVal = (TBoolValue)value_; - boolVal.write(oprot); - return; - case BYTE_VAL: - TByteValue byteVal = (TByteValue)value_; - byteVal.write(oprot); - return; - case I16_VAL: - TI16Value i16Val = (TI16Value)value_; - i16Val.write(oprot); - return; - case I32_VAL: - TI32Value i32Val = (TI32Value)value_; - i32Val.write(oprot); - return; - case I64_VAL: - TI64Value i64Val = (TI64Value)value_; - i64Val.write(oprot); - return; - case DOUBLE_VAL: - TDoubleValue doubleVal = (TDoubleValue)value_; - doubleVal.write(oprot); - return; - case STRING_VAL: - TStringValue stringVal = (TStringValue)value_; - stringVal.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case BOOL_VAL: - return BOOL_VAL_FIELD_DESC; - case BYTE_VAL: - return BYTE_VAL_FIELD_DESC; - case I16_VAL: - return I16_VAL_FIELD_DESC; - case I32_VAL: - return I32_VAL_FIELD_DESC; - case I64_VAL: - return I64_VAL_FIELD_DESC; - case DOUBLE_VAL: - return DOUBLE_VAL_FIELD_DESC; - case STRING_VAL: - return STRING_VAL_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public TBoolValue getBoolVal() { - if (getSetField() == _Fields.BOOL_VAL) { - return (TBoolValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'boolVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBoolVal(TBoolValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BOOL_VAL; - value_ = value; - } - - public TByteValue getByteVal() { - if (getSetField() == _Fields.BYTE_VAL) { - return (TByteValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'byteVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setByteVal(TByteValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.BYTE_VAL; - value_ = value; - } - - public 
TI16Value getI16Val() { - if (getSetField() == _Fields.I16_VAL) { - return (TI16Value)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i16Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI16Val(TI16Value value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I16_VAL; - value_ = value; - } - - public TI32Value getI32Val() { - if (getSetField() == _Fields.I32_VAL) { - return (TI32Value)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i32Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI32Val(TI32Value value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I32_VAL; - value_ = value; - } - - public TI64Value getI64Val() { - if (getSetField() == _Fields.I64_VAL) { - return (TI64Value)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i64Val' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI64Val(TI64Value value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.I64_VAL; - value_ = value; - } - - public TDoubleValue getDoubleVal() { - if (getSetField() == _Fields.DOUBLE_VAL) { - return (TDoubleValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'doubleVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setDoubleVal(TDoubleValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.DOUBLE_VAL; - value_ = value; - } - - public TStringValue getStringVal() { - if (getSetField() == _Fields.STRING_VAL) { - return (TStringValue)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'stringVal' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringVal(TStringValue value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRING_VAL; - value_ = value; - } - - public boolean isSetBoolVal() { - return setField_ == _Fields.BOOL_VAL; - } - - - public boolean isSetByteVal() { - return setField_ == _Fields.BYTE_VAL; - } - - - public boolean isSetI16Val() { - return setField_ == _Fields.I16_VAL; - } - - - public boolean isSetI32Val() { - return setField_ == _Fields.I32_VAL; - } - - - public boolean isSetI64Val() { - return setField_ == _Fields.I64_VAL; - } - - - public boolean isSetDoubleVal() { - return setField_ == _Fields.DOUBLE_VAL; - } - - - public boolean isSetStringVal() { - return setField_ == _Fields.STRING_VAL; - } - - - public boolean equals(Object other) { - if (other instanceof TColumnValue) { - return equals((TColumnValue)other); - } else { - return false; - } - } - - public boolean equals(TColumnValue other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TColumnValue other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - List list = new ArrayList(); - list.add(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - 
list.add(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - list.add(value); - } - } - return list.hashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TDoubleColumn.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TDoubleColumn.java deleted file mode 100644 index f93c9b4f0edc3..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TDoubleColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TDoubleColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDoubleColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TDoubleColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TDoubleColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TDoubleColumn.class, metaDataMap); - } - - public TDoubleColumn() { - } - - public TDoubleColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TDoubleColumn(TDoubleColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TDoubleColumn deepCopy() { - return new TDoubleColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? 
null : this.values.iterator(); - } - - public void addToValues(double elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TDoubleColumn) - return this.equals((TDoubleColumn)that); - return false; - } - - public boolean equals(TDoubleColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if (present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int 
compareTo(TDoubleColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TDoubleColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TDoubleColumnStandardSchemeFactory implements SchemeFactory { - public TDoubleColumnStandardScheme getScheme() { - return new TDoubleColumnStandardScheme(); - } - } - - private static class TDoubleColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TDoubleColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list94 = iprot.readListBegin(); - struct.values = new ArrayList(_list94.size); - double _elem95; - for (int _i96 = 0; _i96 < _list94.size; ++_i96) - { - _elem95 = iprot.readDouble(); - struct.values.add(_elem95); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TDoubleColumn struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.DOUBLE, struct.values.size())); - for (double _iter97 : struct.values) - { - oprot.writeDouble(_iter97); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TDoubleColumnTupleSchemeFactory implements SchemeFactory { - public TDoubleColumnTupleScheme getScheme() { - return new TDoubleColumnTupleScheme(); - } - } - - private static class TDoubleColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TDoubleColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (double _iter98 : struct.values) - { - oprot.writeDouble(_iter98); - } - } - 
oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TDoubleColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list99 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.DOUBLE, iprot.readI32()); - struct.values = new ArrayList(_list99.size); - double _elem100; - for (int _i101 = 0; _i101 < _list99.size; ++_i101) - { - _elem100 = iprot.readDouble(); - struct.values.add(_elem100); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TDoubleValue.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TDoubleValue.java deleted file mode 100644 index 5700355aad94d..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TDoubleValue.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TDoubleValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TDoubleValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.DOUBLE, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TDoubleValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TDoubleValueTupleSchemeFactory()); - } - - private double value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TDoubleValue.class, metaDataMap); - } - - public TDoubleValue() { - } - - /** - * Performs a deep copy on other. 
- */ - public TDoubleValue(TDoubleValue other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TDoubleValue deepCopy() { - return new TDoubleValue(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0.0; - } - - public double getValue() { - return this.value; - } - - public void setValue(double value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Double)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return getValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TDoubleValue) - return this.equals((TDoubleValue)that); - return false; - } - - public boolean equals(TDoubleValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); - - return list.hashCode(); - } - - @Override - public int compareTo(TDoubleValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TDoubleValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - 
return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TDoubleValueStandardSchemeFactory implements SchemeFactory { - public TDoubleValueStandardScheme getScheme() { - return new TDoubleValueStandardScheme(); - } - } - - private static class TDoubleValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TDoubleValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.value = iprot.readDouble(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TDoubleValue struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeDouble(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TDoubleValueTupleSchemeFactory implements SchemeFactory { - public TDoubleValueTupleScheme getScheme() { - return new TDoubleValueTupleScheme(); - } - } - - private static class TDoubleValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TDoubleValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeDouble(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TDoubleValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readDouble(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java 
b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java deleted file mode 100644 index 1f73cec61af78..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TExecuteStatementReq.java +++ /dev/null @@ -1,863 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TExecuteStatementReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TExecuteStatementReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField STATEMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("statement", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField CONF_OVERLAY_FIELD_DESC = new org.apache.thrift.protocol.TField("confOverlay", org.apache.thrift.protocol.TType.MAP, (short)3); - private static final org.apache.thrift.protocol.TField RUN_ASYNC_FIELD_DESC = new org.apache.thrift.protocol.TField("runAsync", org.apache.thrift.protocol.TType.BOOL, (short)4); - private static final org.apache.thrift.protocol.TField QUERY_TIMEOUT_FIELD_DESC = new org.apache.thrift.protocol.TField("queryTimeout", org.apache.thrift.protocol.TType.I64, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TExecuteStatementReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TExecuteStatementReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String statement; // required - private Map confOverlay; // optional - private boolean runAsync; // optional - private long queryTimeout; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - STATEMENT((short)2, "statement"), - CONF_OVERLAY((short)3, "confOverlay"), - RUN_ASYNC((short)4, "runAsync"), - QUERY_TIMEOUT((short)5, "queryTimeout"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // STATEMENT - return STATEMENT; - case 3: // CONF_OVERLAY - return CONF_OVERLAY; - case 4: // RUN_ASYNC - return RUN_ASYNC; - case 5: // QUERY_TIMEOUT - return QUERY_TIMEOUT; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __RUNASYNC_ISSET_ID = 0; - private static final int __QUERYTIMEOUT_ISSET_ID = 1; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.CONF_OVERLAY,_Fields.RUN_ASYNC,_Fields.QUERY_TIMEOUT}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.STATEMENT, new org.apache.thrift.meta_data.FieldMetaData("statement", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.CONF_OVERLAY, new org.apache.thrift.meta_data.FieldMetaData("confOverlay", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.RUN_ASYNC, new org.apache.thrift.meta_data.FieldMetaData("runAsync", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.QUERY_TIMEOUT, new org.apache.thrift.meta_data.FieldMetaData("queryTimeout", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TExecuteStatementReq.class, metaDataMap); - } - - public TExecuteStatementReq() { - this.runAsync = false; - - this.queryTimeout = 0L; - - } - - public TExecuteStatementReq( - TSessionHandle sessionHandle, - String statement) - { - this(); - this.sessionHandle = sessionHandle; - this.statement = statement; - } - - /** - * Performs a deep copy on other. - */ - public TExecuteStatementReq(TExecuteStatementReq other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetStatement()) { - this.statement = other.statement; - } - if (other.isSetConfOverlay()) { - Map __this__confOverlay = new HashMap(other.confOverlay); - this.confOverlay = __this__confOverlay; - } - this.runAsync = other.runAsync; - this.queryTimeout = other.queryTimeout; - } - - public TExecuteStatementReq deepCopy() { - return new TExecuteStatementReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.statement = null; - this.confOverlay = null; - this.runAsync = false; - - this.queryTimeout = 0L; - - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getStatement() { - return this.statement; - } - - public void setStatement(String statement) { - this.statement = statement; - } - - public void unsetStatement() { - this.statement = null; - } - - /** Returns true if field statement is set (has been assigned a value) and false otherwise */ - public boolean isSetStatement() { - return this.statement != null; - } - - public void setStatementIsSet(boolean value) { - if (!value) { - this.statement = null; - } - } - - public int getConfOverlaySize() { - return (this.confOverlay == null) ? 
0 : this.confOverlay.size(); - } - - public void putToConfOverlay(String key, String val) { - if (this.confOverlay == null) { - this.confOverlay = new HashMap(); - } - this.confOverlay.put(key, val); - } - - public Map getConfOverlay() { - return this.confOverlay; - } - - public void setConfOverlay(Map confOverlay) { - this.confOverlay = confOverlay; - } - - public void unsetConfOverlay() { - this.confOverlay = null; - } - - /** Returns true if field confOverlay is set (has been assigned a value) and false otherwise */ - public boolean isSetConfOverlay() { - return this.confOverlay != null; - } - - public void setConfOverlayIsSet(boolean value) { - if (!value) { - this.confOverlay = null; - } - } - - public boolean isRunAsync() { - return this.runAsync; - } - - public void setRunAsync(boolean runAsync) { - this.runAsync = runAsync; - setRunAsyncIsSet(true); - } - - public void unsetRunAsync() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RUNASYNC_ISSET_ID); - } - - /** Returns true if field runAsync is set (has been assigned a value) and false otherwise */ - public boolean isSetRunAsync() { - return EncodingUtils.testBit(__isset_bitfield, __RUNASYNC_ISSET_ID); - } - - public void setRunAsyncIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RUNASYNC_ISSET_ID, value); - } - - public long getQueryTimeout() { - return this.queryTimeout; - } - - public void setQueryTimeout(long queryTimeout) { - this.queryTimeout = queryTimeout; - setQueryTimeoutIsSet(true); - } - - public void unsetQueryTimeout() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __QUERYTIMEOUT_ISSET_ID); - } - - /** Returns true if field queryTimeout is set (has been assigned a value) and false otherwise */ - public boolean isSetQueryTimeout() { - return EncodingUtils.testBit(__isset_bitfield, __QUERYTIMEOUT_ISSET_ID); - } - - public void setQueryTimeoutIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __QUERYTIMEOUT_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case STATEMENT: - if (value == null) { - unsetStatement(); - } else { - setStatement((String)value); - } - break; - - case CONF_OVERLAY: - if (value == null) { - unsetConfOverlay(); - } else { - setConfOverlay((Map)value); - } - break; - - case RUN_ASYNC: - if (value == null) { - unsetRunAsync(); - } else { - setRunAsync((Boolean)value); - } - break; - - case QUERY_TIMEOUT: - if (value == null) { - unsetQueryTimeout(); - } else { - setQueryTimeout((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case STATEMENT: - return getStatement(); - - case CONF_OVERLAY: - return getConfOverlay(); - - case RUN_ASYNC: - return isRunAsync(); - - case QUERY_TIMEOUT: - return getQueryTimeout(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case STATEMENT: - return isSetStatement(); - case CONF_OVERLAY: - return isSetConfOverlay(); - case RUN_ASYNC: - return isSetRunAsync(); - case 
QUERY_TIMEOUT: - return isSetQueryTimeout(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TExecuteStatementReq) - return this.equals((TExecuteStatementReq)that); - return false; - } - - public boolean equals(TExecuteStatementReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_statement = true && this.isSetStatement(); - boolean that_present_statement = true && that.isSetStatement(); - if (this_present_statement || that_present_statement) { - if (!(this_present_statement && that_present_statement)) - return false; - if (!this.statement.equals(that.statement)) - return false; - } - - boolean this_present_confOverlay = true && this.isSetConfOverlay(); - boolean that_present_confOverlay = true && that.isSetConfOverlay(); - if (this_present_confOverlay || that_present_confOverlay) { - if (!(this_present_confOverlay && that_present_confOverlay)) - return false; - if (!this.confOverlay.equals(that.confOverlay)) - return false; - } - - boolean this_present_runAsync = true && this.isSetRunAsync(); - boolean that_present_runAsync = true && that.isSetRunAsync(); - if (this_present_runAsync || that_present_runAsync) { - if (!(this_present_runAsync && that_present_runAsync)) - return false; - if (this.runAsync != that.runAsync) - return false; - } - - boolean this_present_queryTimeout = true && this.isSetQueryTimeout(); - boolean that_present_queryTimeout = true && that.isSetQueryTimeout(); - if (this_present_queryTimeout || that_present_queryTimeout) { - if (!(this_present_queryTimeout && that_present_queryTimeout)) - return false; - if (this.queryTimeout != that.queryTimeout) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_statement = true && (isSetStatement()); - list.add(present_statement); - if (present_statement) - list.add(statement); - - boolean present_confOverlay = true && (isSetConfOverlay()); - list.add(present_confOverlay); - if (present_confOverlay) - list.add(confOverlay); - - boolean present_runAsync = true && (isSetRunAsync()); - list.add(present_runAsync); - if (present_runAsync) - list.add(runAsync); - - boolean present_queryTimeout = true && (isSetQueryTimeout()); - list.add(present_queryTimeout); - if (present_queryTimeout) - list.add(queryTimeout); - - return list.hashCode(); - } - - @Override - public int compareTo(TExecuteStatementReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - 
lastComparison = Boolean.valueOf(isSetStatement()).compareTo(other.isSetStatement()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatement()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statement, other.statement); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetConfOverlay()).compareTo(other.isSetConfOverlay()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetConfOverlay()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.confOverlay, other.confOverlay); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRunAsync()).compareTo(other.isSetRunAsync()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRunAsync()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.runAsync, other.runAsync); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetQueryTimeout()).compareTo(other.isSetQueryTimeout()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetQueryTimeout()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.queryTimeout, other.queryTimeout); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TExecuteStatementReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("statement:"); - if (this.statement == null) { - sb.append("null"); - } else { - sb.append(this.statement); - } - first = false; - if (isSetConfOverlay()) { - if (!first) sb.append(", "); - sb.append("confOverlay:"); - if (this.confOverlay == null) { - sb.append("null"); - } else { - sb.append(this.confOverlay); - } - first = false; - } - if (isSetRunAsync()) { - if (!first) sb.append(", "); - sb.append("runAsync:"); - sb.append(this.runAsync); - first = false; - } - if (isSetQueryTimeout()) { - if (!first) sb.append(", "); - sb.append("queryTimeout:"); - sb.append(this.queryTimeout); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetStatement()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'statement' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TExecuteStatementReqStandardSchemeFactory implements SchemeFactory { - public TExecuteStatementReqStandardScheme getScheme() { - return new TExecuteStatementReqStandardScheme(); - } - } - - private static class TExecuteStatementReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TExecuteStatementReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // STATEMENT - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.statement = iprot.readString(); - struct.setStatementIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // CONF_OVERLAY - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map162 = iprot.readMapBegin(); - struct.confOverlay = new HashMap(2*_map162.size); - String _key163; - String _val164; - for (int _i165 = 0; _i165 < _map162.size; ++_i165) - { - _key163 = iprot.readString(); - _val164 = iprot.readString(); - struct.confOverlay.put(_key163, _val164); - } - iprot.readMapEnd(); - } - struct.setConfOverlayIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // RUN_ASYNC - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.runAsync = iprot.readBool(); - struct.setRunAsyncIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // QUERY_TIMEOUT - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.queryTimeout = iprot.readI64(); - struct.setQueryTimeoutIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TExecuteStatementReq struct) throws 
org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.statement != null) { - oprot.writeFieldBegin(STATEMENT_FIELD_DESC); - oprot.writeString(struct.statement); - oprot.writeFieldEnd(); - } - if (struct.confOverlay != null) { - if (struct.isSetConfOverlay()) { - oprot.writeFieldBegin(CONF_OVERLAY_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.confOverlay.size())); - for (Map.Entry _iter166 : struct.confOverlay.entrySet()) - { - oprot.writeString(_iter166.getKey()); - oprot.writeString(_iter166.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - } - if (struct.isSetRunAsync()) { - oprot.writeFieldBegin(RUN_ASYNC_FIELD_DESC); - oprot.writeBool(struct.runAsync); - oprot.writeFieldEnd(); - } - if (struct.isSetQueryTimeout()) { - oprot.writeFieldBegin(QUERY_TIMEOUT_FIELD_DESC); - oprot.writeI64(struct.queryTimeout); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TExecuteStatementReqTupleSchemeFactory implements SchemeFactory { - public TExecuteStatementReqTupleScheme getScheme() { - return new TExecuteStatementReqTupleScheme(); - } - } - - private static class TExecuteStatementReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.statement); - BitSet optionals = new BitSet(); - if (struct.isSetConfOverlay()) { - optionals.set(0); - } - if (struct.isSetRunAsync()) { - optionals.set(1); - } - if (struct.isSetQueryTimeout()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetConfOverlay()) { - { - oprot.writeI32(struct.confOverlay.size()); - for (Map.Entry _iter167 : struct.confOverlay.entrySet()) - { - oprot.writeString(_iter167.getKey()); - oprot.writeString(_iter167.getValue()); - } - } - } - if (struct.isSetRunAsync()) { - oprot.writeBool(struct.runAsync); - } - if (struct.isSetQueryTimeout()) { - oprot.writeI64(struct.queryTimeout); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.statement = iprot.readString(); - struct.setStatementIsSet(true); - BitSet incoming = iprot.readBitSet(3); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TMap _map168 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.confOverlay = new HashMap(2*_map168.size); - String _key169; - String _val170; - for (int _i171 = 0; _i171 < _map168.size; ++_i171) - { - _key169 = iprot.readString(); - _val170 = iprot.readString(); - struct.confOverlay.put(_key169, _val170); - } - } - struct.setConfOverlayIsSet(true); - } - if (incoming.get(1)) { - struct.runAsync = iprot.readBool(); - struct.setRunAsyncIsSet(true); - } - if (incoming.get(2)) { - struct.queryTimeout = 
iprot.readI64(); - struct.setQueryTimeoutIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TExecuteStatementResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TExecuteStatementResp.java deleted file mode 100644 index 7101fa5bdb84c..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TExecuteStatementResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TExecuteStatementResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TExecuteStatementResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TExecuteStatementRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TExecuteStatementRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TExecuteStatementResp.class, metaDataMap); - } - - public TExecuteStatementResp() { - } - - public TExecuteStatementResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TExecuteStatementResp(TExecuteStatementResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TExecuteStatementResp deepCopy() { - return new TExecuteStatementResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TExecuteStatementResp) - return this.equals((TExecuteStatementResp)that); - return false; - } - - public boolean equals(TExecuteStatementResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - 
list.add(present_status); - if (present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TExecuteStatementResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TExecuteStatementResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TExecuteStatementRespStandardSchemeFactory implements SchemeFactory { - public TExecuteStatementRespStandardScheme getScheme() { - return new TExecuteStatementRespStandardScheme(); - } - } - - private static class TExecuteStatementRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TExecuteStatementRespTupleSchemeFactory implements SchemeFactory { - public TExecuteStatementRespTupleScheme getScheme() { - return new TExecuteStatementRespTupleScheme(); - } - } - - private static class TExecuteStatementRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - 
struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TExecuteStatementResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchOrientation.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchOrientation.java deleted file mode 100644 index 159be45259434..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchOrientation.java +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TFetchOrientation implements org.apache.thrift.TEnum { - FETCH_NEXT(0), - FETCH_PRIOR(1), - FETCH_RELATIVE(2), - FETCH_ABSOLUTE(3), - FETCH_FIRST(4), - FETCH_LAST(5); - - private final int value; - - private TFetchOrientation(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. - */ - public static TFetchOrientation findByValue(int value) { - switch (value) { - case 0: - return FETCH_NEXT; - case 1: - return FETCH_PRIOR; - case 2: - return FETCH_RELATIVE; - case 3: - return FETCH_ABSOLUTE; - case 4: - return FETCH_FIRST; - case 5: - return FETCH_LAST; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchResultsReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchResultsReq.java deleted file mode 100644 index 2c93339d0c68b..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchResultsReq.java +++ /dev/null @@ -1,714 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import 
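For orientation while reading these deletions: the TFetchOrientation enum above is the wire-level i32 used by FetchResults calls, and its findByValue returns null for any value the generated code does not recognize. A minimal decoding sketch, assuming only that this generated class is on the classpath; the orientationOrDefault helper and the fallback choice are illustrative, not part of the generated API:

    import org.apache.hive.service.rpc.thrift.TFetchOrientation;

    public final class OrientationDecodeSketch {
      // Map an i32 read off the wire to the enum, falling back to FETCH_NEXT for
      // values this build does not know about (e.g. from a newer IDL revision).
      static TFetchOrientation orientationOrDefault(int wireValue) {
        TFetchOrientation o = TFetchOrientation.findByValue(wireValue); // null if unknown
        return (o != null) ? o : TFetchOrientation.FETCH_NEXT;
      }

      public static void main(String[] args) {
        System.out.println(orientationOrDefault(4));   // FETCH_FIRST
        System.out.println(orientationOrDefault(42));  // unknown value -> FETCH_NEXT
      }
    }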
org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TFetchResultsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFetchResultsReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField ORIENTATION_FIELD_DESC = new org.apache.thrift.protocol.TField("orientation", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField MAX_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxRows", org.apache.thrift.protocol.TType.I64, (short)3); - private static final org.apache.thrift.protocol.TField FETCH_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("fetchType", org.apache.thrift.protocol.TType.I16, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TFetchResultsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TFetchResultsReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - private TFetchOrientation orientation; // required - private long maxRows; // required - private short fetchType; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"), - /** - * - * @see TFetchOrientation - */ - ORIENTATION((short)2, "orientation"), - MAX_ROWS((short)3, "maxRows"), - FETCH_TYPE((short)4, "fetchType"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - case 2: // ORIENTATION - return ORIENTATION; - case 3: // MAX_ROWS - return MAX_ROWS; - case 4: // FETCH_TYPE - return FETCH_TYPE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __MAXROWS_ISSET_ID = 0; - private static final int __FETCHTYPE_ISSET_ID = 1; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.FETCH_TYPE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - tmpMap.put(_Fields.ORIENTATION, new org.apache.thrift.meta_data.FieldMetaData("orientation", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TFetchOrientation.class))); - tmpMap.put(_Fields.MAX_ROWS, new org.apache.thrift.meta_data.FieldMetaData("maxRows", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.FETCH_TYPE, new org.apache.thrift.meta_data.FieldMetaData("fetchType", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFetchResultsReq.class, metaDataMap); - } - - public TFetchResultsReq() { - this.orientation = org.apache.hive.service.rpc.thrift.TFetchOrientation.FETCH_NEXT; - - this.fetchType = (short)0; - - } - - public TFetchResultsReq( - TOperationHandle operationHandle, - TFetchOrientation orientation, - long maxRows) - { - this(); - this.operationHandle = operationHandle; - this.orientation = orientation; - this.maxRows = maxRows; - setMaxRowsIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public TFetchResultsReq(TFetchResultsReq other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - if (other.isSetOrientation()) { - this.orientation = other.orientation; - } - this.maxRows = other.maxRows; - this.fetchType = other.fetchType; - } - - public TFetchResultsReq deepCopy() { - return new TFetchResultsReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - this.orientation = org.apache.hive.service.rpc.thrift.TFetchOrientation.FETCH_NEXT; - - setMaxRowsIsSet(false); - this.maxRows = 0; - this.fetchType = (short)0; - - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - /** - * - * @see TFetchOrientation - */ - public TFetchOrientation getOrientation() { - return this.orientation; - } - - /** - * - * @see TFetchOrientation - */ - public void setOrientation(TFetchOrientation orientation) { - this.orientation = orientation; - } - - public void unsetOrientation() { - this.orientation = null; - } - - /** Returns true if field orientation is set (has been assigned a value) and false otherwise */ - public boolean isSetOrientation() { - return this.orientation != null; - } - - public void setOrientationIsSet(boolean value) { - if (!value) { - this.orientation = null; - } - } - - public long getMaxRows() { - return this.maxRows; - } - - public void setMaxRows(long maxRows) { - this.maxRows = maxRows; - setMaxRowsIsSet(true); - } - - public void unsetMaxRows() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXROWS_ISSET_ID); - } - - /** Returns true if field maxRows is set (has been assigned a value) and false otherwise */ - public boolean isSetMaxRows() { - return EncodingUtils.testBit(__isset_bitfield, __MAXROWS_ISSET_ID); - } - - public void setMaxRowsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXROWS_ISSET_ID, value); - } - - public short getFetchType() { - return this.fetchType; - } - - public void setFetchType(short fetchType) { - this.fetchType = fetchType; - setFetchTypeIsSet(true); - } - - public void unsetFetchType() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __FETCHTYPE_ISSET_ID); - } - - /** Returns true if field fetchType is set (has been assigned a value) and false otherwise */ - public boolean isSetFetchType() { - return EncodingUtils.testBit(__isset_bitfield, __FETCHTYPE_ISSET_ID); - } - - public void setFetchTypeIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __FETCHTYPE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - case ORIENTATION: - if (value == null) { - unsetOrientation(); - } else { - setOrientation((TFetchOrientation)value); - } - break; - - case MAX_ROWS: - if (value == null) { - 
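The accessors above are the whole client-facing surface of TFetchResultsReq: operationHandle, orientation, and maxRows are required (maxRows tracked through an isset bit in __isset_bitfield), while fetchType is optional. A minimal construction sketch using only the constructor and setters shown in this file; the TOperationHandle parameter stands in for a handle obtained from an earlier response, and the fetchType value 0 is used purely as an example:

    import org.apache.hive.service.rpc.thrift.TFetchOrientation;
    import org.apache.hive.service.rpc.thrift.TFetchResultsReq;
    import org.apache.hive.service.rpc.thrift.TOperationHandle;

    final class FetchRequestSketch {
      // Build a request for the next `batchSize` rows of the given operation.
      static TFetchResultsReq nextBatch(TOperationHandle opHandle, long batchSize) {
        TFetchResultsReq req =
            new TFetchResultsReq(opHandle, TFetchOrientation.FETCH_NEXT, batchSize);
        req.setFetchType((short) 0);  // optional field; the setter also flips its isset bit
        return req;                   // req.isSetFetchType() is now true
      }
    }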
unsetMaxRows(); - } else { - setMaxRows((Long)value); - } - break; - - case FETCH_TYPE: - if (value == null) { - unsetFetchType(); - } else { - setFetchType((Short)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - case ORIENTATION: - return getOrientation(); - - case MAX_ROWS: - return getMaxRows(); - - case FETCH_TYPE: - return getFetchType(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - case ORIENTATION: - return isSetOrientation(); - case MAX_ROWS: - return isSetMaxRows(); - case FETCH_TYPE: - return isSetFetchType(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TFetchResultsReq) - return this.equals((TFetchResultsReq)that); - return false; - } - - public boolean equals(TFetchResultsReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - boolean this_present_orientation = true && this.isSetOrientation(); - boolean that_present_orientation = true && that.isSetOrientation(); - if (this_present_orientation || that_present_orientation) { - if (!(this_present_orientation && that_present_orientation)) - return false; - if (!this.orientation.equals(that.orientation)) - return false; - } - - boolean this_present_maxRows = true; - boolean that_present_maxRows = true; - if (this_present_maxRows || that_present_maxRows) { - if (!(this_present_maxRows && that_present_maxRows)) - return false; - if (this.maxRows != that.maxRows) - return false; - } - - boolean this_present_fetchType = true && this.isSetFetchType(); - boolean that_present_fetchType = true && that.isSetFetchType(); - if (this_present_fetchType || that_present_fetchType) { - if (!(this_present_fetchType && that_present_fetchType)) - return false; - if (this.fetchType != that.fetchType) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - boolean present_orientation = true && (isSetOrientation()); - list.add(present_orientation); - if (present_orientation) - list.add(orientation.getValue()); - - boolean present_maxRows = true; - list.add(present_maxRows); - if (present_maxRows) - list.add(maxRows); - - boolean present_fetchType = true && (isSetFetchType()); - list.add(present_fetchType); - if (present_fetchType) - list.add(fetchType); - - return list.hashCode(); - } - - @Override - public int compareTo(TFetchResultsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = 
Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOrientation()).compareTo(other.isSetOrientation()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOrientation()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.orientation, other.orientation); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetMaxRows()).compareTo(other.isSetMaxRows()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetMaxRows()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxRows, other.maxRows); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFetchType()).compareTo(other.isSetFetchType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFetchType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fetchType, other.fetchType); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TFetchResultsReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("orientation:"); - if (this.orientation == null) { - sb.append("null"); - } else { - sb.append(this.orientation); - } - first = false; - if (!first) sb.append(", "); - sb.append("maxRows:"); - sb.append(this.maxRows); - first = false; - if (isSetFetchType()) { - if (!first) sb.append(", "); - sb.append("fetchType:"); - sb.append(this.fetchType); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! Struct:" + toString()); - } - - if (!isSetOrientation()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'orientation' is unset! Struct:" + toString()); - } - - if (!isSetMaxRows()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxRows' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TFetchResultsReqStandardSchemeFactory implements SchemeFactory { - public TFetchResultsReqStandardScheme getScheme() { - return new TFetchResultsReqStandardScheme(); - } - } - - private static class TFetchResultsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TFetchResultsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // ORIENTATION - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.orientation = org.apache.hive.service.rpc.thrift.TFetchOrientation.findByValue(iprot.readI32()); - struct.setOrientationIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // MAX_ROWS - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.maxRows = iprot.readI64(); - struct.setMaxRowsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // FETCH_TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.fetchType = iprot.readI16(); - struct.setFetchTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TFetchResultsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.orientation != null) { - oprot.writeFieldBegin(ORIENTATION_FIELD_DESC); - oprot.writeI32(struct.orientation.getValue()); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(MAX_ROWS_FIELD_DESC); - oprot.writeI64(struct.maxRows); - oprot.writeFieldEnd(); - if 
(struct.isSetFetchType()) { - oprot.writeFieldBegin(FETCH_TYPE_FIELD_DESC); - oprot.writeI16(struct.fetchType); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TFetchResultsReqTupleSchemeFactory implements SchemeFactory { - public TFetchResultsReqTupleScheme getScheme() { - return new TFetchResultsReqTupleScheme(); - } - } - - private static class TFetchResultsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TFetchResultsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - oprot.writeI32(struct.orientation.getValue()); - oprot.writeI64(struct.maxRows); - BitSet optionals = new BitSet(); - if (struct.isSetFetchType()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetFetchType()) { - oprot.writeI16(struct.fetchType); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TFetchResultsReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - struct.orientation = org.apache.hive.service.rpc.thrift.TFetchOrientation.findByValue(iprot.readI32()); - struct.setOrientationIsSet(true); - struct.maxRows = iprot.readI64(); - struct.setMaxRowsIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.fetchType = iprot.readI16(); - struct.setFetchTypeIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchResultsResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchResultsResp.java deleted file mode 100644 index 8f86cee3ad468..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TFetchResultsResp.java +++ /dev/null @@ -1,612 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TFetchResultsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TFetchResultsResp"); - - private 
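The writeObject/readObject hooks and the TupleScheme just above both funnel through the compact protocol: required fields are written unconditionally and a BitSet records which optional fields follow. A sketch of the same round trip done explicitly with libthrift's TSerializer/TDeserializer; these utilities are standard libthrift classes rather than part of the deleted file, and the constructor signatures are from the Thrift 0.9.x line this code was generated with:

    import org.apache.hive.service.rpc.thrift.TFetchResultsReq;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    final class CompactRoundTripSketch {
      // Serialize to compact-protocol bytes and back; copy.equals(original) should hold.
      static TFetchResultsReq roundTrip(TFetchResultsReq original) throws TException {
        byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(original);
        TFetchResultsReq copy = new TFetchResultsReq();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
        return copy;
      }
    }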
static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField HAS_MORE_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("hasMoreRows", org.apache.thrift.protocol.TType.BOOL, (short)2); - private static final org.apache.thrift.protocol.TField RESULTS_FIELD_DESC = new org.apache.thrift.protocol.TField("results", org.apache.thrift.protocol.TType.STRUCT, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TFetchResultsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TFetchResultsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private boolean hasMoreRows; // optional - private TRowSet results; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - HAS_MORE_ROWS((short)2, "hasMoreRows"), - RESULTS((short)3, "results"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // HAS_MORE_ROWS - return HAS_MORE_ROWS; - case 3: // RESULTS - return RESULTS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __HASMOREROWS_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.HAS_MORE_ROWS,_Fields.RESULTS}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.HAS_MORE_ROWS, new org.apache.thrift.meta_data.FieldMetaData("hasMoreRows", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.RESULTS, new org.apache.thrift.meta_data.FieldMetaData("results", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRowSet.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TFetchResultsResp.class, metaDataMap); - } - - public TFetchResultsResp() { - } - - public TFetchResultsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TFetchResultsResp(TFetchResultsResp other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - this.hasMoreRows = other.hasMoreRows; - if (other.isSetResults()) { - this.results = new TRowSet(other.results); - } - } - - public TFetchResultsResp deepCopy() { - return new TFetchResultsResp(this); - } - - @Override - public void clear() { - this.status = null; - setHasMoreRowsIsSet(false); - this.hasMoreRows = false; - this.results = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public boolean isHasMoreRows() { - return this.hasMoreRows; - } - - public void setHasMoreRows(boolean hasMoreRows) { - this.hasMoreRows = hasMoreRows; - setHasMoreRowsIsSet(true); - } - - public void unsetHasMoreRows() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASMOREROWS_ISSET_ID); - } - - /** Returns true if field hasMoreRows is set (has been assigned a value) and false otherwise */ - public boolean isSetHasMoreRows() { - return EncodingUtils.testBit(__isset_bitfield, __HASMOREROWS_ISSET_ID); - } - - public void setHasMoreRowsIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASMOREROWS_ISSET_ID, value); - } - - public TRowSet getResults() { - return this.results; - } - - public void setResults(TRowSet results) { - this.results = results; - } - - public void unsetResults() { - this.results = null; - } - - /** Returns true if field results is set (has been assigned a value) and false otherwise */ - public boolean isSetResults() { - return this.results != null; - } - - public void setResultsIsSet(boolean value) { - if (!value) { - this.results = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case HAS_MORE_ROWS: - if (value == null) { - unsetHasMoreRows(); - } else { - setHasMoreRows((Boolean)value); - } - break; - - case RESULTS: - if (value == null) { - unsetResults(); - } else { - setResults((TRowSet)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case HAS_MORE_ROWS: - return isHasMoreRows(); - - case RESULTS: - return getResults(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case HAS_MORE_ROWS: - return isSetHasMoreRows(); - case RESULTS: - return isSetResults(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TFetchResultsResp) - return this.equals((TFetchResultsResp)that); - return false; - } - - public boolean equals(TFetchResultsResp that) { - if (that == null) - return false; - - boolean this_present_status = true && 
this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_hasMoreRows = true && this.isSetHasMoreRows(); - boolean that_present_hasMoreRows = true && that.isSetHasMoreRows(); - if (this_present_hasMoreRows || that_present_hasMoreRows) { - if (!(this_present_hasMoreRows && that_present_hasMoreRows)) - return false; - if (this.hasMoreRows != that.hasMoreRows) - return false; - } - - boolean this_present_results = true && this.isSetResults(); - boolean that_present_results = true && that.isSetResults(); - if (this_present_results || that_present_results) { - if (!(this_present_results && that_present_results)) - return false; - if (!this.results.equals(that.results)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_hasMoreRows = true && (isSetHasMoreRows()); - list.add(present_hasMoreRows); - if (present_hasMoreRows) - list.add(hasMoreRows); - - boolean present_results = true && (isSetResults()); - list.add(present_results); - if (present_results) - list.add(results); - - return list.hashCode(); - } - - @Override - public int compareTo(TFetchResultsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHasMoreRows()).compareTo(other.isSetHasMoreRows()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHasMoreRows()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasMoreRows, other.hasMoreRows); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetResults()).compareTo(other.isSetResults()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetResults()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.results, other.results); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TFetchResultsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetHasMoreRows()) { - if (!first) sb.append(", "); - sb.append("hasMoreRows:"); - sb.append(this.hasMoreRows); - first = false; - } - if (isSetResults()) { - if (!first) sb.append(", "); - 
sb.append("results:"); - if (this.results == null) { - sb.append("null"); - } else { - sb.append(this.results); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (results != null) { - results.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TFetchResultsRespStandardSchemeFactory implements SchemeFactory { - public TFetchResultsRespStandardScheme getScheme() { - return new TFetchResultsRespStandardScheme(); - } - } - - private static class TFetchResultsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TFetchResultsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // HAS_MORE_ROWS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.hasMoreRows = iprot.readBool(); - struct.setHasMoreRowsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // RESULTS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.results = new TRowSet(); - struct.results.read(iprot); - struct.setResultsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TFetchResultsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.isSetHasMoreRows()) { - oprot.writeFieldBegin(HAS_MORE_ROWS_FIELD_DESC); - oprot.writeBool(struct.hasMoreRows); - oprot.writeFieldEnd(); - } - if (struct.results != null) { - if (struct.isSetResults()) { - 
oprot.writeFieldBegin(RESULTS_FIELD_DESC); - struct.results.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TFetchResultsRespTupleSchemeFactory implements SchemeFactory { - public TFetchResultsRespTupleScheme getScheme() { - return new TFetchResultsRespTupleScheme(); - } - } - - private static class TFetchResultsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TFetchResultsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetHasMoreRows()) { - optionals.set(0); - } - if (struct.isSetResults()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetHasMoreRows()) { - oprot.writeBool(struct.hasMoreRows); - } - if (struct.isSetResults()) { - struct.results.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TFetchResultsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.hasMoreRows = iprot.readBool(); - struct.setHasMoreRowsIsSet(true); - } - if (incoming.get(1)) { - struct.results = new TRowSet(); - struct.results.read(iprot); - struct.setResultsIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java deleted file mode 100644 index b8a2ca6648069..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetCatalogsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCatalogsReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new 
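Taken together, TFetchResultsReq and TFetchResultsResp implement the result-paging half of this RPC: the response carries a required TStatus, an optional hasMoreRows flag, and an optional TRowSet of results. A client-side paging sketch, assuming a TCLIService.Iface stub named client (the service interface is generated from the same IDL but does not appear in this diff) and omitting TStatus error handling; whether the server actually populates hasMoreRows is implementation-dependent, so real clients often also stop on an empty TRowSet:

    import java.util.function.Consumer;
    import org.apache.hive.service.rpc.thrift.*;
    import org.apache.thrift.TException;

    final class FetchLoopSketch {
      // Pull batches of up to 1000 rows until the server stops reporting more.
      static void fetchAll(TCLIService.Iface client, TOperationHandle op,
                           Consumer<TRowSet> sink) throws TException {
        TFetchResultsResp resp;
        do {
          TFetchResultsReq req =
              new TFetchResultsReq(op, TFetchOrientation.FETCH_NEXT, 1000L);
          resp = client.FetchResults(req);
          if (resp.isSetResults()) {
            sink.accept(resp.getResults());  // TRowSet consumer supplied by the caller
          }
        } while (resp.isSetHasMoreRows() && resp.isHasMoreRows());
      }
    }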
org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetCatalogsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetCatalogsReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCatalogsReq.class, metaDataMap); - } - - public TGetCatalogsReq() { - } - - public TGetCatalogsReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetCatalogsReq(TGetCatalogsReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TGetCatalogsReq deepCopy() { - return new TGetCatalogsReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetCatalogsReq) - return this.equals((TGetCatalogsReq)that); - return false; - } - - public boolean equals(TGetCatalogsReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetCatalogsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - 
@Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetCatalogsReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetCatalogsReqStandardSchemeFactory implements SchemeFactory { - public TGetCatalogsReqStandardScheme getScheme() { - return new TGetCatalogsReqStandardScheme(); - } - } - - private static class TGetCatalogsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCatalogsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCatalogsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetCatalogsReqTupleSchemeFactory implements SchemeFactory { - public TGetCatalogsReqTupleScheme getScheme() { - return new TGetCatalogsReqTupleScheme(); - } - } - - private static class TGetCatalogsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new 
TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCatalogsResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCatalogsResp.java deleted file mode 100644 index eeeac9a1f9292..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCatalogsResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetCatalogsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCatalogsResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetCatalogsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetCatalogsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
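TGetCatalogsReq/TGetCatalogsResp follow the pattern shared by all of these generated pairs: the request wraps a required TSessionHandle, and the response carries a required TStatus plus an optional TOperationHandle for the operation that produces the catalog rows. A usage sketch under the same assumptions as above (a hypothetical TCLIService.Iface stub; TStatusCode and the TStatus accessors come from sibling generated classes not shown in this diff):

    import org.apache.hive.service.rpc.thrift.*;
    import org.apache.thrift.TException;

    final class GetCatalogsSketch {
      // Issue GetCatalogs and fail fast on a non-success status.
      static TOperationHandle listCatalogs(TCLIService.Iface client, TSessionHandle session)
          throws TException {
        TGetCatalogsResp resp = client.GetCatalogs(new TGetCatalogsReq(session));
        TStatusCode code = resp.getStatus().getStatusCode();
        if (code != TStatusCode.SUCCESS_STATUS && code != TStatusCode.SUCCESS_WITH_INFO_STATUS) {
          throw new TException("GetCatalogs failed: " + resp.getStatus().getErrorMessage());
        }
        return resp.getOperationHandle();  // optional field, populated on success
      }
    }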
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCatalogsResp.class, metaDataMap); - } - - public TGetCatalogsResp() { - } - - public TGetCatalogsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetCatalogsResp(TGetCatalogsResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetCatalogsResp deepCopy() { - return new TGetCatalogsResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetCatalogsResp) - return this.equals((TGetCatalogsResp)that); - return false; - } - - public boolean equals(TGetCatalogsResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if 
(present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetCatalogsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetCatalogsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetCatalogsRespStandardSchemeFactory implements SchemeFactory { - public TGetCatalogsRespStandardScheme getScheme() { - return new TGetCatalogsRespStandardScheme(); - } - } - - private static class TGetCatalogsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetCatalogsRespTupleSchemeFactory implements SchemeFactory { - public TGetCatalogsRespTupleScheme getScheme() { - return new TGetCatalogsRespTupleScheme(); - } - } - - private static class TGetCatalogsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public 
void read(org.apache.thrift.protocol.TProtocol prot, TGetCatalogsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java deleted file mode 100644 index ba80279294957..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java +++ /dev/null @@ -1,822 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetColumnsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetColumnsReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetColumnsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetColumnsReqTupleSchemeFactory()); - } - - private TSessionHandle 
sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - private String tableName; // optional - private String columnName; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"), - TABLE_NAME((short)4, "tableName"), - COLUMN_NAME((short)5, "columnName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - case 4: // TABLE_NAME - return TABLE_NAME; - case 5: // COLUMN_NAME - return COLUMN_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME,_Fields.TABLE_NAME,_Fields.COLUMN_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", 
org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetColumnsReq.class, metaDataMap); - } - - public TGetColumnsReq() { - } - - public TGetColumnsReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. - */ - public TGetColumnsReq(TGetColumnsReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - if (other.isSetTableName()) { - this.tableName = other.tableName; - } - if (other.isSetColumnName()) { - this.columnName = other.columnName; - } - } - - public TGetColumnsReq deepCopy() { - return new TGetColumnsReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - this.tableName = null; - this.columnName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public String getTableName() { - return this.tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } - } - - public String getColumnName() { - return this.columnName; - } - - public void setColumnName(String columnName) { - this.columnName = columnName; - } - - public void unsetColumnName() { - this.columnName = null; - } - - /** Returns true if field columnName is set (has been 
assigned a value) and false otherwise */ - public boolean isSetColumnName() { - return this.columnName != null; - } - - public void setColumnNameIsSet(boolean value) { - if (!value) { - this.columnName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((String)value); - } - break; - - case COLUMN_NAME: - if (value == null) { - unsetColumnName(); - } else { - setColumnName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - case TABLE_NAME: - return getTableName(); - - case COLUMN_NAME: - return getColumnName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - case TABLE_NAME: - return isSetTableName(); - case COLUMN_NAME: - return isSetColumnName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetColumnsReq) - return this.equals((TGetColumnsReq)that); - return false; - } - - public boolean equals(TGetColumnsReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - - boolean this_present_columnName = true && 
this.isSetColumnName(); - boolean that_present_columnName = true && that.isSetColumnName(); - if (this_present_columnName || that_present_columnName) { - if (!(this_present_columnName && that_present_columnName)) - return false; - if (!this.columnName.equals(that.columnName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - list.add(present_catalogName); - if (present_catalogName) - list.add(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - list.add(present_schemaName); - if (present_schemaName) - list.add(schemaName); - - boolean present_tableName = true && (isSetTableName()); - list.add(present_tableName); - if (present_tableName) - list.add(tableName); - - boolean present_columnName = true && (isSetColumnName()); - list.add(present_columnName); - if (present_columnName) - list.add(columnName); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetColumnsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetColumnName()).compareTo(other.isSetColumnName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumnName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnName, other.columnName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb 
= new StringBuilder("TGetColumnsReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - if (isSetTableName()) { - if (!first) sb.append(", "); - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - } - if (isSetColumnName()) { - if (!first) sb.append(", "); - sb.append("columnName:"); - if (this.columnName == null) { - sb.append("null"); - } else { - sb.append(this.columnName); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetColumnsReqStandardSchemeFactory implements SchemeFactory { - public TGetColumnsReqStandardScheme getScheme() { - return new TGetColumnsReqStandardScheme(); - } - } - - private static class TGetColumnsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetColumnsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); - } - break; - case 4: // TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // COLUMN_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetColumnsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - if (struct.tableName != null) { - if (struct.isSetTableName()) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.tableName); - oprot.writeFieldEnd(); - } - } - if (struct.columnName != null) { - if (struct.isSetColumnName()) { - oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC); - oprot.writeString(struct.columnName); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetColumnsReqTupleSchemeFactory implements SchemeFactory { - public TGetColumnsReqTupleScheme getScheme() { - return new TGetColumnsReqTupleScheme(); - } - } - - private static class TGetColumnsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetColumnsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - if (struct.isSetTableName()) { - optionals.set(2); - } - if (struct.isSetColumnName()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - if (struct.isSetTableName()) { - oprot.writeString(struct.tableName); - } - if (struct.isSetColumnName()) { - oprot.writeString(struct.columnName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetColumnsReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(4); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - 
struct.setSchemaNameIsSet(true); - } - if (incoming.get(2)) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } - if (incoming.get(3)) { - struct.columnName = iprot.readString(); - struct.setColumnNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetColumnsResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetColumnsResp.java deleted file mode 100644 index c68aac9042fc1..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetColumnsResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetColumnsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetColumnsResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetColumnsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetColumnsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetColumnsResp.class, metaDataMap); - } - - public TGetColumnsResp() { - } - - public TGetColumnsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetColumnsResp(TGetColumnsResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetColumnsResp deepCopy() { - return new TGetColumnsResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetColumnsResp) - return this.equals((TGetColumnsResp)that); - return false; - } - - public boolean equals(TGetColumnsResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if 
(present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetColumnsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetColumnsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetColumnsRespStandardSchemeFactory implements SchemeFactory { - public TGetColumnsRespStandardScheme getScheme() { - return new TGetColumnsRespStandardScheme(); - } - } - - private static class TGetColumnsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetColumnsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetColumnsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetColumnsRespTupleSchemeFactory implements SchemeFactory { - public TGetColumnsRespTupleScheme getScheme() { - return new TGetColumnsRespTupleScheme(); - } - } - - private static class TGetColumnsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetColumnsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TGetColumnsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java deleted file mode 100644 index 972957063b297..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java +++ /dev/null @@ -1,1034 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetCrossReferenceReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCrossReferenceReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField PARENT_CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentCatalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField PARENT_SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentSchemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField PARENT_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentTableName", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField FOREIGN_CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignCatalogName", org.apache.thrift.protocol.TType.STRING, (short)5); - private static final org.apache.thrift.protocol.TField FOREIGN_SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignSchemaName", org.apache.thrift.protocol.TType.STRING, 
(short)6); - private static final org.apache.thrift.protocol.TField FOREIGN_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignTableName", org.apache.thrift.protocol.TType.STRING, (short)7); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetCrossReferenceReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetCrossReferenceReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String parentCatalogName; // optional - private String parentSchemaName; // optional - private String parentTableName; // optional - private String foreignCatalogName; // optional - private String foreignSchemaName; // optional - private String foreignTableName; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - PARENT_CATALOG_NAME((short)2, "parentCatalogName"), - PARENT_SCHEMA_NAME((short)3, "parentSchemaName"), - PARENT_TABLE_NAME((short)4, "parentTableName"), - FOREIGN_CATALOG_NAME((short)5, "foreignCatalogName"), - FOREIGN_SCHEMA_NAME((short)6, "foreignSchemaName"), - FOREIGN_TABLE_NAME((short)7, "foreignTableName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // PARENT_CATALOG_NAME - return PARENT_CATALOG_NAME; - case 3: // PARENT_SCHEMA_NAME - return PARENT_SCHEMA_NAME; - case 4: // PARENT_TABLE_NAME - return PARENT_TABLE_NAME; - case 5: // FOREIGN_CATALOG_NAME - return FOREIGN_CATALOG_NAME; - case 6: // FOREIGN_SCHEMA_NAME - return FOREIGN_SCHEMA_NAME; - case 7: // FOREIGN_TABLE_NAME - return FOREIGN_TABLE_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.PARENT_CATALOG_NAME,_Fields.PARENT_SCHEMA_NAME,_Fields.PARENT_TABLE_NAME,_Fields.FOREIGN_CATALOG_NAME,_Fields.FOREIGN_SCHEMA_NAME,_Fields.FOREIGN_TABLE_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.PARENT_CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentCatalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.PARENT_SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentSchemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.PARENT_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentTableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.FOREIGN_CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreignCatalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.FOREIGN_SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreignSchemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.FOREIGN_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreignTableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCrossReferenceReq.class, metaDataMap); - } - - public TGetCrossReferenceReq() { - } - - public TGetCrossReferenceReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetCrossReferenceReq(TGetCrossReferenceReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetParentCatalogName()) { - this.parentCatalogName = other.parentCatalogName; - } - if (other.isSetParentSchemaName()) { - this.parentSchemaName = other.parentSchemaName; - } - if (other.isSetParentTableName()) { - this.parentTableName = other.parentTableName; - } - if (other.isSetForeignCatalogName()) { - this.foreignCatalogName = other.foreignCatalogName; - } - if (other.isSetForeignSchemaName()) { - this.foreignSchemaName = other.foreignSchemaName; - } - if (other.isSetForeignTableName()) { - this.foreignTableName = other.foreignTableName; - } - } - - public TGetCrossReferenceReq deepCopy() { - return new TGetCrossReferenceReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.parentCatalogName = null; - this.parentSchemaName = null; - this.parentTableName = null; - this.foreignCatalogName = null; - this.foreignSchemaName = null; - this.foreignTableName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getParentCatalogName() { - return this.parentCatalogName; - } - - public void setParentCatalogName(String parentCatalogName) { - this.parentCatalogName = parentCatalogName; - } - - public void unsetParentCatalogName() { - this.parentCatalogName = null; - } - - /** Returns true if field parentCatalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetParentCatalogName() { - return this.parentCatalogName != null; - } - - public void setParentCatalogNameIsSet(boolean value) { - if (!value) { - this.parentCatalogName = null; - } - } - - public String getParentSchemaName() { - return this.parentSchemaName; - } - - public void setParentSchemaName(String parentSchemaName) { - this.parentSchemaName = parentSchemaName; - } - - public void unsetParentSchemaName() { - this.parentSchemaName = null; - } - - /** Returns true if field parentSchemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetParentSchemaName() { - return this.parentSchemaName != null; - } - - public void setParentSchemaNameIsSet(boolean value) { - if (!value) { - this.parentSchemaName = null; - } - } - - public String getParentTableName() { - return this.parentTableName; - } - - public void setParentTableName(String parentTableName) { - this.parentTableName = parentTableName; - } - - public void unsetParentTableName() { - this.parentTableName = null; - } - - /** Returns true if field parentTableName is set (has been assigned a value) and false otherwise */ - public boolean isSetParentTableName() { - return this.parentTableName != null; - } - - public void setParentTableNameIsSet(boolean value) { - if (!value) { - this.parentTableName = null; - } - } - - public String getForeignCatalogName() { - return this.foreignCatalogName; - } - - public void setForeignCatalogName(String foreignCatalogName) { - this.foreignCatalogName = 
foreignCatalogName; - } - - public void unsetForeignCatalogName() { - this.foreignCatalogName = null; - } - - /** Returns true if field foreignCatalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetForeignCatalogName() { - return this.foreignCatalogName != null; - } - - public void setForeignCatalogNameIsSet(boolean value) { - if (!value) { - this.foreignCatalogName = null; - } - } - - public String getForeignSchemaName() { - return this.foreignSchemaName; - } - - public void setForeignSchemaName(String foreignSchemaName) { - this.foreignSchemaName = foreignSchemaName; - } - - public void unsetForeignSchemaName() { - this.foreignSchemaName = null; - } - - /** Returns true if field foreignSchemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetForeignSchemaName() { - return this.foreignSchemaName != null; - } - - public void setForeignSchemaNameIsSet(boolean value) { - if (!value) { - this.foreignSchemaName = null; - } - } - - public String getForeignTableName() { - return this.foreignTableName; - } - - public void setForeignTableName(String foreignTableName) { - this.foreignTableName = foreignTableName; - } - - public void unsetForeignTableName() { - this.foreignTableName = null; - } - - /** Returns true if field foreignTableName is set (has been assigned a value) and false otherwise */ - public boolean isSetForeignTableName() { - return this.foreignTableName != null; - } - - public void setForeignTableNameIsSet(boolean value) { - if (!value) { - this.foreignTableName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case PARENT_CATALOG_NAME: - if (value == null) { - unsetParentCatalogName(); - } else { - setParentCatalogName((String)value); - } - break; - - case PARENT_SCHEMA_NAME: - if (value == null) { - unsetParentSchemaName(); - } else { - setParentSchemaName((String)value); - } - break; - - case PARENT_TABLE_NAME: - if (value == null) { - unsetParentTableName(); - } else { - setParentTableName((String)value); - } - break; - - case FOREIGN_CATALOG_NAME: - if (value == null) { - unsetForeignCatalogName(); - } else { - setForeignCatalogName((String)value); - } - break; - - case FOREIGN_SCHEMA_NAME: - if (value == null) { - unsetForeignSchemaName(); - } else { - setForeignSchemaName((String)value); - } - break; - - case FOREIGN_TABLE_NAME: - if (value == null) { - unsetForeignTableName(); - } else { - setForeignTableName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case PARENT_CATALOG_NAME: - return getParentCatalogName(); - - case PARENT_SCHEMA_NAME: - return getParentSchemaName(); - - case PARENT_TABLE_NAME: - return getParentTableName(); - - case FOREIGN_CATALOG_NAME: - return getForeignCatalogName(); - - case FOREIGN_SCHEMA_NAME: - return getForeignSchemaName(); - - case FOREIGN_TABLE_NAME: - return getForeignTableName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case PARENT_CATALOG_NAME: - return 
isSetParentCatalogName(); - case PARENT_SCHEMA_NAME: - return isSetParentSchemaName(); - case PARENT_TABLE_NAME: - return isSetParentTableName(); - case FOREIGN_CATALOG_NAME: - return isSetForeignCatalogName(); - case FOREIGN_SCHEMA_NAME: - return isSetForeignSchemaName(); - case FOREIGN_TABLE_NAME: - return isSetForeignTableName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetCrossReferenceReq) - return this.equals((TGetCrossReferenceReq)that); - return false; - } - - public boolean equals(TGetCrossReferenceReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_parentCatalogName = true && this.isSetParentCatalogName(); - boolean that_present_parentCatalogName = true && that.isSetParentCatalogName(); - if (this_present_parentCatalogName || that_present_parentCatalogName) { - if (!(this_present_parentCatalogName && that_present_parentCatalogName)) - return false; - if (!this.parentCatalogName.equals(that.parentCatalogName)) - return false; - } - - boolean this_present_parentSchemaName = true && this.isSetParentSchemaName(); - boolean that_present_parentSchemaName = true && that.isSetParentSchemaName(); - if (this_present_parentSchemaName || that_present_parentSchemaName) { - if (!(this_present_parentSchemaName && that_present_parentSchemaName)) - return false; - if (!this.parentSchemaName.equals(that.parentSchemaName)) - return false; - } - - boolean this_present_parentTableName = true && this.isSetParentTableName(); - boolean that_present_parentTableName = true && that.isSetParentTableName(); - if (this_present_parentTableName || that_present_parentTableName) { - if (!(this_present_parentTableName && that_present_parentTableName)) - return false; - if (!this.parentTableName.equals(that.parentTableName)) - return false; - } - - boolean this_present_foreignCatalogName = true && this.isSetForeignCatalogName(); - boolean that_present_foreignCatalogName = true && that.isSetForeignCatalogName(); - if (this_present_foreignCatalogName || that_present_foreignCatalogName) { - if (!(this_present_foreignCatalogName && that_present_foreignCatalogName)) - return false; - if (!this.foreignCatalogName.equals(that.foreignCatalogName)) - return false; - } - - boolean this_present_foreignSchemaName = true && this.isSetForeignSchemaName(); - boolean that_present_foreignSchemaName = true && that.isSetForeignSchemaName(); - if (this_present_foreignSchemaName || that_present_foreignSchemaName) { - if (!(this_present_foreignSchemaName && that_present_foreignSchemaName)) - return false; - if (!this.foreignSchemaName.equals(that.foreignSchemaName)) - return false; - } - - boolean this_present_foreignTableName = true && this.isSetForeignTableName(); - boolean that_present_foreignTableName = true && that.isSetForeignTableName(); - if (this_present_foreignTableName || that_present_foreignTableName) { - if (!(this_present_foreignTableName && that_present_foreignTableName)) - return false; - if (!this.foreignTableName.equals(that.foreignTableName)) - return false; - } - - return true; - } - - @Override - public int 
hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_parentCatalogName = true && (isSetParentCatalogName()); - list.add(present_parentCatalogName); - if (present_parentCatalogName) - list.add(parentCatalogName); - - boolean present_parentSchemaName = true && (isSetParentSchemaName()); - list.add(present_parentSchemaName); - if (present_parentSchemaName) - list.add(parentSchemaName); - - boolean present_parentTableName = true && (isSetParentTableName()); - list.add(present_parentTableName); - if (present_parentTableName) - list.add(parentTableName); - - boolean present_foreignCatalogName = true && (isSetForeignCatalogName()); - list.add(present_foreignCatalogName); - if (present_foreignCatalogName) - list.add(foreignCatalogName); - - boolean present_foreignSchemaName = true && (isSetForeignSchemaName()); - list.add(present_foreignSchemaName); - if (present_foreignSchemaName) - list.add(foreignSchemaName); - - boolean present_foreignTableName = true && (isSetForeignTableName()); - list.add(present_foreignTableName); - if (present_foreignTableName) - list.add(foreignTableName); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetCrossReferenceReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetParentCatalogName()).compareTo(other.isSetParentCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetParentCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentCatalogName, other.parentCatalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetParentSchemaName()).compareTo(other.isSetParentSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetParentSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentSchemaName, other.parentSchemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetParentTableName()).compareTo(other.isSetParentTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetParentTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentTableName, other.parentTableName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetForeignCatalogName()).compareTo(other.isSetForeignCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetForeignCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignCatalogName, other.foreignCatalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetForeignSchemaName()).compareTo(other.isSetForeignSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetForeignSchemaName()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.foreignSchemaName, other.foreignSchemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetForeignTableName()).compareTo(other.isSetForeignTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetForeignTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignTableName, other.foreignTableName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetCrossReferenceReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetParentCatalogName()) { - if (!first) sb.append(", "); - sb.append("parentCatalogName:"); - if (this.parentCatalogName == null) { - sb.append("null"); - } else { - sb.append(this.parentCatalogName); - } - first = false; - } - if (isSetParentSchemaName()) { - if (!first) sb.append(", "); - sb.append("parentSchemaName:"); - if (this.parentSchemaName == null) { - sb.append("null"); - } else { - sb.append(this.parentSchemaName); - } - first = false; - } - if (isSetParentTableName()) { - if (!first) sb.append(", "); - sb.append("parentTableName:"); - if (this.parentTableName == null) { - sb.append("null"); - } else { - sb.append(this.parentTableName); - } - first = false; - } - if (isSetForeignCatalogName()) { - if (!first) sb.append(", "); - sb.append("foreignCatalogName:"); - if (this.foreignCatalogName == null) { - sb.append("null"); - } else { - sb.append(this.foreignCatalogName); - } - first = false; - } - if (isSetForeignSchemaName()) { - if (!first) sb.append(", "); - sb.append("foreignSchemaName:"); - if (this.foreignSchemaName == null) { - sb.append("null"); - } else { - sb.append(this.foreignSchemaName); - } - first = false; - } - if (isSetForeignTableName()) { - if (!first) sb.append(", "); - sb.append("foreignTableName:"); - if (this.foreignTableName == null) { - sb.append("null"); - } else { - sb.append(this.foreignTableName); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetCrossReferenceReqStandardSchemeFactory implements SchemeFactory { - public TGetCrossReferenceReqStandardScheme getScheme() { - return new TGetCrossReferenceReqStandardScheme(); - } - } - - private static class TGetCrossReferenceReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // PARENT_CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.parentCatalogName = iprot.readString(); - struct.setParentCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // PARENT_SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.parentSchemaName = iprot.readString(); - struct.setParentSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // PARENT_TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.parentTableName = iprot.readString(); - struct.setParentTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // FOREIGN_CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.foreignCatalogName = iprot.readString(); - struct.setForeignCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 6: // FOREIGN_SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.foreignSchemaName = iprot.readString(); - struct.setForeignSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 7: // FOREIGN_TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.foreignTableName = iprot.readString(); - struct.setForeignTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.parentCatalogName != null) { - if (struct.isSetParentCatalogName()) { - oprot.writeFieldBegin(PARENT_CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.parentCatalogName); - oprot.writeFieldEnd(); - } - } - if (struct.parentSchemaName != null) { - if (struct.isSetParentSchemaName()) { - oprot.writeFieldBegin(PARENT_SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.parentSchemaName); - oprot.writeFieldEnd(); - } - } - if (struct.parentTableName != null) { - if (struct.isSetParentTableName()) { - oprot.writeFieldBegin(PARENT_TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.parentTableName); - oprot.writeFieldEnd(); - } - } - if (struct.foreignCatalogName != null) { - if (struct.isSetForeignCatalogName()) { - oprot.writeFieldBegin(FOREIGN_CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.foreignCatalogName); - oprot.writeFieldEnd(); - } - } - if (struct.foreignSchemaName != null) { - if (struct.isSetForeignSchemaName()) { - oprot.writeFieldBegin(FOREIGN_SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.foreignSchemaName); - oprot.writeFieldEnd(); - } - } - if (struct.foreignTableName != null) { - if (struct.isSetForeignTableName()) { - oprot.writeFieldBegin(FOREIGN_TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.foreignTableName); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetCrossReferenceReqTupleSchemeFactory implements SchemeFactory { - public TGetCrossReferenceReqTupleScheme getScheme() { - return new TGetCrossReferenceReqTupleScheme(); - } - } - - private static class TGetCrossReferenceReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetParentCatalogName()) { - optionals.set(0); - } - if (struct.isSetParentSchemaName()) { - optionals.set(1); - } - if (struct.isSetParentTableName()) { - optionals.set(2); - } - if (struct.isSetForeignCatalogName()) { - optionals.set(3); - } - if (struct.isSetForeignSchemaName()) { - optionals.set(4); - } - if (struct.isSetForeignTableName()) { - optionals.set(5); - } - oprot.writeBitSet(optionals, 6); - if (struct.isSetParentCatalogName()) { - oprot.writeString(struct.parentCatalogName); - } - if (struct.isSetParentSchemaName()) { - oprot.writeString(struct.parentSchemaName); - } - if (struct.isSetParentTableName()) { - oprot.writeString(struct.parentTableName); - } - if (struct.isSetForeignCatalogName()) { - oprot.writeString(struct.foreignCatalogName); - } - if (struct.isSetForeignSchemaName()) { - oprot.writeString(struct.foreignSchemaName); - } - if (struct.isSetForeignTableName()) { - oprot.writeString(struct.foreignTableName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - 
struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(6); - if (incoming.get(0)) { - struct.parentCatalogName = iprot.readString(); - struct.setParentCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.parentSchemaName = iprot.readString(); - struct.setParentSchemaNameIsSet(true); - } - if (incoming.get(2)) { - struct.parentTableName = iprot.readString(); - struct.setParentTableNameIsSet(true); - } - if (incoming.get(3)) { - struct.foreignCatalogName = iprot.readString(); - struct.setForeignCatalogNameIsSet(true); - } - if (incoming.get(4)) { - struct.foreignSchemaName = iprot.readString(); - struct.setForeignSchemaNameIsSet(true); - } - if (incoming.get(5)) { - struct.foreignTableName = iprot.readString(); - struct.setForeignTableNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCrossReferenceResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCrossReferenceResp.java deleted file mode 100644 index 1bfe6d192df06..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetCrossReferenceResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetCrossReferenceResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCrossReferenceResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetCrossReferenceRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetCrossReferenceRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of 
fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCrossReferenceResp.class, metaDataMap); - } - - public TGetCrossReferenceResp() { - } - - public TGetCrossReferenceResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetCrossReferenceResp(TGetCrossReferenceResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetCrossReferenceResp deepCopy() { - return new TGetCrossReferenceResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetCrossReferenceResp) - return this.equals((TGetCrossReferenceResp)that); - return false; - } - - public boolean equals(TGetCrossReferenceResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); 
- list.add(present_status); - if (present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetCrossReferenceResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetCrossReferenceResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetCrossReferenceRespStandardSchemeFactory implements SchemeFactory { - public TGetCrossReferenceRespStandardScheme getScheme() { - return new TGetCrossReferenceRespStandardScheme(); - } - } - - private static class TGetCrossReferenceRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetCrossReferenceRespTupleSchemeFactory implements SchemeFactory { - public TGetCrossReferenceRespTupleScheme getScheme() { - return new TGetCrossReferenceRespTupleScheme(); - } - } - - private static class TGetCrossReferenceRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - 
struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetDelegationTokenReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetDelegationTokenReq.java deleted file mode 100644 index e3e28c5860522..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetDelegationTokenReq.java +++ /dev/null @@ -1,596 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetDelegationTokenReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OWNER_FIELD_DESC = new org.apache.thrift.protocol.TField("owner", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField RENEWER_FIELD_DESC = new org.apache.thrift.protocol.TField("renewer", org.apache.thrift.protocol.TType.STRING, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetDelegationTokenReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetDelegationTokenReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String owner; // required - private String renewer; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - OWNER((short)2, "owner"), - RENEWER((short)3, "renewer"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // OWNER - return OWNER; - case 3: // RENEWER - return RENEWER; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.OWNER, new org.apache.thrift.meta_data.FieldMetaData("owner", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.RENEWER, new org.apache.thrift.meta_data.FieldMetaData("renewer", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetDelegationTokenReq.class, metaDataMap); - } - - public TGetDelegationTokenReq() { - } - - public TGetDelegationTokenReq( - TSessionHandle sessionHandle, - String owner, - String renewer) - { - this(); - this.sessionHandle = sessionHandle; - this.owner = owner; - this.renewer = renewer; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetDelegationTokenReq(TGetDelegationTokenReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetOwner()) { - this.owner = other.owner; - } - if (other.isSetRenewer()) { - this.renewer = other.renewer; - } - } - - public TGetDelegationTokenReq deepCopy() { - return new TGetDelegationTokenReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.owner = null; - this.renewer = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getOwner() { - return this.owner; - } - - public void setOwner(String owner) { - this.owner = owner; - } - - public void unsetOwner() { - this.owner = null; - } - - /** Returns true if field owner is set (has been assigned a value) and false otherwise */ - public boolean isSetOwner() { - return this.owner != null; - } - - public void setOwnerIsSet(boolean value) { - if (!value) { - this.owner = null; - } - } - - public String getRenewer() { - return this.renewer; - } - - public void setRenewer(String renewer) { - this.renewer = renewer; - } - - public void unsetRenewer() { - this.renewer = null; - } - - /** Returns true if field renewer is set (has been assigned a value) and false otherwise */ - public boolean isSetRenewer() { - return this.renewer != null; - } - - public void setRenewerIsSet(boolean value) { - if (!value) { - this.renewer = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case OWNER: - if (value == null) { - unsetOwner(); - } else { - setOwner((String)value); - } - break; - - case RENEWER: - if (value == null) { - unsetRenewer(); - } else { - setRenewer((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case OWNER: - return getOwner(); - - case RENEWER: - return getRenewer(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case OWNER: - return isSetOwner(); - case RENEWER: - return isSetRenewer(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetDelegationTokenReq) - return this.equals((TGetDelegationTokenReq)that); - return false; - } - - public boolean equals(TGetDelegationTokenReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || 
that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_owner = true && this.isSetOwner(); - boolean that_present_owner = true && that.isSetOwner(); - if (this_present_owner || that_present_owner) { - if (!(this_present_owner && that_present_owner)) - return false; - if (!this.owner.equals(that.owner)) - return false; - } - - boolean this_present_renewer = true && this.isSetRenewer(); - boolean that_present_renewer = true && that.isSetRenewer(); - if (this_present_renewer || that_present_renewer) { - if (!(this_present_renewer && that_present_renewer)) - return false; - if (!this.renewer.equals(that.renewer)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_owner = true && (isSetOwner()); - list.add(present_owner); - if (present_owner) - list.add(owner); - - boolean present_renewer = true && (isSetRenewer()); - list.add(present_renewer); - if (present_renewer) - list.add(renewer); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetDelegationTokenReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOwner()).compareTo(other.isSetOwner()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOwner()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.owner, other.owner); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRenewer()).compareTo(other.isSetRenewer()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRenewer()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.renewer, other.renewer); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetDelegationTokenReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("owner:"); - if (this.owner == null) { - sb.append("null"); - } else { - sb.append(this.owner); - } - first = false; - if (!first) sb.append(", "); - sb.append("renewer:"); - if (this.renewer == null) { - sb.append("null"); - } else { - 
sb.append(this.renewer); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetOwner()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'owner' is unset! Struct:" + toString()); - } - - if (!isSetRenewer()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'renewer' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetDelegationTokenReqStandardSchemeFactory implements SchemeFactory { - public TGetDelegationTokenReqStandardScheme getScheme() { - return new TGetDelegationTokenReqStandardScheme(); - } - } - - private static class TGetDelegationTokenReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OWNER - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.owner = iprot.readString(); - struct.setOwnerIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // RENEWER - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.renewer = iprot.readString(); - struct.setRenewerIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.owner != null) { - oprot.writeFieldBegin(OWNER_FIELD_DESC); - oprot.writeString(struct.owner); - oprot.writeFieldEnd(); - } - if 
(struct.renewer != null) { - oprot.writeFieldBegin(RENEWER_FIELD_DESC); - oprot.writeString(struct.renewer); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetDelegationTokenReqTupleSchemeFactory implements SchemeFactory { - public TGetDelegationTokenReqTupleScheme getScheme() { - return new TGetDelegationTokenReqTupleScheme(); - } - } - - private static class TGetDelegationTokenReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.owner); - oprot.writeString(struct.renewer); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.owner = iprot.readString(); - struct.setOwnerIsSet(true); - struct.renewer = iprot.readString(); - struct.setRenewerIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetDelegationTokenResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetDelegationTokenResp.java deleted file mode 100644 index 6ef2acbbd9435..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetDelegationTokenResp.java +++ /dev/null @@ -1,504 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetDelegationTokenResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField DELEGATION_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("delegationToken", org.apache.thrift.protocol.TType.STRING, (short)2); - - private 
static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetDelegationTokenRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetDelegationTokenRespTupleSchemeFactory()); - } - - private TStatus status; // required - private String delegationToken; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - DELEGATION_TOKEN((short)2, "delegationToken"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // DELEGATION_TOKEN - return DELEGATION_TOKEN; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.DELEGATION_TOKEN}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.DELEGATION_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("delegationToken", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetDelegationTokenResp.class, metaDataMap); - } - - public TGetDelegationTokenResp() { - } - - public TGetDelegationTokenResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetDelegationTokenResp(TGetDelegationTokenResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetDelegationToken()) { - this.delegationToken = other.delegationToken; - } - } - - public TGetDelegationTokenResp deepCopy() { - return new TGetDelegationTokenResp(this); - } - - @Override - public void clear() { - this.status = null; - this.delegationToken = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public String getDelegationToken() { - return this.delegationToken; - } - - public void setDelegationToken(String delegationToken) { - this.delegationToken = delegationToken; - } - - public void unsetDelegationToken() { - this.delegationToken = null; - } - - /** Returns true if field delegationToken is set (has been assigned a value) and false otherwise */ - public boolean isSetDelegationToken() { - return this.delegationToken != null; - } - - public void setDelegationTokenIsSet(boolean value) { - if (!value) { - this.delegationToken = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case DELEGATION_TOKEN: - if (value == null) { - unsetDelegationToken(); - } else { - setDelegationToken((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case DELEGATION_TOKEN: - return getDelegationToken(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case DELEGATION_TOKEN: - return isSetDelegationToken(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetDelegationTokenResp) - return this.equals((TGetDelegationTokenResp)that); - return false; - } - - public boolean equals(TGetDelegationTokenResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_delegationToken = true && this.isSetDelegationToken(); - boolean that_present_delegationToken = true && that.isSetDelegationToken(); - if (this_present_delegationToken || that_present_delegationToken) { - if (!(this_present_delegationToken && that_present_delegationToken)) - return false; - if (!this.delegationToken.equals(that.delegationToken)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if 
(present_status) - list.add(status); - - boolean present_delegationToken = true && (isSetDelegationToken()); - list.add(present_delegationToken); - if (present_delegationToken) - list.add(delegationToken); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetDelegationTokenResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDelegationToken()).compareTo(other.isSetDelegationToken()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDelegationToken()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delegationToken, other.delegationToken); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetDelegationTokenResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetDelegationToken()) { - if (!first) sb.append(", "); - sb.append("delegationToken:"); - if (this.delegationToken == null) { - sb.append("null"); - } else { - sb.append(this.delegationToken); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetDelegationTokenRespStandardSchemeFactory implements SchemeFactory { - public TGetDelegationTokenRespStandardScheme getScheme() { - return new TGetDelegationTokenRespStandardScheme(); - } - } - - private static class TGetDelegationTokenRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // DELEGATION_TOKEN - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.delegationToken != null) { - if (struct.isSetDelegationToken()) { - oprot.writeFieldBegin(DELEGATION_TOKEN_FIELD_DESC); - oprot.writeString(struct.delegationToken); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetDelegationTokenRespTupleSchemeFactory implements SchemeFactory { - public TGetDelegationTokenRespTupleScheme getScheme() { - return new TGetDelegationTokenRespTupleScheme(); - } - } - - private static class TGetDelegationTokenRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetDelegationToken()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetDelegationToken()) { - oprot.writeString(struct.delegationToken); - } - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TGetDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java deleted file mode 100644 index ad4f8a5b031e8..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java +++ /dev/null @@ -1,711 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetFunctionsReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetFunctionsReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField FUNCTION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("functionName", org.apache.thrift.protocol.TType.STRING, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetFunctionsReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetFunctionsReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - private String functionName; // required - - /** The set of fields this struct contains, along 
with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"), - FUNCTION_NAME((short)4, "functionName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - case 4: // FUNCTION_NAME - return FUNCTION_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.FUNCTION_NAME, new org.apache.thrift.meta_data.FieldMetaData("functionName", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetFunctionsReq.class, metaDataMap); - } - - public TGetFunctionsReq() { - } - - public TGetFunctionsReq( - TSessionHandle sessionHandle, - String functionName) - { - this(); - this.sessionHandle = sessionHandle; - this.functionName = functionName; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetFunctionsReq(TGetFunctionsReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - if (other.isSetFunctionName()) { - this.functionName = other.functionName; - } - } - - public TGetFunctionsReq deepCopy() { - return new TGetFunctionsReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - this.functionName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public String getFunctionName() { - return this.functionName; - } - - public void setFunctionName(String functionName) { - this.functionName = functionName; - } - - public void unsetFunctionName() { - this.functionName = null; - } - - /** Returns true if field functionName is set (has been assigned a value) and false otherwise */ - public boolean isSetFunctionName() { - return this.functionName != null; - } - - public void setFunctionNameIsSet(boolean value) { - if (!value) { - this.functionName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - case FUNCTION_NAME: - if (value == null) { - unsetFunctionName(); - } else { - setFunctionName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - case FUNCTION_NAME: - 
return getFunctionName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - case FUNCTION_NAME: - return isSetFunctionName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetFunctionsReq) - return this.equals((TGetFunctionsReq)that); - return false; - } - - public boolean equals(TGetFunctionsReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - boolean this_present_functionName = true && this.isSetFunctionName(); - boolean that_present_functionName = true && that.isSetFunctionName(); - if (this_present_functionName || that_present_functionName) { - if (!(this_present_functionName && that_present_functionName)) - return false; - if (!this.functionName.equals(that.functionName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - list.add(present_catalogName); - if (present_catalogName) - list.add(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - list.add(present_schemaName); - if (present_schemaName) - list.add(schemaName); - - boolean present_functionName = true && (isSetFunctionName()); - list.add(present_functionName); - if (present_functionName) - list.add(functionName); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetFunctionsReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return 
lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFunctionName()).compareTo(other.isSetFunctionName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFunctionName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.functionName, other.functionName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetFunctionsReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - if (!first) sb.append(", "); - sb.append("functionName:"); - if (this.functionName == null) { - sb.append("null"); - } else { - sb.append(this.functionName); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetFunctionName()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'functionName' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetFunctionsReqStandardSchemeFactory implements SchemeFactory { - public TGetFunctionsReqStandardScheme getScheme() { - return new TGetFunctionsReqStandardScheme(); - } - } - - private static class TGetFunctionsReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // FUNCTION_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.functionName = iprot.readString(); - struct.setFunctionNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - if (struct.functionName != null) { - oprot.writeFieldBegin(FUNCTION_NAME_FIELD_DESC); - 
oprot.writeString(struct.functionName); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetFunctionsReqTupleSchemeFactory implements SchemeFactory { - public TGetFunctionsReqTupleScheme getScheme() { - return new TGetFunctionsReqTupleScheme(); - } - } - - private static class TGetFunctionsReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.functionName); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.functionName = iprot.readString(); - struct.setFunctionNameIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetFunctionsResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetFunctionsResp.java deleted file mode 100644 index ead37fb91cc2f..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetFunctionsResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetFunctionsResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetFunctionsResp"); - - 
private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetFunctionsRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetFunctionsRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetFunctionsResp.class, metaDataMap); - } - - public TGetFunctionsResp() { - } - - public TGetFunctionsResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetFunctionsResp(TGetFunctionsResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetFunctionsResp deepCopy() { - return new TGetFunctionsResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetFunctionsResp) - return this.equals((TGetFunctionsResp)that); - return false; - } - - public boolean equals(TGetFunctionsResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if 
(present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetFunctionsResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetFunctionsResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetFunctionsRespStandardSchemeFactory implements SchemeFactory { - public TGetFunctionsRespStandardScheme getScheme() { - return new TGetFunctionsRespStandardScheme(); - } - } - - private static class TGetFunctionsRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetFunctionsRespTupleSchemeFactory implements SchemeFactory { - public TGetFunctionsRespTupleScheme getScheme() { - return new TGetFunctionsRespTupleScheme(); - } - } - - private static class TGetFunctionsRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override 
- public void read(org.apache.thrift.protocol.TProtocol prot, TGetFunctionsResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java deleted file mode 100644 index b319b70e5eba5..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java +++ /dev/null @@ -1,507 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetInfoReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField INFO_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("infoType", org.apache.thrift.protocol.TType.I32, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetInfoReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetInfoReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private TGetInfoType infoType; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - /** - * - * @see TGetInfoType - */ - INFO_TYPE((short)2, "infoType"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // INFO_TYPE - return INFO_TYPE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.INFO_TYPE, new org.apache.thrift.meta_data.FieldMetaData("infoType", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TGetInfoType.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetInfoReq.class, metaDataMap); - } - - public TGetInfoReq() { - } - - public TGetInfoReq( - TSessionHandle sessionHandle, - TGetInfoType infoType) - { - this(); - this.sessionHandle = sessionHandle; - this.infoType = infoType; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetInfoReq(TGetInfoReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetInfoType()) { - this.infoType = other.infoType; - } - } - - public TGetInfoReq deepCopy() { - return new TGetInfoReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.infoType = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - /** - * - * @see TGetInfoType - */ - public TGetInfoType getInfoType() { - return this.infoType; - } - - /** - * - * @see TGetInfoType - */ - public void setInfoType(TGetInfoType infoType) { - this.infoType = infoType; - } - - public void unsetInfoType() { - this.infoType = null; - } - - /** Returns true if field infoType is set (has been assigned a value) and false otherwise */ - public boolean isSetInfoType() { - return this.infoType != null; - } - - public void setInfoTypeIsSet(boolean value) { - if (!value) { - this.infoType = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case INFO_TYPE: - if (value == null) { - unsetInfoType(); - } else { - setInfoType((TGetInfoType)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case INFO_TYPE: - return getInfoType(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case INFO_TYPE: - return isSetInfoType(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetInfoReq) - return this.equals((TGetInfoReq)that); - return false; - } - - public boolean equals(TGetInfoReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_infoType = true && this.isSetInfoType(); - boolean that_present_infoType = true && that.isSetInfoType(); - if (this_present_infoType || that_present_infoType) { - if (!(this_present_infoType && that_present_infoType)) - return false; - if (!this.infoType.equals(that.infoType)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && 
(isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_infoType = true && (isSetInfoType()); - list.add(present_infoType); - if (present_infoType) - list.add(infoType.getValue()); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetInfoReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetInfoType()).compareTo(other.isSetInfoType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetInfoType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.infoType, other.infoType); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetInfoReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("infoType:"); - if (this.infoType == null) { - sb.append("null"); - } else { - sb.append(this.infoType); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetInfoType()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'infoType' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetInfoReqStandardSchemeFactory implements SchemeFactory { - public TGetInfoReqStandardScheme getScheme() { - return new TGetInfoReqStandardScheme(); - } - } - - private static class TGetInfoReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetInfoReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // INFO_TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.infoType = org.apache.hive.service.rpc.thrift.TGetInfoType.findByValue(iprot.readI32()); - struct.setInfoTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetInfoReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.infoType != null) { - oprot.writeFieldBegin(INFO_TYPE_FIELD_DESC); - oprot.writeI32(struct.infoType.getValue()); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetInfoReqTupleSchemeFactory implements SchemeFactory { - public TGetInfoReqTupleScheme getScheme() { - return new TGetInfoReqTupleScheme(); - } - } - - private static class TGetInfoReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetInfoReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeI32(struct.infoType.getValue()); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetInfoReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - 
struct.setSessionHandleIsSet(true); - struct.infoType = org.apache.hive.service.rpc.thrift.TGetInfoType.findByValue(iprot.readI32()); - struct.setInfoTypeIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoResp.java deleted file mode 100644 index 9be810b024987..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoResp.java +++ /dev/null @@ -1,497 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetInfoResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField INFO_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("infoValue", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetInfoRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetInfoRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TGetInfoValue infoValue; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - INFO_VALUE((short)2, "infoValue"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // INFO_VALUE - return INFO_VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.INFO_VALUE, new org.apache.thrift.meta_data.FieldMetaData("infoValue", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetInfoValue.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetInfoResp.class, metaDataMap); - } - - public TGetInfoResp() { - } - - public TGetInfoResp( - TStatus status, - TGetInfoValue infoValue) - { - this(); - this.status = status; - this.infoValue = infoValue; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetInfoResp(TGetInfoResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetInfoValue()) { - this.infoValue = new TGetInfoValue(other.infoValue); - } - } - - public TGetInfoResp deepCopy() { - return new TGetInfoResp(this); - } - - @Override - public void clear() { - this.status = null; - this.infoValue = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TGetInfoValue getInfoValue() { - return this.infoValue; - } - - public void setInfoValue(TGetInfoValue infoValue) { - this.infoValue = infoValue; - } - - public void unsetInfoValue() { - this.infoValue = null; - } - - /** Returns true if field infoValue is set (has been assigned a value) and false otherwise */ - public boolean isSetInfoValue() { - return this.infoValue != null; - } - - public void setInfoValueIsSet(boolean value) { - if (!value) { - this.infoValue = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case INFO_VALUE: - if (value == null) { - unsetInfoValue(); - } else { - setInfoValue((TGetInfoValue)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case INFO_VALUE: - return getInfoValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case INFO_VALUE: - return isSetInfoValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetInfoResp) - return this.equals((TGetInfoResp)that); - return false; - } - - public boolean equals(TGetInfoResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_infoValue = true && this.isSetInfoValue(); - boolean that_present_infoValue = true && that.isSetInfoValue(); - if (this_present_infoValue || that_present_infoValue) { - if (!(this_present_infoValue && that_present_infoValue)) - return false; - if (!this.infoValue.equals(that.infoValue)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_infoValue = true && (isSetInfoValue()); - list.add(present_infoValue); - if (present_infoValue) - list.add(infoValue); - - return list.hashCode(); - } - - @Override - public int 
compareTo(TGetInfoResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetInfoValue()).compareTo(other.isSetInfoValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetInfoValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.infoValue, other.infoValue); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetInfoResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (!first) sb.append(", "); - sb.append("infoValue:"); - if (this.infoValue == null) { - sb.append("null"); - } else { - sb.append(this.infoValue); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - if (!isSetInfoValue()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'infoValue' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetInfoRespStandardSchemeFactory implements SchemeFactory { - public TGetInfoRespStandardScheme getScheme() { - return new TGetInfoRespStandardScheme(); - } - } - - private static class TGetInfoRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetInfoResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // INFO_VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.infoValue = new TGetInfoValue(); - struct.infoValue.read(iprot); - struct.setInfoValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetInfoResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.infoValue != null) { - oprot.writeFieldBegin(INFO_VALUE_FIELD_DESC); - struct.infoValue.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetInfoRespTupleSchemeFactory implements SchemeFactory { - public TGetInfoRespTupleScheme getScheme() { - return new TGetInfoRespTupleScheme(); - } - } - - private static class TGetInfoRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - struct.infoValue.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - struct.infoValue = new TGetInfoValue(); - struct.infoValue.read(iprot); - struct.setInfoValueIsSet(true); - } - } - -} - diff 
--git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoType.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoType.java deleted file mode 100644 index 5b219b62656d7..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoType.java +++ /dev/null @@ -1,180 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TGetInfoType implements org.apache.thrift.TEnum { - CLI_MAX_DRIVER_CONNECTIONS(0), - CLI_MAX_CONCURRENT_ACTIVITIES(1), - CLI_DATA_SOURCE_NAME(2), - CLI_FETCH_DIRECTION(8), - CLI_SERVER_NAME(13), - CLI_SEARCH_PATTERN_ESCAPE(14), - CLI_DBMS_NAME(17), - CLI_DBMS_VER(18), - CLI_ACCESSIBLE_TABLES(19), - CLI_ACCESSIBLE_PROCEDURES(20), - CLI_CURSOR_COMMIT_BEHAVIOR(23), - CLI_DATA_SOURCE_READ_ONLY(25), - CLI_DEFAULT_TXN_ISOLATION(26), - CLI_IDENTIFIER_CASE(28), - CLI_IDENTIFIER_QUOTE_CHAR(29), - CLI_MAX_COLUMN_NAME_LEN(30), - CLI_MAX_CURSOR_NAME_LEN(31), - CLI_MAX_SCHEMA_NAME_LEN(32), - CLI_MAX_CATALOG_NAME_LEN(34), - CLI_MAX_TABLE_NAME_LEN(35), - CLI_SCROLL_CONCURRENCY(43), - CLI_TXN_CAPABLE(46), - CLI_USER_NAME(47), - CLI_TXN_ISOLATION_OPTION(72), - CLI_INTEGRITY(73), - CLI_GETDATA_EXTENSIONS(81), - CLI_NULL_COLLATION(85), - CLI_ALTER_TABLE(86), - CLI_ORDER_BY_COLUMNS_IN_SELECT(90), - CLI_SPECIAL_CHARACTERS(94), - CLI_MAX_COLUMNS_IN_GROUP_BY(97), - CLI_MAX_COLUMNS_IN_INDEX(98), - CLI_MAX_COLUMNS_IN_ORDER_BY(99), - CLI_MAX_COLUMNS_IN_SELECT(100), - CLI_MAX_COLUMNS_IN_TABLE(101), - CLI_MAX_INDEX_SIZE(102), - CLI_MAX_ROW_SIZE(104), - CLI_MAX_STATEMENT_LEN(105), - CLI_MAX_TABLES_IN_SELECT(106), - CLI_MAX_USER_NAME_LEN(107), - CLI_OJ_CAPABILITIES(115), - CLI_XOPEN_CLI_YEAR(10000), - CLI_CURSOR_SENSITIVITY(10001), - CLI_DESCRIBE_PARAMETER(10002), - CLI_CATALOG_NAME(10003), - CLI_COLLATION_SEQ(10004), - CLI_MAX_IDENTIFIER_LEN(10005); - - private final int value; - - private TGetInfoType(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - public static TGetInfoType findByValue(int value) { - switch (value) { - case 0: - return CLI_MAX_DRIVER_CONNECTIONS; - case 1: - return CLI_MAX_CONCURRENT_ACTIVITIES; - case 2: - return CLI_DATA_SOURCE_NAME; - case 8: - return CLI_FETCH_DIRECTION; - case 13: - return CLI_SERVER_NAME; - case 14: - return CLI_SEARCH_PATTERN_ESCAPE; - case 17: - return CLI_DBMS_NAME; - case 18: - return CLI_DBMS_VER; - case 19: - return CLI_ACCESSIBLE_TABLES; - case 20: - return CLI_ACCESSIBLE_PROCEDURES; - case 23: - return CLI_CURSOR_COMMIT_BEHAVIOR; - case 25: - return CLI_DATA_SOURCE_READ_ONLY; - case 26: - return CLI_DEFAULT_TXN_ISOLATION; - case 28: - return CLI_IDENTIFIER_CASE; - case 29: - return CLI_IDENTIFIER_QUOTE_CHAR; - case 30: - return CLI_MAX_COLUMN_NAME_LEN; - case 31: - return CLI_MAX_CURSOR_NAME_LEN; - case 32: - return CLI_MAX_SCHEMA_NAME_LEN; - case 34: - return CLI_MAX_CATALOG_NAME_LEN; - case 35: - return CLI_MAX_TABLE_NAME_LEN; - case 43: - return CLI_SCROLL_CONCURRENCY; - case 46: - return CLI_TXN_CAPABLE; - case 47: - return CLI_USER_NAME; - case 72: - return CLI_TXN_ISOLATION_OPTION; - case 73: - return CLI_INTEGRITY; - case 81: - return CLI_GETDATA_EXTENSIONS; - case 85: - return CLI_NULL_COLLATION; - case 86: - return CLI_ALTER_TABLE; - case 90: - return CLI_ORDER_BY_COLUMNS_IN_SELECT; - case 94: - return CLI_SPECIAL_CHARACTERS; - case 97: - return CLI_MAX_COLUMNS_IN_GROUP_BY; - case 98: - return CLI_MAX_COLUMNS_IN_INDEX; - case 99: - return CLI_MAX_COLUMNS_IN_ORDER_BY; - case 100: - return CLI_MAX_COLUMNS_IN_SELECT; - case 101: - return CLI_MAX_COLUMNS_IN_TABLE; - case 102: - return CLI_MAX_INDEX_SIZE; - case 104: - return CLI_MAX_ROW_SIZE; - case 105: - return CLI_MAX_STATEMENT_LEN; - case 106: - return CLI_MAX_TABLES_IN_SELECT; - case 107: - return CLI_MAX_USER_NAME_LEN; - case 115: - return CLI_OJ_CAPABILITIES; - case 10000: - return CLI_XOPEN_CLI_YEAR; - case 10001: - return CLI_CURSOR_SENSITIVITY; - case 10002: - return CLI_DESCRIBE_PARAMETER; - case 10003: - return CLI_CATALOG_NAME; - case 10004: - return CLI_COLLATION_SEQ; - case 10005: - return CLI_MAX_IDENTIFIER_LEN; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoValue.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoValue.java deleted file mode 100644 index 8e3045a58e5ac..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetInfoValue.java +++ /dev/null @@ -1,597 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import 
java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -public class TGetInfoValue extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetInfoValue"); - private static final org.apache.thrift.protocol.TField STRING_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("stringValue", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField SMALL_INT_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("smallIntValue", org.apache.thrift.protocol.TType.I16, (short)2); - private static final org.apache.thrift.protocol.TField INTEGER_BITMASK_FIELD_DESC = new org.apache.thrift.protocol.TField("integerBitmask", org.apache.thrift.protocol.TType.I32, (short)3); - private static final org.apache.thrift.protocol.TField INTEGER_FLAG_FIELD_DESC = new org.apache.thrift.protocol.TField("integerFlag", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField BINARY_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("binaryValue", org.apache.thrift.protocol.TType.I32, (short)5); - private static final org.apache.thrift.protocol.TField LEN_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lenValue", org.apache.thrift.protocol.TType.I64, (short)6); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STRING_VALUE((short)1, "stringValue"), - SMALL_INT_VALUE((short)2, "smallIntValue"), - INTEGER_BITMASK((short)3, "integerBitmask"), - INTEGER_FLAG((short)4, "integerFlag"), - BINARY_VALUE((short)5, "binaryValue"), - LEN_VALUE((short)6, "lenValue"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STRING_VALUE - return STRING_VALUE; - case 2: // SMALL_INT_VALUE - return SMALL_INT_VALUE; - case 3: // INTEGER_BITMASK - return INTEGER_BITMASK; - case 4: // INTEGER_FLAG - return INTEGER_FLAG; - case 5: // BINARY_VALUE - return BINARY_VALUE; - case 6: // LEN_VALUE - return LEN_VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STRING_VALUE, new org.apache.thrift.meta_data.FieldMetaData("stringValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.SMALL_INT_VALUE, new org.apache.thrift.meta_data.FieldMetaData("smallIntValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); - tmpMap.put(_Fields.INTEGER_BITMASK, new org.apache.thrift.meta_data.FieldMetaData("integerBitmask", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.INTEGER_FLAG, new org.apache.thrift.meta_data.FieldMetaData("integerFlag", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.BINARY_VALUE, new org.apache.thrift.meta_data.FieldMetaData("binaryValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.LEN_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lenValue", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetInfoValue.class, metaDataMap); - } - - public TGetInfoValue() { - super(); - } - - public TGetInfoValue(TGetInfoValue._Fields setField, Object value) { - super(setField, value); - } - - public TGetInfoValue(TGetInfoValue other) { - super(other); - } - public TGetInfoValue deepCopy() { - return new TGetInfoValue(this); - } - - public static TGetInfoValue stringValue(String value) { - TGetInfoValue x = new TGetInfoValue(); - x.setStringValue(value); - return x; - } - - public static TGetInfoValue smallIntValue(short value) { - TGetInfoValue x = new TGetInfoValue(); - x.setSmallIntValue(value); - return x; - } - - public static TGetInfoValue integerBitmask(int value) { - TGetInfoValue x = new TGetInfoValue(); - x.setIntegerBitmask(value); - return x; - } - - public static TGetInfoValue integerFlag(int value) { - TGetInfoValue x = new TGetInfoValue(); - x.setIntegerFlag(value); - return x; - } - - public static TGetInfoValue binaryValue(int value) { - TGetInfoValue x = new TGetInfoValue(); - x.setBinaryValue(value); - return x; - } - - public static TGetInfoValue lenValue(long value) { - TGetInfoValue x = new TGetInfoValue(); - x.setLenValue(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case STRING_VALUE: - if (value instanceof 
String) { - break; - } - throw new ClassCastException("Was expecting value of type String for field 'stringValue', but got " + value.getClass().getSimpleName()); - case SMALL_INT_VALUE: - if (value instanceof Short) { - break; - } - throw new ClassCastException("Was expecting value of type Short for field 'smallIntValue', but got " + value.getClass().getSimpleName()); - case INTEGER_BITMASK: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'integerBitmask', but got " + value.getClass().getSimpleName()); - case INTEGER_FLAG: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'integerFlag', but got " + value.getClass().getSimpleName()); - case BINARY_VALUE: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'binaryValue', but got " + value.getClass().getSimpleName()); - case LEN_VALUE: - if (value instanceof Long) { - break; - } - throw new ClassCastException("Was expecting value of type Long for field 'lenValue', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case STRING_VALUE: - if (field.type == STRING_VALUE_FIELD_DESC.type) { - String stringValue; - stringValue = iprot.readString(); - return stringValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case SMALL_INT_VALUE: - if (field.type == SMALL_INT_VALUE_FIELD_DESC.type) { - Short smallIntValue; - smallIntValue = iprot.readI16(); - return smallIntValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case INTEGER_BITMASK: - if (field.type == INTEGER_BITMASK_FIELD_DESC.type) { - Integer integerBitmask; - integerBitmask = iprot.readI32(); - return integerBitmask; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case INTEGER_FLAG: - if (field.type == INTEGER_FLAG_FIELD_DESC.type) { - Integer integerFlag; - integerFlag = iprot.readI32(); - return integerFlag; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case BINARY_VALUE: - if (field.type == BINARY_VALUE_FIELD_DESC.type) { - Integer binaryValue; - binaryValue = iprot.readI32(); - return binaryValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case LEN_VALUE: - if (field.type == LEN_VALUE_FIELD_DESC.type) { - Long lenValue; - lenValue = iprot.readI64(); - return lenValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - 
return; - case SMALL_INT_VALUE: - Short smallIntValue = (Short)value_; - oprot.writeI16(smallIntValue); - return; - case INTEGER_BITMASK: - Integer integerBitmask = (Integer)value_; - oprot.writeI32(integerBitmask); - return; - case INTEGER_FLAG: - Integer integerFlag = (Integer)value_; - oprot.writeI32(integerFlag); - return; - case BINARY_VALUE: - Integer binaryValue = (Integer)value_; - oprot.writeI32(binaryValue); - return; - case LEN_VALUE: - Long lenValue = (Long)value_; - oprot.writeI64(lenValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case STRING_VALUE: - String stringValue; - stringValue = iprot.readString(); - return stringValue; - case SMALL_INT_VALUE: - Short smallIntValue; - smallIntValue = iprot.readI16(); - return smallIntValue; - case INTEGER_BITMASK: - Integer integerBitmask; - integerBitmask = iprot.readI32(); - return integerBitmask; - case INTEGER_FLAG: - Integer integerFlag; - integerFlag = iprot.readI32(); - return integerFlag; - case BINARY_VALUE: - Integer binaryValue; - binaryValue = iprot.readI32(); - return binaryValue; - case LEN_VALUE: - Long lenValue; - lenValue = iprot.readI64(); - return lenValue; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - return; - case SMALL_INT_VALUE: - Short smallIntValue = (Short)value_; - oprot.writeI16(smallIntValue); - return; - case INTEGER_BITMASK: - Integer integerBitmask = (Integer)value_; - oprot.writeI32(integerBitmask); - return; - case INTEGER_FLAG: - Integer integerFlag = (Integer)value_; - oprot.writeI32(integerFlag); - return; - case BINARY_VALUE: - Integer binaryValue = (Integer)value_; - oprot.writeI32(binaryValue); - return; - case LEN_VALUE: - Long lenValue = (Long)value_; - oprot.writeI64(lenValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case STRING_VALUE: - return STRING_VALUE_FIELD_DESC; - case SMALL_INT_VALUE: - return SMALL_INT_VALUE_FIELD_DESC; - case INTEGER_BITMASK: - return INTEGER_BITMASK_FIELD_DESC; - case INTEGER_FLAG: - return INTEGER_FLAG_FIELD_DESC; - case BINARY_VALUE: - return BINARY_VALUE_FIELD_DESC; - case LEN_VALUE: - return LEN_VALUE_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public String getStringValue() { - if (getSetField() == _Fields.STRING_VALUE) { - return (String)getFieldValue(); - } else { 
- throw new RuntimeException("Cannot get field 'stringValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringValue(String value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRING_VALUE; - value_ = value; - } - - public short getSmallIntValue() { - if (getSetField() == _Fields.SMALL_INT_VALUE) { - return (Short)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'smallIntValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setSmallIntValue(short value) { - setField_ = _Fields.SMALL_INT_VALUE; - value_ = value; - } - - public int getIntegerBitmask() { - if (getSetField() == _Fields.INTEGER_BITMASK) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'integerBitmask' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setIntegerBitmask(int value) { - setField_ = _Fields.INTEGER_BITMASK; - value_ = value; - } - - public int getIntegerFlag() { - if (getSetField() == _Fields.INTEGER_FLAG) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'integerFlag' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setIntegerFlag(int value) { - setField_ = _Fields.INTEGER_FLAG; - value_ = value; - } - - public int getBinaryValue() { - if (getSetField() == _Fields.BINARY_VALUE) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'binaryValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setBinaryValue(int value) { - setField_ = _Fields.BINARY_VALUE; - value_ = value; - } - - public long getLenValue() { - if (getSetField() == _Fields.LEN_VALUE) { - return (Long)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'lenValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setLenValue(long value) { - setField_ = _Fields.LEN_VALUE; - value_ = value; - } - - public boolean isSetStringValue() { - return setField_ == _Fields.STRING_VALUE; - } - - - public boolean isSetSmallIntValue() { - return setField_ == _Fields.SMALL_INT_VALUE; - } - - - public boolean isSetIntegerBitmask() { - return setField_ == _Fields.INTEGER_BITMASK; - } - - - public boolean isSetIntegerFlag() { - return setField_ == _Fields.INTEGER_FLAG; - } - - - public boolean isSetBinaryValue() { - return setField_ == _Fields.BINARY_VALUE; - } - - - public boolean isSetLenValue() { - return setField_ == _Fields.LEN_VALUE; - } - - - public boolean equals(Object other) { - if (other instanceof TGetInfoValue) { - return equals((TGetInfoValue)other); - } else { - return false; - } - } - - public boolean equals(TGetInfoValue other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TGetInfoValue other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - List list = new ArrayList(); - list.add(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - 
list.add(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - list.add(value); - } - } - return list.hashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetOperationStatusReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetOperationStatusReq.java deleted file mode 100644 index af31ce2b22819..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetOperationStatusReq.java +++ /dev/null @@ -1,501 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetOperationStatusReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetOperationStatusReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField GET_PROGRESS_UPDATE_FIELD_DESC = new org.apache.thrift.protocol.TField("getProgressUpdate", org.apache.thrift.protocol.TType.BOOL, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetOperationStatusReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetOperationStatusReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - private boolean getProgressUpdate; 
// optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"), - GET_PROGRESS_UPDATE((short)2, "getProgressUpdate"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - case 2: // GET_PROGRESS_UPDATE - return GET_PROGRESS_UPDATE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __GETPROGRESSUPDATE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.GET_PROGRESS_UPDATE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - tmpMap.put(_Fields.GET_PROGRESS_UPDATE, new org.apache.thrift.meta_data.FieldMetaData("getProgressUpdate", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetOperationStatusReq.class, metaDataMap); - } - - public TGetOperationStatusReq() { - } - - public TGetOperationStatusReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetOperationStatusReq(TGetOperationStatusReq other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - this.getProgressUpdate = other.getProgressUpdate; - } - - public TGetOperationStatusReq deepCopy() { - return new TGetOperationStatusReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - setGetProgressUpdateIsSet(false); - this.getProgressUpdate = false; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public boolean isGetProgressUpdate() { - return this.getProgressUpdate; - } - - public void setGetProgressUpdate(boolean getProgressUpdate) { - this.getProgressUpdate = getProgressUpdate; - setGetProgressUpdateIsSet(true); - } - - public void unsetGetProgressUpdate() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __GETPROGRESSUPDATE_ISSET_ID); - } - - /** Returns true if field getProgressUpdate is set (has been assigned a value) and false otherwise */ - public boolean isSetGetProgressUpdate() { - return EncodingUtils.testBit(__isset_bitfield, __GETPROGRESSUPDATE_ISSET_ID); - } - - public void setGetProgressUpdateIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __GETPROGRESSUPDATE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - case GET_PROGRESS_UPDATE: - if (value == null) { - unsetGetProgressUpdate(); - } else { - setGetProgressUpdate((Boolean)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - case GET_PROGRESS_UPDATE: - return isGetProgressUpdate(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - case GET_PROGRESS_UPDATE: - return isSetGetProgressUpdate(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetOperationStatusReq) - return this.equals((TGetOperationStatusReq)that); - return false; - } - - public boolean equals(TGetOperationStatusReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if 
(!this.operationHandle.equals(that.operationHandle)) - return false; - } - - boolean this_present_getProgressUpdate = true && this.isSetGetProgressUpdate(); - boolean that_present_getProgressUpdate = true && that.isSetGetProgressUpdate(); - if (this_present_getProgressUpdate || that_present_getProgressUpdate) { - if (!(this_present_getProgressUpdate && that_present_getProgressUpdate)) - return false; - if (this.getProgressUpdate != that.getProgressUpdate) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - boolean present_getProgressUpdate = true && (isSetGetProgressUpdate()); - list.add(present_getProgressUpdate); - if (present_getProgressUpdate) - list.add(getProgressUpdate); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetOperationStatusReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetGetProgressUpdate()).compareTo(other.isSetGetProgressUpdate()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetGetProgressUpdate()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.getProgressUpdate, other.getProgressUpdate); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetOperationStatusReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - if (isSetGetProgressUpdate()) { - if (!first) sb.append(", "); - sb.append("getProgressUpdate:"); - sb.append(this.getProgressUpdate); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetOperationStatusReqStandardSchemeFactory implements SchemeFactory { - public TGetOperationStatusReqStandardScheme getScheme() { - return new TGetOperationStatusReqStandardScheme(); - } - } - - private static class TGetOperationStatusReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetOperationStatusReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // GET_PROGRESS_UPDATE - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.getProgressUpdate = iprot.readBool(); - struct.setGetProgressUpdateIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetOperationStatusReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.isSetGetProgressUpdate()) { - oprot.writeFieldBegin(GET_PROGRESS_UPDATE_FIELD_DESC); - oprot.writeBool(struct.getProgressUpdate); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetOperationStatusReqTupleSchemeFactory implements SchemeFactory { - public TGetOperationStatusReqTupleScheme getScheme() { - return new TGetOperationStatusReqTupleScheme(); - } - } - - private static class TGetOperationStatusReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetOperationStatusReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - BitSet optionals = new BitSet(); - if 
(struct.isSetGetProgressUpdate()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetGetProgressUpdate()) { - oprot.writeBool(struct.getProgressUpdate); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetOperationStatusReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.getProgressUpdate = iprot.readBool(); - struct.setGetProgressUpdateIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java deleted file mode 100644 index dbfbb44aa6986..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetOperationStatusResp.java +++ /dev/null @@ -1,1342 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetOperationStatusResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetOperationStatusResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationState", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField SQL_STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("sqlState", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)5); - private static final 
org.apache.thrift.protocol.TField TASK_STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("taskStatus", org.apache.thrift.protocol.TType.STRING, (short)6); - private static final org.apache.thrift.protocol.TField OPERATION_STARTED_FIELD_DESC = new org.apache.thrift.protocol.TField("operationStarted", org.apache.thrift.protocol.TType.I64, (short)7); - private static final org.apache.thrift.protocol.TField OPERATION_COMPLETED_FIELD_DESC = new org.apache.thrift.protocol.TField("operationCompleted", org.apache.thrift.protocol.TType.I64, (short)8); - private static final org.apache.thrift.protocol.TField HAS_RESULT_SET_FIELD_DESC = new org.apache.thrift.protocol.TField("hasResultSet", org.apache.thrift.protocol.TType.BOOL, (short)9); - private static final org.apache.thrift.protocol.TField PROGRESS_UPDATE_RESPONSE_FIELD_DESC = new org.apache.thrift.protocol.TField("progressUpdateResponse", org.apache.thrift.protocol.TType.STRUCT, (short)10); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetOperationStatusRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetOperationStatusRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationState operationState; // optional - private String sqlState; // optional - private int errorCode; // optional - private String errorMessage; // optional - private String taskStatus; // optional - private long operationStarted; // optional - private long operationCompleted; // optional - private boolean hasResultSet; // optional - private TProgressUpdateResp progressUpdateResponse; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - /** - * - * @see TOperationState - */ - OPERATION_STATE((short)2, "operationState"), - SQL_STATE((short)3, "sqlState"), - ERROR_CODE((short)4, "errorCode"), - ERROR_MESSAGE((short)5, "errorMessage"), - TASK_STATUS((short)6, "taskStatus"), - OPERATION_STARTED((short)7, "operationStarted"), - OPERATION_COMPLETED((short)8, "operationCompleted"), - HAS_RESULT_SET((short)9, "hasResultSet"), - PROGRESS_UPDATE_RESPONSE((short)10, "progressUpdateResponse"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_STATE - return OPERATION_STATE; - case 3: // SQL_STATE - return SQL_STATE; - case 4: // ERROR_CODE - return ERROR_CODE; - case 5: // ERROR_MESSAGE - return ERROR_MESSAGE; - case 6: // TASK_STATUS - return TASK_STATUS; - case 7: // OPERATION_STARTED - return OPERATION_STARTED; - case 8: // OPERATION_COMPLETED - return OPERATION_COMPLETED; - case 9: // HAS_RESULT_SET - return HAS_RESULT_SET; - case 10: // PROGRESS_UPDATE_RESPONSE - return PROGRESS_UPDATE_RESPONSE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __ERRORCODE_ISSET_ID = 0; - private static final int __OPERATIONSTARTED_ISSET_ID = 1; - private static final int __OPERATIONCOMPLETED_ISSET_ID = 2; - private static final int __HASRESULTSET_ISSET_ID = 3; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.OPERATION_STATE,_Fields.SQL_STATE,_Fields.ERROR_CODE,_Fields.ERROR_MESSAGE,_Fields.TASK_STATUS,_Fields.OPERATION_STARTED,_Fields.OPERATION_COMPLETED,_Fields.HAS_RESULT_SET,_Fields.PROGRESS_UPDATE_RESPONSE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_STATE, new org.apache.thrift.meta_data.FieldMetaData("operationState", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TOperationState.class))); - tmpMap.put(_Fields.SQL_STATE, new org.apache.thrift.meta_data.FieldMetaData("sqlState", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TASK_STATUS, new org.apache.thrift.meta_data.FieldMetaData("taskStatus", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.OPERATION_STARTED, new org.apache.thrift.meta_data.FieldMetaData("operationStarted", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.OPERATION_COMPLETED, new org.apache.thrift.meta_data.FieldMetaData("operationCompleted", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.HAS_RESULT_SET, new 
org.apache.thrift.meta_data.FieldMetaData("hasResultSet", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.PROGRESS_UPDATE_RESPONSE, new org.apache.thrift.meta_data.FieldMetaData("progressUpdateResponse", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT , "TProgressUpdateResp"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetOperationStatusResp.class, metaDataMap); - } - - public TGetOperationStatusResp() { - } - - public TGetOperationStatusResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TGetOperationStatusResp(TGetOperationStatusResp other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationState()) { - this.operationState = other.operationState; - } - if (other.isSetSqlState()) { - this.sqlState = other.sqlState; - } - this.errorCode = other.errorCode; - if (other.isSetErrorMessage()) { - this.errorMessage = other.errorMessage; - } - if (other.isSetTaskStatus()) { - this.taskStatus = other.taskStatus; - } - this.operationStarted = other.operationStarted; - this.operationCompleted = other.operationCompleted; - this.hasResultSet = other.hasResultSet; - if (other.isSetProgressUpdateResponse()) { - this.progressUpdateResponse = other.progressUpdateResponse; - } - } - - public TGetOperationStatusResp deepCopy() { - return new TGetOperationStatusResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationState = null; - this.sqlState = null; - setErrorCodeIsSet(false); - this.errorCode = 0; - this.errorMessage = null; - this.taskStatus = null; - setOperationStartedIsSet(false); - this.operationStarted = 0; - setOperationCompletedIsSet(false); - this.operationCompleted = 0; - setHasResultSetIsSet(false); - this.hasResultSet = false; - this.progressUpdateResponse = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - /** - * - * @see TOperationState - */ - public TOperationState getOperationState() { - return this.operationState; - } - - /** - * - * @see TOperationState - */ - public void setOperationState(TOperationState operationState) { - this.operationState = operationState; - } - - public void unsetOperationState() { - this.operationState = null; - } - - /** Returns true if field operationState is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationState() { - return this.operationState != null; - } - - public void setOperationStateIsSet(boolean value) { - if (!value) { - this.operationState = null; - } - } - - public String getSqlState() { - return this.sqlState; - } - - public void setSqlState(String sqlState) { - this.sqlState = sqlState; - } - - public void unsetSqlState() { - this.sqlState = null; - } - - /** Returns true if field sqlState is set (has been assigned a 
value) and false otherwise */ - public boolean isSetSqlState() { - return this.sqlState != null; - } - - public void setSqlStateIsSet(boolean value) { - if (!value) { - this.sqlState = null; - } - } - - public int getErrorCode() { - return this.errorCode; - } - - public void setErrorCode(int errorCode) { - this.errorCode = errorCode; - setErrorCodeIsSet(true); - } - - public void unsetErrorCode() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorCode() { - return EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - public void setErrorCodeIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value); - } - - public String getErrorMessage() { - return this.errorMessage; - } - - public void setErrorMessage(String errorMessage) { - this.errorMessage = errorMessage; - } - - public void unsetErrorMessage() { - this.errorMessage = null; - } - - /** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorMessage() { - return this.errorMessage != null; - } - - public void setErrorMessageIsSet(boolean value) { - if (!value) { - this.errorMessage = null; - } - } - - public String getTaskStatus() { - return this.taskStatus; - } - - public void setTaskStatus(String taskStatus) { - this.taskStatus = taskStatus; - } - - public void unsetTaskStatus() { - this.taskStatus = null; - } - - /** Returns true if field taskStatus is set (has been assigned a value) and false otherwise */ - public boolean isSetTaskStatus() { - return this.taskStatus != null; - } - - public void setTaskStatusIsSet(boolean value) { - if (!value) { - this.taskStatus = null; - } - } - - public long getOperationStarted() { - return this.operationStarted; - } - - public void setOperationStarted(long operationStarted) { - this.operationStarted = operationStarted; - setOperationStartedIsSet(true); - } - - public void unsetOperationStarted() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __OPERATIONSTARTED_ISSET_ID); - } - - /** Returns true if field operationStarted is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationStarted() { - return EncodingUtils.testBit(__isset_bitfield, __OPERATIONSTARTED_ISSET_ID); - } - - public void setOperationStartedIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __OPERATIONSTARTED_ISSET_ID, value); - } - - public long getOperationCompleted() { - return this.operationCompleted; - } - - public void setOperationCompleted(long operationCompleted) { - this.operationCompleted = operationCompleted; - setOperationCompletedIsSet(true); - } - - public void unsetOperationCompleted() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __OPERATIONCOMPLETED_ISSET_ID); - } - - /** Returns true if field operationCompleted is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationCompleted() { - return EncodingUtils.testBit(__isset_bitfield, __OPERATIONCOMPLETED_ISSET_ID); - } - - public void setOperationCompletedIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __OPERATIONCOMPLETED_ISSET_ID, value); - } - - public boolean isHasResultSet() { - return this.hasResultSet; - } - - public void setHasResultSet(boolean hasResultSet) { - this.hasResultSet = hasResultSet; - 
setHasResultSetIsSet(true); - } - - public void unsetHasResultSet() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASRESULTSET_ISSET_ID); - } - - /** Returns true if field hasResultSet is set (has been assigned a value) and false otherwise */ - public boolean isSetHasResultSet() { - return EncodingUtils.testBit(__isset_bitfield, __HASRESULTSET_ISSET_ID); - } - - public void setHasResultSetIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASRESULTSET_ISSET_ID, value); - } - - public TProgressUpdateResp getProgressUpdateResponse() { - return this.progressUpdateResponse; - } - - public void setProgressUpdateResponse(TProgressUpdateResp progressUpdateResponse) { - this.progressUpdateResponse = progressUpdateResponse; - } - - public void unsetProgressUpdateResponse() { - this.progressUpdateResponse = null; - } - - /** Returns true if field progressUpdateResponse is set (has been assigned a value) and false otherwise */ - public boolean isSetProgressUpdateResponse() { - return this.progressUpdateResponse != null; - } - - public void setProgressUpdateResponseIsSet(boolean value) { - if (!value) { - this.progressUpdateResponse = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_STATE: - if (value == null) { - unsetOperationState(); - } else { - setOperationState((TOperationState)value); - } - break; - - case SQL_STATE: - if (value == null) { - unsetSqlState(); - } else { - setSqlState((String)value); - } - break; - - case ERROR_CODE: - if (value == null) { - unsetErrorCode(); - } else { - setErrorCode((Integer)value); - } - break; - - case ERROR_MESSAGE: - if (value == null) { - unsetErrorMessage(); - } else { - setErrorMessage((String)value); - } - break; - - case TASK_STATUS: - if (value == null) { - unsetTaskStatus(); - } else { - setTaskStatus((String)value); - } - break; - - case OPERATION_STARTED: - if (value == null) { - unsetOperationStarted(); - } else { - setOperationStarted((Long)value); - } - break; - - case OPERATION_COMPLETED: - if (value == null) { - unsetOperationCompleted(); - } else { - setOperationCompleted((Long)value); - } - break; - - case HAS_RESULT_SET: - if (value == null) { - unsetHasResultSet(); - } else { - setHasResultSet((Boolean)value); - } - break; - - case PROGRESS_UPDATE_RESPONSE: - if (value == null) { - unsetProgressUpdateResponse(); - } else { - setProgressUpdateResponse((TProgressUpdateResp)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_STATE: - return getOperationState(); - - case SQL_STATE: - return getSqlState(); - - case ERROR_CODE: - return getErrorCode(); - - case ERROR_MESSAGE: - return getErrorMessage(); - - case TASK_STATUS: - return getTaskStatus(); - - case OPERATION_STARTED: - return getOperationStarted(); - - case OPERATION_COMPLETED: - return getOperationCompleted(); - - case HAS_RESULT_SET: - return isHasResultSet(); - - case PROGRESS_UPDATE_RESPONSE: - return getProgressUpdateResponse(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case 
OPERATION_STATE: - return isSetOperationState(); - case SQL_STATE: - return isSetSqlState(); - case ERROR_CODE: - return isSetErrorCode(); - case ERROR_MESSAGE: - return isSetErrorMessage(); - case TASK_STATUS: - return isSetTaskStatus(); - case OPERATION_STARTED: - return isSetOperationStarted(); - case OPERATION_COMPLETED: - return isSetOperationCompleted(); - case HAS_RESULT_SET: - return isSetHasResultSet(); - case PROGRESS_UPDATE_RESPONSE: - return isSetProgressUpdateResponse(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetOperationStatusResp) - return this.equals((TGetOperationStatusResp)that); - return false; - } - - public boolean equals(TGetOperationStatusResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationState = true && this.isSetOperationState(); - boolean that_present_operationState = true && that.isSetOperationState(); - if (this_present_operationState || that_present_operationState) { - if (!(this_present_operationState && that_present_operationState)) - return false; - if (!this.operationState.equals(that.operationState)) - return false; - } - - boolean this_present_sqlState = true && this.isSetSqlState(); - boolean that_present_sqlState = true && that.isSetSqlState(); - if (this_present_sqlState || that_present_sqlState) { - if (!(this_present_sqlState && that_present_sqlState)) - return false; - if (!this.sqlState.equals(that.sqlState)) - return false; - } - - boolean this_present_errorCode = true && this.isSetErrorCode(); - boolean that_present_errorCode = true && that.isSetErrorCode(); - if (this_present_errorCode || that_present_errorCode) { - if (!(this_present_errorCode && that_present_errorCode)) - return false; - if (this.errorCode != that.errorCode) - return false; - } - - boolean this_present_errorMessage = true && this.isSetErrorMessage(); - boolean that_present_errorMessage = true && that.isSetErrorMessage(); - if (this_present_errorMessage || that_present_errorMessage) { - if (!(this_present_errorMessage && that_present_errorMessage)) - return false; - if (!this.errorMessage.equals(that.errorMessage)) - return false; - } - - boolean this_present_taskStatus = true && this.isSetTaskStatus(); - boolean that_present_taskStatus = true && that.isSetTaskStatus(); - if (this_present_taskStatus || that_present_taskStatus) { - if (!(this_present_taskStatus && that_present_taskStatus)) - return false; - if (!this.taskStatus.equals(that.taskStatus)) - return false; - } - - boolean this_present_operationStarted = true && this.isSetOperationStarted(); - boolean that_present_operationStarted = true && that.isSetOperationStarted(); - if (this_present_operationStarted || that_present_operationStarted) { - if (!(this_present_operationStarted && that_present_operationStarted)) - return false; - if (this.operationStarted != that.operationStarted) - return false; - } - - boolean this_present_operationCompleted = true && this.isSetOperationCompleted(); - boolean that_present_operationCompleted = true && that.isSetOperationCompleted(); - if (this_present_operationCompleted || that_present_operationCompleted) { - if 
(!(this_present_operationCompleted && that_present_operationCompleted)) - return false; - if (this.operationCompleted != that.operationCompleted) - return false; - } - - boolean this_present_hasResultSet = true && this.isSetHasResultSet(); - boolean that_present_hasResultSet = true && that.isSetHasResultSet(); - if (this_present_hasResultSet || that_present_hasResultSet) { - if (!(this_present_hasResultSet && that_present_hasResultSet)) - return false; - if (this.hasResultSet != that.hasResultSet) - return false; - } - - boolean this_present_progressUpdateResponse = true && this.isSetProgressUpdateResponse(); - boolean that_present_progressUpdateResponse = true && that.isSetProgressUpdateResponse(); - if (this_present_progressUpdateResponse || that_present_progressUpdateResponse) { - if (!(this_present_progressUpdateResponse && that_present_progressUpdateResponse)) - return false; - if (!this.progressUpdateResponse.equals(that.progressUpdateResponse)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_operationState = true && (isSetOperationState()); - list.add(present_operationState); - if (present_operationState) - list.add(operationState.getValue()); - - boolean present_sqlState = true && (isSetSqlState()); - list.add(present_sqlState); - if (present_sqlState) - list.add(sqlState); - - boolean present_errorCode = true && (isSetErrorCode()); - list.add(present_errorCode); - if (present_errorCode) - list.add(errorCode); - - boolean present_errorMessage = true && (isSetErrorMessage()); - list.add(present_errorMessage); - if (present_errorMessage) - list.add(errorMessage); - - boolean present_taskStatus = true && (isSetTaskStatus()); - list.add(present_taskStatus); - if (present_taskStatus) - list.add(taskStatus); - - boolean present_operationStarted = true && (isSetOperationStarted()); - list.add(present_operationStarted); - if (present_operationStarted) - list.add(operationStarted); - - boolean present_operationCompleted = true && (isSetOperationCompleted()); - list.add(present_operationCompleted); - if (present_operationCompleted) - list.add(operationCompleted); - - boolean present_hasResultSet = true && (isSetHasResultSet()); - list.add(present_hasResultSet); - if (present_hasResultSet) - list.add(hasResultSet); - - boolean present_progressUpdateResponse = true && (isSetProgressUpdateResponse()); - list.add(present_progressUpdateResponse); - if (present_progressUpdateResponse) - list.add(progressUpdateResponse); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetOperationStatusResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationState()).compareTo(other.isSetOperationState()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationState()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationState, other.operationState); - if (lastComparison != 0) { - return 
lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSqlState()).compareTo(other.isSetSqlState()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSqlState()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sqlState, other.sqlState); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorCode()).compareTo(other.isSetErrorCode()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorCode()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, other.errorCode); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(other.isSetErrorMessage()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorMessage()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, other.errorMessage); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTaskStatus()).compareTo(other.isSetTaskStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTaskStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.taskStatus, other.taskStatus); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationStarted()).compareTo(other.isSetOperationStarted()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationStarted()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationStarted, other.operationStarted); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationCompleted()).compareTo(other.isSetOperationCompleted()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationCompleted()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationCompleted, other.operationCompleted); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHasResultSet()).compareTo(other.isSetHasResultSet()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHasResultSet()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasResultSet, other.hasResultSet); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetProgressUpdateResponse()).compareTo(other.isSetProgressUpdateResponse()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetProgressUpdateResponse()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.progressUpdateResponse, other.progressUpdateResponse); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetOperationStatusResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if 
(isSetOperationState()) { - if (!first) sb.append(", "); - sb.append("operationState:"); - if (this.operationState == null) { - sb.append("null"); - } else { - sb.append(this.operationState); - } - first = false; - } - if (isSetSqlState()) { - if (!first) sb.append(", "); - sb.append("sqlState:"); - if (this.sqlState == null) { - sb.append("null"); - } else { - sb.append(this.sqlState); - } - first = false; - } - if (isSetErrorCode()) { - if (!first) sb.append(", "); - sb.append("errorCode:"); - sb.append(this.errorCode); - first = false; - } - if (isSetErrorMessage()) { - if (!first) sb.append(", "); - sb.append("errorMessage:"); - if (this.errorMessage == null) { - sb.append("null"); - } else { - sb.append(this.errorMessage); - } - first = false; - } - if (isSetTaskStatus()) { - if (!first) sb.append(", "); - sb.append("taskStatus:"); - if (this.taskStatus == null) { - sb.append("null"); - } else { - sb.append(this.taskStatus); - } - first = false; - } - if (isSetOperationStarted()) { - if (!first) sb.append(", "); - sb.append("operationStarted:"); - sb.append(this.operationStarted); - first = false; - } - if (isSetOperationCompleted()) { - if (!first) sb.append(", "); - sb.append("operationCompleted:"); - sb.append(this.operationCompleted); - first = false; - } - if (isSetHasResultSet()) { - if (!first) sb.append(", "); - sb.append("hasResultSet:"); - sb.append(this.hasResultSet); - first = false; - } - if (isSetProgressUpdateResponse()) { - if (!first) sb.append(", "); - sb.append("progressUpdateResponse:"); - if (this.progressUpdateResponse == null) { - sb.append("null"); - } else { - sb.append(this.progressUpdateResponse); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetOperationStatusRespStandardSchemeFactory implements SchemeFactory { - public TGetOperationStatusRespStandardScheme getScheme() { - return new TGetOperationStatusRespStandardScheme(); - } - } - - private static class TGetOperationStatusRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_STATE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.operationState = org.apache.hive.service.rpc.thrift.TOperationState.findByValue(iprot.readI32()); - struct.setOperationStateIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SQL_STATE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // ERROR_CODE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // ERROR_MESSAGE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 6: // TASK_STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.taskStatus = iprot.readString(); - struct.setTaskStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 7: // OPERATION_STARTED - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.operationStarted = iprot.readI64(); - struct.setOperationStartedIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 8: // OPERATION_COMPLETED - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.operationCompleted = iprot.readI64(); - struct.setOperationCompletedIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 9: // HAS_RESULT_SET - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.hasResultSet = iprot.readBool(); - struct.setHasResultSetIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 10: // PROGRESS_UPDATE_RESPONSE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - 
struct.progressUpdateResponse = new TProgressUpdateResp(); - struct.progressUpdateResponse.read(iprot); - struct.setProgressUpdateResponseIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationState != null) { - if (struct.isSetOperationState()) { - oprot.writeFieldBegin(OPERATION_STATE_FIELD_DESC); - oprot.writeI32(struct.operationState.getValue()); - oprot.writeFieldEnd(); - } - } - if (struct.sqlState != null) { - if (struct.isSetSqlState()) { - oprot.writeFieldBegin(SQL_STATE_FIELD_DESC); - oprot.writeString(struct.sqlState); - oprot.writeFieldEnd(); - } - } - if (struct.isSetErrorCode()) { - oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC); - oprot.writeI32(struct.errorCode); - oprot.writeFieldEnd(); - } - if (struct.errorMessage != null) { - if (struct.isSetErrorMessage()) { - oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC); - oprot.writeString(struct.errorMessage); - oprot.writeFieldEnd(); - } - } - if (struct.taskStatus != null) { - if (struct.isSetTaskStatus()) { - oprot.writeFieldBegin(TASK_STATUS_FIELD_DESC); - oprot.writeString(struct.taskStatus); - oprot.writeFieldEnd(); - } - } - if (struct.isSetOperationStarted()) { - oprot.writeFieldBegin(OPERATION_STARTED_FIELD_DESC); - oprot.writeI64(struct.operationStarted); - oprot.writeFieldEnd(); - } - if (struct.isSetOperationCompleted()) { - oprot.writeFieldBegin(OPERATION_COMPLETED_FIELD_DESC); - oprot.writeI64(struct.operationCompleted); - oprot.writeFieldEnd(); - } - if (struct.isSetHasResultSet()) { - oprot.writeFieldBegin(HAS_RESULT_SET_FIELD_DESC); - oprot.writeBool(struct.hasResultSet); - oprot.writeFieldEnd(); - } - if (struct.progressUpdateResponse != null) { - if (struct.isSetProgressUpdateResponse()) { - oprot.writeFieldBegin(PROGRESS_UPDATE_RESPONSE_FIELD_DESC); - struct.progressUpdateResponse.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetOperationStatusRespTupleSchemeFactory implements SchemeFactory { - public TGetOperationStatusRespTupleScheme getScheme() { - return new TGetOperationStatusRespTupleScheme(); - } - } - - private static class TGetOperationStatusRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationState()) { - optionals.set(0); - } - if (struct.isSetSqlState()) { - optionals.set(1); - } - if (struct.isSetErrorCode()) { - optionals.set(2); - } - if (struct.isSetErrorMessage()) { - optionals.set(3); - } - if (struct.isSetTaskStatus()) { - optionals.set(4); - } - if (struct.isSetOperationStarted()) { - optionals.set(5); - } - if (struct.isSetOperationCompleted()) { - optionals.set(6); - } - if (struct.isSetHasResultSet()) { - optionals.set(7); - } - if (struct.isSetProgressUpdateResponse()) 
{ - optionals.set(8); - } - oprot.writeBitSet(optionals, 9); - if (struct.isSetOperationState()) { - oprot.writeI32(struct.operationState.getValue()); - } - if (struct.isSetSqlState()) { - oprot.writeString(struct.sqlState); - } - if (struct.isSetErrorCode()) { - oprot.writeI32(struct.errorCode); - } - if (struct.isSetErrorMessage()) { - oprot.writeString(struct.errorMessage); - } - if (struct.isSetTaskStatus()) { - oprot.writeString(struct.taskStatus); - } - if (struct.isSetOperationStarted()) { - oprot.writeI64(struct.operationStarted); - } - if (struct.isSetOperationCompleted()) { - oprot.writeI64(struct.operationCompleted); - } - if (struct.isSetHasResultSet()) { - oprot.writeBool(struct.hasResultSet); - } - if (struct.isSetProgressUpdateResponse()) { - struct.progressUpdateResponse.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetOperationStatusResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(9); - if (incoming.get(0)) { - struct.operationState = org.apache.hive.service.rpc.thrift.TOperationState.findByValue(iprot.readI32()); - struct.setOperationStateIsSet(true); - } - if (incoming.get(1)) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } - if (incoming.get(2)) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } - if (incoming.get(3)) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } - if (incoming.get(4)) { - struct.taskStatus = iprot.readString(); - struct.setTaskStatusIsSet(true); - } - if (incoming.get(5)) { - struct.operationStarted = iprot.readI64(); - struct.setOperationStartedIsSet(true); - } - if (incoming.get(6)) { - struct.operationCompleted = iprot.readI64(); - struct.setOperationCompletedIsSet(true); - } - if (incoming.get(7)) { - struct.hasResultSet = iprot.readBool(); - struct.setHasResultSetIsSet(true); - } - if (incoming.get(8)) { - struct.progressUpdateResponse = new TProgressUpdateResp(); - struct.progressUpdateResponse.read(iprot); - struct.setProgressUpdateResponseIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysReq.java deleted file mode 100644 index 1bec9b51c72d8..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysReq.java +++ /dev/null @@ -1,716 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; 
-import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetPrimaryKeysReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetPrimaryKeysReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetPrimaryKeysReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetPrimaryKeysReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - private String tableName; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"), - TABLE_NAME((short)4, "tableName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - case 4: // TABLE_NAME - return TABLE_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME,_Fields.TABLE_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetPrimaryKeysReq.class, metaDataMap); - } - - public TGetPrimaryKeysReq() { - } - - public TGetPrimaryKeysReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetPrimaryKeysReq(TGetPrimaryKeysReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - if (other.isSetTableName()) { - this.tableName = other.tableName; - } - } - - public TGetPrimaryKeysReq deepCopy() { - return new TGetPrimaryKeysReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - this.tableName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public String getTableName() { - return this.tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - case TABLE_NAME: - return getTableName(); - - } - throw new 
IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - case TABLE_NAME: - return isSetTableName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetPrimaryKeysReq) - return this.equals((TGetPrimaryKeysReq)that); - return false; - } - - public boolean equals(TGetPrimaryKeysReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - list.add(present_catalogName); - if (present_catalogName) - list.add(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - list.add(present_schemaName); - if (present_schemaName) - list.add(schemaName); - - boolean present_tableName = true && (isSetTableName()); - list.add(present_tableName); - if (present_tableName) - list.add(tableName); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetPrimaryKeysReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = 
Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetPrimaryKeysReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - if (isSetTableName()) { - if (!first) sb.append(", "); - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetPrimaryKeysReqStandardSchemeFactory implements SchemeFactory { - public TGetPrimaryKeysReqStandardScheme getScheme() { - return new TGetPrimaryKeysReqStandardScheme(); - } - } - - private static class TGetPrimaryKeysReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - if (struct.tableName != null) { - if (struct.isSetTableName()) { - 
oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.tableName); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetPrimaryKeysReqTupleSchemeFactory implements SchemeFactory { - public TGetPrimaryKeysReqTupleScheme getScheme() { - return new TGetPrimaryKeysReqTupleScheme(); - } - } - - private static class TGetPrimaryKeysReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - if (struct.isSetTableName()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - if (struct.isSetTableName()) { - oprot.writeString(struct.tableName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(3); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - if (incoming.get(2)) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java deleted file mode 100644 index 72d9507fe1031..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetPrimaryKeysResp implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetPrimaryKeysResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetPrimaryKeysRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetPrimaryKeysRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetPrimaryKeysResp.class, metaDataMap); - } - - public TGetPrimaryKeysResp() { - } - - public TGetPrimaryKeysResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TGetPrimaryKeysResp(TGetPrimaryKeysResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetPrimaryKeysResp deepCopy() { - return new TGetPrimaryKeysResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return 
getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetPrimaryKeysResp) - return this.equals((TGetPrimaryKeysResp)that); - return false; - } - - public boolean equals(TGetPrimaryKeysResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetPrimaryKeysResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetPrimaryKeysResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - 
if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetPrimaryKeysRespStandardSchemeFactory implements SchemeFactory { - public TGetPrimaryKeysRespStandardScheme getScheme() { - return new TGetPrimaryKeysRespStandardScheme(); - } - } - - private static class TGetPrimaryKeysRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetPrimaryKeysRespTupleSchemeFactory implements SchemeFactory { - public TGetPrimaryKeysRespTupleScheme getScheme() { - return new TGetPrimaryKeysRespTupleScheme(); - } - } - - private static class TGetPrimaryKeysRespTupleScheme extends TupleScheme { - - @Override - public void 
write(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetResultSetMetadataReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetResultSetMetadataReq.java deleted file mode 100644 index b94d827de264d..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetResultSetMetadataReq.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetResultSetMetadataReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetResultSetMetadataReq"); - - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetResultSetMetadataReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetResultSetMetadataReqTupleSchemeFactory()); - } - - private TOperationHandle operationHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_HANDLE((short)1, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetResultSetMetadataReq.class, metaDataMap); - } - - public TGetResultSetMetadataReq() { - } - - public TGetResultSetMetadataReq( - TOperationHandle operationHandle) - { - this(); - this.operationHandle = operationHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetResultSetMetadataReq(TGetResultSetMetadataReq other) { - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetResultSetMetadataReq deepCopy() { - return new TGetResultSetMetadataReq(this); - } - - @Override - public void clear() { - this.operationHandle = null; - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetResultSetMetadataReq) - return this.equals((TGetResultSetMetadataReq)that); - return false; - } - - public boolean equals(TGetResultSetMetadataReq that) { - if (that == null) - return false; - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetResultSetMetadataReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetResultSetMetadataReq("); - boolean first = true; - - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetResultSetMetadataReqStandardSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataReqStandardScheme getScheme() { - return new TGetResultSetMetadataReqStandardScheme(); - } - } - - private static class TGetResultSetMetadataReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationHandle != null) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetResultSetMetadataReqTupleSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataReqTupleScheme getScheme() { - return new TGetResultSetMetadataReqTupleScheme(); - } - } - - private static class TGetResultSetMetadataReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - 
TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetResultSetMetadataResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetResultSetMetadataResp.java deleted file mode 100644 index ae2021ebd5a10..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetResultSetMetadataResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetResultSetMetadataResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetResultSetMetadataResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField SCHEMA_FIELD_DESC = new org.apache.thrift.protocol.TField("schema", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetResultSetMetadataRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetResultSetMetadataRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TTableSchema schema; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - SCHEMA((short)2, "schema"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // SCHEMA - return SCHEMA; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.SCHEMA}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.SCHEMA, new org.apache.thrift.meta_data.FieldMetaData("schema", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTableSchema.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetResultSetMetadataResp.class, metaDataMap); - } - - public TGetResultSetMetadataResp() { - } - - public TGetResultSetMetadataResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetResultSetMetadataResp(TGetResultSetMetadataResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetSchema()) { - this.schema = new TTableSchema(other.schema); - } - } - - public TGetResultSetMetadataResp deepCopy() { - return new TGetResultSetMetadataResp(this); - } - - @Override - public void clear() { - this.status = null; - this.schema = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TTableSchema getSchema() { - return this.schema; - } - - public void setSchema(TTableSchema schema) { - this.schema = schema; - } - - public void unsetSchema() { - this.schema = null; - } - - /** Returns true if field schema is set (has been assigned a value) and false otherwise */ - public boolean isSetSchema() { - return this.schema != null; - } - - public void setSchemaIsSet(boolean value) { - if (!value) { - this.schema = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case SCHEMA: - if (value == null) { - unsetSchema(); - } else { - setSchema((TTableSchema)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case SCHEMA: - return getSchema(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case SCHEMA: - return isSetSchema(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetResultSetMetadataResp) - return this.equals((TGetResultSetMetadataResp)that); - return false; - } - - public boolean equals(TGetResultSetMetadataResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_schema = true && this.isSetSchema(); - boolean that_present_schema = true && that.isSetSchema(); - if (this_present_schema || that_present_schema) { - if (!(this_present_schema && that_present_schema)) - return false; - if (!this.schema.equals(that.schema)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_schema = true && (isSetSchema()); - list.add(present_schema); - if (present_schema) - list.add(schema); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetResultSetMetadataResp other) { - if 
(!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchema()).compareTo(other.isSetSchema()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchema()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schema, other.schema); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetResultSetMetadataResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetSchema()) { - if (!first) sb.append(", "); - sb.append("schema:"); - if (this.schema == null) { - sb.append("null"); - } else { - sb.append(this.schema); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (schema != null) { - schema.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetResultSetMetadataRespStandardSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataRespStandardScheme getScheme() { - return new TGetResultSetMetadataRespStandardScheme(); - } - } - - private static class TGetResultSetMetadataRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // SCHEMA - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.schema = new TTableSchema(); - struct.schema.read(iprot); - struct.setSchemaIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.schema != null) { - if (struct.isSetSchema()) { - oprot.writeFieldBegin(SCHEMA_FIELD_DESC); - struct.schema.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetResultSetMetadataRespTupleSchemeFactory implements SchemeFactory { - public TGetResultSetMetadataRespTupleScheme getScheme() { - return new TGetResultSetMetadataRespTupleScheme(); - } - } - - private static class TGetResultSetMetadataRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetSchema()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSchema()) { - struct.schema.write(oprot); - } - } - - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, TGetResultSetMetadataResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.schema = new TTableSchema(); - struct.schema.read(iprot); - struct.setSchemaIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java deleted file mode 100644 index 17eed87ae096f..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java +++ /dev/null @@ -1,610 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetSchemasReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetSchemasReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetSchemasReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetSchemasReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetSchemasReq.class, metaDataMap); - } - - public TGetSchemasReq() { - } - - public TGetSchemasReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetSchemasReq(TGetSchemasReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - } - - public TGetSchemasReq deepCopy() { - return new TGetSchemasReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetSchemasReq) - return this.equals((TGetSchemasReq)that); - return false; - } - - public boolean equals(TGetSchemasReq that) { - if (that == null) - return false; - - boolean 
this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if (this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - list.add(present_catalogName); - if (present_catalogName) - list.add(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - list.add(present_schemaName); - if (present_schemaName) - list.add(schemaName); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetSchemasReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetSchemasReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - 
sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetSchemasReqStandardSchemeFactory implements SchemeFactory { - public TGetSchemasReqStandardScheme getScheme() { - return new TGetSchemasReqStandardScheme(); - } - } - - private static class TGetSchemasReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetSchemasReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetSchemasReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.catalogName != null) { - if 
(struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetSchemasReqTupleSchemeFactory implements SchemeFactory { - public TGetSchemasReqTupleScheme getScheme() { - return new TGetSchemasReqTupleScheme(); - } - } - - private static class TGetSchemasReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetSchemasReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetSchemasReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetSchemasResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetSchemasResp.java deleted file mode 100644 index e5317f7ff5046..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetSchemasResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetSchemasResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - 
private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetSchemasResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetSchemasRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetSchemasRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetSchemasResp.class, metaDataMap); - } - - public TGetSchemasResp() { - } - - public TGetSchemasResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TGetSchemasResp(TGetSchemasResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetSchemasResp deepCopy() { - return new TGetSchemasResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new 
IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetSchemasResp) - return this.equals((TGetSchemasResp)that); - return false; - } - - public boolean equals(TGetSchemasResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetSchemasResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetSchemasResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } 
else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetSchemasRespStandardSchemeFactory implements SchemeFactory { - public TGetSchemasRespStandardScheme getScheme() { - return new TGetSchemasRespStandardScheme(); - } - } - - private static class TGetSchemasRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetSchemasResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetSchemasResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetSchemasRespTupleSchemeFactory implements SchemeFactory { - public TGetSchemasRespTupleScheme getScheme() { - return new TGetSchemasRespTupleScheme(); - } - } - - private static class TGetSchemasRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetSchemasResp struct) throws org.apache.thrift.TException { - 
TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetSchemasResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTableTypesReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTableTypesReq.java deleted file mode 100644 index c027748a336e6..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTableTypesReq.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetTableTypesReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTableTypesReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTableTypesReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTableTypesReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTableTypesReq.class, metaDataMap); - } - - public TGetTableTypesReq() { - } - - public TGetTableTypesReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetTableTypesReq(TGetTableTypesReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TGetTableTypesReq deepCopy() { - return new TGetTableTypesReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTableTypesReq) - return this.equals((TGetTableTypesReq)that); - return false; - } - - public boolean equals(TGetTableTypesReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetTableTypesReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); 
- } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTableTypesReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTableTypesReqStandardSchemeFactory implements SchemeFactory { - public TGetTableTypesReqStandardScheme getScheme() { - return new TGetTableTypesReqStandardScheme(); - } - } - - private static class TGetTableTypesReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTableTypesReqTupleSchemeFactory implements SchemeFactory { - public TGetTableTypesReqTupleScheme getScheme() { - return new TGetTableTypesReqTupleScheme(); - } - } - - private static class TGetTableTypesReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) 
prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTableTypesResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTableTypesResp.java deleted file mode 100644 index c6ce0d4368fdd..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTableTypesResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetTableTypesResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTableTypesResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTableTypesRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTableTypesRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTableTypesResp.class, metaDataMap); - } - - public TGetTableTypesResp() { - } - - public TGetTableTypesResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetTableTypesResp(TGetTableTypesResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetTableTypesResp deepCopy() { - return new TGetTableTypesResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTableTypesResp) - return this.equals((TGetTableTypesResp)that); - return false; - } - - public boolean equals(TGetTableTypesResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); 
- if (present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetTableTypesResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTableTypesResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTableTypesRespStandardSchemeFactory implements SchemeFactory { - public TGetTableTypesRespStandardScheme getScheme() { - return new TGetTableTypesRespStandardScheme(); - } - } - - private static class TGetTableTypesRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTableTypesRespTupleSchemeFactory implements SchemeFactory { - public TGetTableTypesRespTupleScheme getScheme() { - return new TGetTableTypesRespTupleScheme(); - } - } - - private static class TGetTableTypesRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - 
- @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTableTypesResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java deleted file mode 100644 index 1aa3f946727b6..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java +++ /dev/null @@ -1,871 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetTablesReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTablesReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField TABLE_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("tableTypes", org.apache.thrift.protocol.TType.LIST, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTablesReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTablesReqTupleSchemeFactory()); - } - - private 
TSessionHandle sessionHandle; // required - private String catalogName; // optional - private String schemaName; // optional - private String tableName; // optional - private List tableTypes; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - CATALOG_NAME((short)2, "catalogName"), - SCHEMA_NAME((short)3, "schemaName"), - TABLE_NAME((short)4, "tableName"), - TABLE_TYPES((short)5, "tableTypes"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // CATALOG_NAME - return CATALOG_NAME; - case 3: // SCHEMA_NAME - return SCHEMA_NAME; - case 4: // TABLE_NAME - return TABLE_NAME; - case 5: // TABLE_TYPES - return TABLE_TYPES; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME,_Fields.TABLE_NAME,_Fields.TABLE_TYPES}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TPatternOrIdentifier"))); - tmpMap.put(_Fields.TABLE_TYPES, new 
org.apache.thrift.meta_data.FieldMetaData("tableTypes", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTablesReq.class, metaDataMap); - } - - public TGetTablesReq() { - } - - public TGetTablesReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. - */ - public TGetTablesReq(TGetTablesReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetCatalogName()) { - this.catalogName = other.catalogName; - } - if (other.isSetSchemaName()) { - this.schemaName = other.schemaName; - } - if (other.isSetTableName()) { - this.tableName = other.tableName; - } - if (other.isSetTableTypes()) { - List __this__tableTypes = new ArrayList(other.tableTypes); - this.tableTypes = __this__tableTypes; - } - } - - public TGetTablesReq deepCopy() { - return new TGetTablesReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.catalogName = null; - this.schemaName = null; - this.tableName = null; - this.tableTypes = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getCatalogName() { - return this.catalogName; - } - - public void setCatalogName(String catalogName) { - this.catalogName = catalogName; - } - - public void unsetCatalogName() { - this.catalogName = null; - } - - /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ - public boolean isSetCatalogName() { - return this.catalogName != null; - } - - public void setCatalogNameIsSet(boolean value) { - if (!value) { - this.catalogName = null; - } - } - - public String getSchemaName() { - return this.schemaName; - } - - public void setSchemaName(String schemaName) { - this.schemaName = schemaName; - } - - public void unsetSchemaName() { - this.schemaName = null; - } - - /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ - public boolean isSetSchemaName() { - return this.schemaName != null; - } - - public void setSchemaNameIsSet(boolean value) { - if (!value) { - this.schemaName = null; - } - } - - public String getTableName() { - return this.tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void unsetTableName() { - this.tableName = null; - } - - /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ - public boolean isSetTableName() { - return this.tableName != null; - } - - public void setTableNameIsSet(boolean value) { - if (!value) { - this.tableName = null; - } - } - - public int getTableTypesSize() { - return (this.tableTypes == null) ? 
0 : this.tableTypes.size(); - } - - public java.util.Iterator getTableTypesIterator() { - return (this.tableTypes == null) ? null : this.tableTypes.iterator(); - } - - public void addToTableTypes(String elem) { - if (this.tableTypes == null) { - this.tableTypes = new ArrayList(); - } - this.tableTypes.add(elem); - } - - public List getTableTypes() { - return this.tableTypes; - } - - public void setTableTypes(List tableTypes) { - this.tableTypes = tableTypes; - } - - public void unsetTableTypes() { - this.tableTypes = null; - } - - /** Returns true if field tableTypes is set (has been assigned a value) and false otherwise */ - public boolean isSetTableTypes() { - return this.tableTypes != null; - } - - public void setTableTypesIsSet(boolean value) { - if (!value) { - this.tableTypes = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CATALOG_NAME: - if (value == null) { - unsetCatalogName(); - } else { - setCatalogName((String)value); - } - break; - - case SCHEMA_NAME: - if (value == null) { - unsetSchemaName(); - } else { - setSchemaName((String)value); - } - break; - - case TABLE_NAME: - if (value == null) { - unsetTableName(); - } else { - setTableName((String)value); - } - break; - - case TABLE_TYPES: - if (value == null) { - unsetTableTypes(); - } else { - setTableTypes((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case CATALOG_NAME: - return getCatalogName(); - - case SCHEMA_NAME: - return getSchemaName(); - - case TABLE_NAME: - return getTableName(); - - case TABLE_TYPES: - return getTableTypes(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case CATALOG_NAME: - return isSetCatalogName(); - case SCHEMA_NAME: - return isSetSchemaName(); - case TABLE_NAME: - return isSetTableName(); - case TABLE_TYPES: - return isSetTableTypes(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTablesReq) - return this.equals((TGetTablesReq)that); - return false; - } - - public boolean equals(TGetTablesReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_catalogName = true && this.isSetCatalogName(); - boolean that_present_catalogName = true && that.isSetCatalogName(); - if (this_present_catalogName || that_present_catalogName) { - if (!(this_present_catalogName && that_present_catalogName)) - return false; - if (!this.catalogName.equals(that.catalogName)) - return false; - } - - boolean this_present_schemaName = true && this.isSetSchemaName(); - boolean that_present_schemaName = true && that.isSetSchemaName(); - if 
(this_present_schemaName || that_present_schemaName) { - if (!(this_present_schemaName && that_present_schemaName)) - return false; - if (!this.schemaName.equals(that.schemaName)) - return false; - } - - boolean this_present_tableName = true && this.isSetTableName(); - boolean that_present_tableName = true && that.isSetTableName(); - if (this_present_tableName || that_present_tableName) { - if (!(this_present_tableName && that_present_tableName)) - return false; - if (!this.tableName.equals(that.tableName)) - return false; - } - - boolean this_present_tableTypes = true && this.isSetTableTypes(); - boolean that_present_tableTypes = true && that.isSetTableTypes(); - if (this_present_tableTypes || that_present_tableTypes) { - if (!(this_present_tableTypes && that_present_tableTypes)) - return false; - if (!this.tableTypes.equals(that.tableTypes)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_catalogName = true && (isSetCatalogName()); - list.add(present_catalogName); - if (present_catalogName) - list.add(catalogName); - - boolean present_schemaName = true && (isSetSchemaName()); - list.add(present_schemaName); - if (present_schemaName) - list.add(schemaName); - - boolean present_tableName = true && (isSetTableName()); - list.add(present_tableName); - if (present_tableName) - list.add(tableName); - - boolean present_tableTypes = true && (isSetTableTypes()); - list.add(present_tableTypes); - if (present_tableTypes) - list.add(tableTypes); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetTablesReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetCatalogName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSchemaName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTableTypes()).compareTo(other.isSetTableTypes()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTableTypes()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableTypes, 
other.tableTypes); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTablesReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (isSetCatalogName()) { - if (!first) sb.append(", "); - sb.append("catalogName:"); - if (this.catalogName == null) { - sb.append("null"); - } else { - sb.append(this.catalogName); - } - first = false; - } - if (isSetSchemaName()) { - if (!first) sb.append(", "); - sb.append("schemaName:"); - if (this.schemaName == null) { - sb.append("null"); - } else { - sb.append(this.schemaName); - } - first = false; - } - if (isSetTableName()) { - if (!first) sb.append(", "); - sb.append("tableName:"); - if (this.tableName == null) { - sb.append("null"); - } else { - sb.append(this.tableName); - } - first = false; - } - if (isSetTableTypes()) { - if (!first) sb.append(", "); - sb.append("tableTypes:"); - if (this.tableTypes == null) { - sb.append("null"); - } else { - sb.append(this.tableTypes); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTablesReqStandardSchemeFactory implements SchemeFactory { - public TGetTablesReqStandardScheme getScheme() { - return new TGetTablesReqStandardScheme(); - } - } - - private static class TGetTablesReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTablesReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // CATALOG_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SCHEMA_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // TABLE_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // TABLE_TYPES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list172 = iprot.readListBegin(); - struct.tableTypes = new ArrayList(_list172.size); - String _elem173; - for (int _i174 = 0; _i174 < _list172.size; ++_i174) - { - _elem173 = iprot.readString(); - struct.tableTypes.add(_elem173); - } - iprot.readListEnd(); - } - struct.setTableTypesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTablesReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } 
- if (struct.catalogName != null) { - if (struct.isSetCatalogName()) { - oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); - oprot.writeString(struct.catalogName); - oprot.writeFieldEnd(); - } - } - if (struct.schemaName != null) { - if (struct.isSetSchemaName()) { - oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); - oprot.writeString(struct.schemaName); - oprot.writeFieldEnd(); - } - } - if (struct.tableName != null) { - if (struct.isSetTableName()) { - oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); - oprot.writeString(struct.tableName); - oprot.writeFieldEnd(); - } - } - if (struct.tableTypes != null) { - if (struct.isSetTableTypes()) { - oprot.writeFieldBegin(TABLE_TYPES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tableTypes.size())); - for (String _iter175 : struct.tableTypes) - { - oprot.writeString(_iter175); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTablesReqTupleSchemeFactory implements SchemeFactory { - public TGetTablesReqTupleScheme getScheme() { - return new TGetTablesReqTupleScheme(); - } - } - - private static class TGetTablesReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTablesReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetCatalogName()) { - optionals.set(0); - } - if (struct.isSetSchemaName()) { - optionals.set(1); - } - if (struct.isSetTableName()) { - optionals.set(2); - } - if (struct.isSetTableTypes()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetCatalogName()) { - oprot.writeString(struct.catalogName); - } - if (struct.isSetSchemaName()) { - oprot.writeString(struct.schemaName); - } - if (struct.isSetTableName()) { - oprot.writeString(struct.tableName); - } - if (struct.isSetTableTypes()) { - { - oprot.writeI32(struct.tableTypes.size()); - for (String _iter176 : struct.tableTypes) - { - oprot.writeString(_iter176); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTablesReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - BitSet incoming = iprot.readBitSet(4); - if (incoming.get(0)) { - struct.catalogName = iprot.readString(); - struct.setCatalogNameIsSet(true); - } - if (incoming.get(1)) { - struct.schemaName = iprot.readString(); - struct.setSchemaNameIsSet(true); - } - if (incoming.get(2)) { - struct.tableName = iprot.readString(); - struct.setTableNameIsSet(true); - } - if (incoming.get(3)) { - { - org.apache.thrift.protocol.TList _list177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tableTypes = new ArrayList(_list177.size); - String _elem178; - for (int _i179 = 0; _i179 < _list177.size; ++_i179) - { - _elem178 = iprot.readString(); - struct.tableTypes.add(_elem178); - } - } - struct.setTableTypesIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTablesResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTablesResp.java deleted file mode 100644 index 
0b7c3825d35a5..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTablesResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetTablesResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTablesResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTablesRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTablesRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTablesResp.class, metaDataMap); - } - - public TGetTablesResp() { - } - - public TGetTablesResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TGetTablesResp(TGetTablesResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetTablesResp deepCopy() { - return new TGetTablesResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new 
IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTablesResp) - return this.equals((TGetTablesResp)that); - return false; - } - - public boolean equals(TGetTablesResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetTablesResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTablesResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else 
{ - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTablesRespStandardSchemeFactory implements SchemeFactory { - public TGetTablesRespStandardScheme getScheme() { - return new TGetTablesRespStandardScheme(); - } - } - - private static class TGetTablesRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTablesResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTablesResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTablesRespTupleSchemeFactory implements SchemeFactory { - public TGetTablesRespTupleScheme getScheme() { - return new TGetTablesRespTupleScheme(); - } - } - - private static class TGetTablesRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTablesResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot 
= (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTablesResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTypeInfoReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTypeInfoReq.java deleted file mode 100644 index 2e0ec60e4bc3d..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTypeInfoReq.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetTypeInfoReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTypeInfoReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTypeInfoReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTypeInfoReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTypeInfoReq.class, metaDataMap); - } - - public TGetTypeInfoReq() { - } - - public TGetTypeInfoReq( - TSessionHandle sessionHandle) - { - this(); - this.sessionHandle = sessionHandle; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetTypeInfoReq(TGetTypeInfoReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - } - - public TGetTypeInfoReq deepCopy() { - return new TGetTypeInfoReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTypeInfoReq) - return this.equals((TGetTypeInfoReq)that); - return false; - } - - public boolean equals(TGetTypeInfoReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetTypeInfoReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - 
@Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTypeInfoReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTypeInfoReqStandardSchemeFactory implements SchemeFactory { - public TGetTypeInfoReqStandardScheme getScheme() { - return new TGetTypeInfoReqStandardScheme(); - } - } - - private static class TGetTypeInfoReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTypeInfoReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTypeInfoReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTypeInfoReqTupleSchemeFactory implements SchemeFactory { - public TGetTypeInfoReqTupleScheme getScheme() { - return new TGetTypeInfoReqTupleScheme(); - } - } - - private static class TGetTypeInfoReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionHandle = new 
TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTypeInfoResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTypeInfoResp.java deleted file mode 100644 index cc2910ef29feb..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TGetTypeInfoResp.java +++ /dev/null @@ -1,509 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TGetTypeInfoResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetTypeInfoResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TGetTypeInfoRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TGetTypeInfoRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TOperationHandle operationHandle; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - OPERATION_HANDLE((short)2, "operationHandle"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // OPERATION_HANDLE - return OPERATION_HANDLE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetTypeInfoResp.class, metaDataMap); - } - - public TGetTypeInfoResp() { - } - - public TGetTypeInfoResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. 
- */ - public TGetTypeInfoResp(TGetTypeInfoResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetOperationHandle()) { - this.operationHandle = new TOperationHandle(other.operationHandle); - } - } - - public TGetTypeInfoResp deepCopy() { - return new TGetTypeInfoResp(this); - } - - @Override - public void clear() { - this.status = null; - this.operationHandle = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public TOperationHandle getOperationHandle() { - return this.operationHandle; - } - - public void setOperationHandle(TOperationHandle operationHandle) { - this.operationHandle = operationHandle; - } - - public void unsetOperationHandle() { - this.operationHandle = null; - } - - /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationHandle() { - return this.operationHandle != null; - } - - public void setOperationHandleIsSet(boolean value) { - if (!value) { - this.operationHandle = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case OPERATION_HANDLE: - if (value == null) { - unsetOperationHandle(); - } else { - setOperationHandle((TOperationHandle)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case OPERATION_HANDLE: - return getOperationHandle(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case OPERATION_HANDLE: - return isSetOperationHandle(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TGetTypeInfoResp) - return this.equals((TGetTypeInfoResp)that); - return false; - } - - public boolean equals(TGetTypeInfoResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_operationHandle = true && this.isSetOperationHandle(); - boolean that_present_operationHandle = true && that.isSetOperationHandle(); - if (this_present_operationHandle || that_present_operationHandle) { - if (!(this_present_operationHandle && that_present_operationHandle)) - return false; - if (!this.operationHandle.equals(that.operationHandle)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if 
(present_status) - list.add(status); - - boolean present_operationHandle = true && (isSetOperationHandle()); - list.add(present_operationHandle); - if (present_operationHandle) - list.add(operationHandle); - - return list.hashCode(); - } - - @Override - public int compareTo(TGetTypeInfoResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TGetTypeInfoResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (isSetOperationHandle()) { - if (!first) sb.append(", "); - sb.append("operationHandle:"); - if (this.operationHandle == null) { - sb.append("null"); - } else { - sb.append(this.operationHandle); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (operationHandle != null) { - operationHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TGetTypeInfoRespStandardSchemeFactory implements SchemeFactory { - public TGetTypeInfoRespStandardScheme getScheme() { - return new TGetTypeInfoRespStandardScheme(); - } - } - - private static class TGetTypeInfoRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationHandle != null) { - if (struct.isSetOperationHandle()) { - oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); - struct.operationHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TGetTypeInfoRespTupleSchemeFactory implements SchemeFactory { - public TGetTypeInfoRespTupleScheme getScheme() { - return new TGetTypeInfoRespTupleScheme(); - } - } - - private static class TGetTypeInfoRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - BitSet optionals = new BitSet(); - if (struct.isSetOperationHandle()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetOperationHandle()) { - struct.operationHandle.write(oprot); - } - } - - @Override - public 
void read(org.apache.thrift.protocol.TProtocol prot, TGetTypeInfoResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.operationHandle = new TOperationHandle(); - struct.operationHandle.read(iprot); - struct.setOperationHandleIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/THandleIdentifier.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/THandleIdentifier.java deleted file mode 100644 index a3879d830000b..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/THandleIdentifier.java +++ /dev/null @@ -1,508 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class THandleIdentifier implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("THandleIdentifier"); - - private static final org.apache.thrift.protocol.TField GUID_FIELD_DESC = new org.apache.thrift.protocol.TField("guid", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField SECRET_FIELD_DESC = new org.apache.thrift.protocol.TField("secret", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new THandleIdentifierStandardSchemeFactory()); - schemes.put(TupleScheme.class, new THandleIdentifierTupleSchemeFactory()); - } - - private ByteBuffer guid; // required - private ByteBuffer secret; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - GUID((short)1, "guid"), - SECRET((short)2, "secret"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // GUID - return GUID; - case 2: // SECRET - return SECRET; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.GUID, new org.apache.thrift.meta_data.FieldMetaData("guid", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - tmpMap.put(_Fields.SECRET, new org.apache.thrift.meta_data.FieldMetaData("secret", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(THandleIdentifier.class, metaDataMap); - } - - public THandleIdentifier() { - } - - public THandleIdentifier( - ByteBuffer guid, - ByteBuffer secret) - { - this(); - this.guid = org.apache.thrift.TBaseHelper.copyBinary(guid); - this.secret = org.apache.thrift.TBaseHelper.copyBinary(secret); - } - - /** - * Performs a deep copy on other. - */ - public THandleIdentifier(THandleIdentifier other) { - if (other.isSetGuid()) { - this.guid = org.apache.thrift.TBaseHelper.copyBinary(other.guid); - } - if (other.isSetSecret()) { - this.secret = org.apache.thrift.TBaseHelper.copyBinary(other.secret); - } - } - - public THandleIdentifier deepCopy() { - return new THandleIdentifier(this); - } - - @Override - public void clear() { - this.guid = null; - this.secret = null; - } - - public byte[] getGuid() { - setGuid(org.apache.thrift.TBaseHelper.rightSize(guid)); - return guid == null ? null : guid.array(); - } - - public ByteBuffer bufferForGuid() { - return org.apache.thrift.TBaseHelper.copyBinary(guid); - } - - public void setGuid(byte[] guid) { - this.guid = guid == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(guid, guid.length)); - } - - public void setGuid(ByteBuffer guid) { - this.guid = org.apache.thrift.TBaseHelper.copyBinary(guid); - } - - public void unsetGuid() { - this.guid = null; - } - - /** Returns true if field guid is set (has been assigned a value) and false otherwise */ - public boolean isSetGuid() { - return this.guid != null; - } - - public void setGuidIsSet(boolean value) { - if (!value) { - this.guid = null; - } - } - - public byte[] getSecret() { - setSecret(org.apache.thrift.TBaseHelper.rightSize(secret)); - return secret == null ? 
null : secret.array(); - } - - public ByteBuffer bufferForSecret() { - return org.apache.thrift.TBaseHelper.copyBinary(secret); - } - - public void setSecret(byte[] secret) { - this.secret = secret == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(secret, secret.length)); - } - - public void setSecret(ByteBuffer secret) { - this.secret = org.apache.thrift.TBaseHelper.copyBinary(secret); - } - - public void unsetSecret() { - this.secret = null; - } - - /** Returns true if field secret is set (has been assigned a value) and false otherwise */ - public boolean isSetSecret() { - return this.secret != null; - } - - public void setSecretIsSet(boolean value) { - if (!value) { - this.secret = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case GUID: - if (value == null) { - unsetGuid(); - } else { - setGuid((ByteBuffer)value); - } - break; - - case SECRET: - if (value == null) { - unsetSecret(); - } else { - setSecret((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case GUID: - return getGuid(); - - case SECRET: - return getSecret(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case GUID: - return isSetGuid(); - case SECRET: - return isSetSecret(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof THandleIdentifier) - return this.equals((THandleIdentifier)that); - return false; - } - - public boolean equals(THandleIdentifier that) { - if (that == null) - return false; - - boolean this_present_guid = true && this.isSetGuid(); - boolean that_present_guid = true && that.isSetGuid(); - if (this_present_guid || that_present_guid) { - if (!(this_present_guid && that_present_guid)) - return false; - if (!this.guid.equals(that.guid)) - return false; - } - - boolean this_present_secret = true && this.isSetSecret(); - boolean that_present_secret = true && that.isSetSecret(); - if (this_present_secret || that_present_secret) { - if (!(this_present_secret && that_present_secret)) - return false; - if (!this.secret.equals(that.secret)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_guid = true && (isSetGuid()); - list.add(present_guid); - if (present_guid) - list.add(guid); - - boolean present_secret = true && (isSetSecret()); - list.add(present_secret); - if (present_secret) - list.add(secret); - - return list.hashCode(); - } - - @Override - public int compareTo(THandleIdentifier other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetGuid()).compareTo(other.isSetGuid()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetGuid()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.guid, other.guid); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSecret()).compareTo(other.isSetSecret()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSecret()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.secret, 
other.secret); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("THandleIdentifier("); - boolean first = true; - - sb.append("guid:"); - if (this.guid == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.guid, sb); - } - first = false; - if (!first) sb.append(", "); - sb.append("secret:"); - if (this.secret == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.secret, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetGuid()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'guid' is unset! Struct:" + toString()); - } - - if (!isSetSecret()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'secret' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class THandleIdentifierStandardSchemeFactory implements SchemeFactory { - public THandleIdentifierStandardScheme getScheme() { - return new THandleIdentifierStandardScheme(); - } - } - - private static class THandleIdentifierStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, THandleIdentifier struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // GUID - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.guid = iprot.readBinary(); - struct.setGuidIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // SECRET - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.secret = iprot.readBinary(); - struct.setSecretIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, THandleIdentifier struct) throws 
org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.guid != null) { - oprot.writeFieldBegin(GUID_FIELD_DESC); - oprot.writeBinary(struct.guid); - oprot.writeFieldEnd(); - } - if (struct.secret != null) { - oprot.writeFieldBegin(SECRET_FIELD_DESC); - oprot.writeBinary(struct.secret); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class THandleIdentifierTupleSchemeFactory implements SchemeFactory { - public THandleIdentifierTupleScheme getScheme() { - return new THandleIdentifierTupleScheme(); - } - } - - private static class THandleIdentifierTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, THandleIdentifier struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeBinary(struct.guid); - oprot.writeBinary(struct.secret); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, THandleIdentifier struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.guid = iprot.readBinary(); - struct.setGuidIsSet(true); - struct.secret = iprot.readBinary(); - struct.setSecretIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI16Column.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI16Column.java deleted file mode 100644 index 3c44b602b4ff7..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI16Column.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TI16Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI16Column"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new 
HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI16ColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI16ColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI16Column.class, metaDataMap); - } - - public TI16Column() { - } - - public TI16Column( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TI16Column(TI16Column other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TI16Column deepCopy() { - return new TI16Column(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 
0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(short elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI16Column) - return this.equals((TI16Column)that); - return false; - } - - public boolean equals(TI16Column that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if 
(present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int compareTo(TI16Column other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI16Column("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI16ColumnStandardSchemeFactory implements SchemeFactory { - public TI16ColumnStandardScheme getScheme() { - return new TI16ColumnStandardScheme(); - } - } - - private static class TI16ColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI16Column struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list70 = iprot.readListBegin(); - struct.values = new ArrayList(_list70.size); - short _elem71; - for (int _i72 = 0; _i72 < _list70.size; ++_i72) - { - _elem71 = iprot.readI16(); - struct.values.add(_elem71); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI16Column struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I16, struct.values.size())); - for (short _iter73 : struct.values) - { - oprot.writeI16(_iter73); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI16ColumnTupleSchemeFactory implements SchemeFactory { - public TI16ColumnTupleScheme getScheme() { - return new TI16ColumnTupleScheme(); - } - } - - private static class TI16ColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI16Column struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (short _iter74 : struct.values) - { - oprot.writeI16(_iter74); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public 
void read(org.apache.thrift.protocol.TProtocol prot, TI16Column struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list75 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I16, iprot.readI32()); - struct.values = new ArrayList(_list75.size); - short _elem76; - for (int _i77 = 0; _i77 < _list75.size; ++_i77) - { - _elem76 = iprot.readI16(); - struct.values.add(_elem76); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI16Value.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI16Value.java deleted file mode 100644 index 29fb4cb85201d..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI16Value.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TI16Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI16Value"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.I16, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI16ValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI16ValueTupleSchemeFactory()); - } - - private short value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI16Value.class, metaDataMap); - } - - public TI16Value() { - } - - /** - * Performs a deep copy on other. 
- */ - public TI16Value(TI16Value other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TI16Value deepCopy() { - return new TI16Value(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public short getValue() { - return this.value; - } - - public void setValue(short value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Short)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return getValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI16Value) - return this.equals((TI16Value)that); - return false; - } - - public boolean equals(TI16Value that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); - - return list.hashCode(); - } - - @Override - public int compareTo(TI16Value other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI16Value("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - 
public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI16ValueStandardSchemeFactory implements SchemeFactory { - public TI16ValueStandardScheme getScheme() { - return new TI16ValueStandardScheme(); - } - } - - private static class TI16ValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI16Value struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.value = iprot.readI16(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI16Value struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeI16(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI16ValueTupleSchemeFactory implements SchemeFactory { - public TI16ValueTupleScheme getScheme() { - return new TI16ValueTupleScheme(); - } - } - - private static class TI16ValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI16Value struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeI16(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI16Value struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readI16(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI32Column.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI32Column.java deleted file mode 100644 index 9834f1ce8f01b..0000000000000 --- 
a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI32Column.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TI32Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI32Column"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI32ColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI32ColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI32Column.class, metaDataMap); - } - - public TI32Column() { - } - - public TI32Column( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TI32Column(TI32Column other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TI32Column deepCopy() { - return new TI32Column(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(int elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI32Column) - return this.equals((TI32Column)that); - return false; - } - - public boolean equals(TI32Column that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if (present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int compareTo(TI32Column other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void 
read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI32Column("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI32ColumnStandardSchemeFactory implements SchemeFactory { - public TI32ColumnStandardScheme getScheme() { - return new TI32ColumnStandardScheme(); - } - } - - private static class TI32ColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI32Column struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list78 = iprot.readListBegin(); - struct.values = new ArrayList(_list78.size); - int _elem79; - for (int _i80 = 0; _i80 < _list78.size; ++_i80) - { - _elem79 = iprot.readI32(); - struct.values.add(_elem79); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI32Column struct) throws org.apache.thrift.TException { - 
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size())); - for (int _iter81 : struct.values) - { - oprot.writeI32(_iter81); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI32ColumnTupleSchemeFactory implements SchemeFactory { - public TI32ColumnTupleScheme getScheme() { - return new TI32ColumnTupleScheme(); - } - } - - private static class TI32ColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI32Column struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (int _iter82 : struct.values) - { - oprot.writeI32(_iter82); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI32Column struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list83 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.values = new ArrayList(_list83.size); - int _elem84; - for (int _i85 = 0; _i85 < _list83.size; ++_i85) - { - _elem84 = iprot.readI32(); - struct.values.add(_elem84); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI32Value.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI32Value.java deleted file mode 100644 index 8a69632b2d76e..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI32Value.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TI32Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI32Value"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.I32, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI32ValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI32ValueTupleSchemeFactory()); - } - - private int value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI32Value.class, metaDataMap); - } - - public TI32Value() { - } - - /** - * Performs a deep copy on other. 
- */ - public TI32Value(TI32Value other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TI32Value deepCopy() { - return new TI32Value(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public int getValue() { - return this.value; - } - - public void setValue(int value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Integer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return getValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI32Value) - return this.equals((TI32Value)that); - return false; - } - - public boolean equals(TI32Value that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); - - return list.hashCode(); - } - - @Override - public int compareTo(TI32Value other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI32Value("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - 
public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI32ValueStandardSchemeFactory implements SchemeFactory { - public TI32ValueStandardScheme getScheme() { - return new TI32ValueStandardScheme(); - } - } - - private static class TI32ValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI32Value struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.value = iprot.readI32(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI32Value struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeI32(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI32ValueTupleSchemeFactory implements SchemeFactory { - public TI32ValueTupleScheme getScheme() { - return new TI32ValueTupleScheme(); - } - } - - private static class TI32ValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI32Value struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeI32(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI32Value struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readI32(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI64Column.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI64Column.java deleted file mode 100644 index cd5ef2d7a9ed9..0000000000000 --- 
a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI64Column.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TI64Column implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI64Column"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI64ColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI64ColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI64Column.class, metaDataMap); - } - - public TI64Column() { - } - - public TI64Column( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TI64Column(TI64Column other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TI64Column deepCopy() { - return new TI64Column(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? null : this.values.iterator(); - } - - public void addToValues(long elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? 
(ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI64Column) - return this.equals((TI64Column)that); - return false; - } - - public boolean equals(TI64Column that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if (present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int compareTo(TI64Column other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void 
read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI64Column("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI64ColumnStandardSchemeFactory implements SchemeFactory { - public TI64ColumnStandardScheme getScheme() { - return new TI64ColumnStandardScheme(); - } - } - - private static class TI64ColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI64Column struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list86 = iprot.readListBegin(); - struct.values = new ArrayList(_list86.size); - long _elem87; - for (int _i88 = 0; _i88 < _list86.size; ++_i88) - { - _elem87 = iprot.readI64(); - struct.values.add(_elem87); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI64Column struct) throws org.apache.thrift.TException { - 
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.values.size())); - for (long _iter89 : struct.values) - { - oprot.writeI64(_iter89); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI64ColumnTupleSchemeFactory implements SchemeFactory { - public TI64ColumnTupleScheme getScheme() { - return new TI64ColumnTupleScheme(); - } - } - - private static class TI64ColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI64Column struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (long _iter90 : struct.values) - { - oprot.writeI64(_iter90); - } - } - oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI64Column struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.values = new ArrayList(_list91.size); - long _elem92; - for (int _i93 = 0; _i93 < _list91.size; ++_i93) - { - _elem92 = iprot.readI64(); - struct.values.add(_elem92); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI64Value.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI64Value.java deleted file mode 100644 index 393c0bd28610d..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TI64Value.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TI64Value implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TI64Value"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.I64, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TI64ValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TI64ValueTupleSchemeFactory()); - } - - private long value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __VALUE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TI64Value.class, metaDataMap); - } - - public TI64Value() { - } - - /** - * Performs a deep copy on other. 
- */ - public TI64Value(TI64Value other) { - __isset_bitfield = other.__isset_bitfield; - this.value = other.value; - } - - public TI64Value deepCopy() { - return new TI64Value(this); - } - - @Override - public void clear() { - setValueIsSet(false); - this.value = 0; - } - - public long getValue() { - return this.value; - } - - public void setValue(long value) { - this.value = value; - setValueIsSet(true); - } - - public void unsetValue() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return EncodingUtils.testBit(__isset_bitfield, __VALUE_ISSET_ID); - } - - public void setValueIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUE_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return getValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TI64Value) - return this.equals((TI64Value)that); - return false; - } - - public boolean equals(TI64Value that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if (!(this_present_value && that_present_value)) - return false; - if (this.value != that.value) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); - - return list.hashCode(); - } - - @Override - public int compareTo(TI64Value other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TI64Value("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - sb.append(this.value); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public 
void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TI64ValueStandardSchemeFactory implements SchemeFactory { - public TI64ValueStandardScheme getScheme() { - return new TI64ValueStandardScheme(); - } - } - - private static class TI64ValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TI64Value struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.value = iprot.readI64(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TI64Value struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeI64(struct.value); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TI64ValueTupleSchemeFactory implements SchemeFactory { - public TI64ValueTupleScheme getScheme() { - return new TI64ValueTupleScheme(); - } - } - - private static class TI64ValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TI64Value struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeI64(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TI64Value struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readI64(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TJobExecutionStatus.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TJobExecutionStatus.java deleted file mode 100644 index b39f208c1b878..0000000000000 --- 
a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TJobExecutionStatus.java +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TJobExecutionStatus implements org.apache.thrift.TEnum { - IN_PROGRESS(0), - COMPLETE(1), - NOT_AVAILABLE(2); - - private final int value; - - private TJobExecutionStatus(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. - */ - public static TJobExecutionStatus findByValue(int value) { - switch (value) { - case 0: - return IN_PROGRESS; - case 1: - return COMPLETE; - case 2: - return NOT_AVAILABLE; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TMapTypeEntry.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TMapTypeEntry.java deleted file mode 100644 index 7ebc15c9432be..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TMapTypeEntry.java +++ /dev/null @@ -1,482 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TMapTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TMapTypeEntry"); - - private static final org.apache.thrift.protocol.TField KEY_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("keyTypePtr", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField VALUE_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("valueTypePtr", org.apache.thrift.protocol.TType.I32, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TMapTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, 
new TMapTypeEntryTupleSchemeFactory()); - } - - private int keyTypePtr; // required - private int valueTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - KEY_TYPE_PTR((short)1, "keyTypePtr"), - VALUE_TYPE_PTR((short)2, "valueTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // KEY_TYPE_PTR - return KEY_TYPE_PTR; - case 2: // VALUE_TYPE_PTR - return VALUE_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __KEYTYPEPTR_ISSET_ID = 0; - private static final int __VALUETYPEPTR_ISSET_ID = 1; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.KEY_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("keyTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr"))); - tmpMap.put(_Fields.VALUE_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("valueTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr"))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TMapTypeEntry.class, metaDataMap); - } - - public TMapTypeEntry() { - } - - public TMapTypeEntry( - int keyTypePtr, - int valueTypePtr) - { - this(); - this.keyTypePtr = keyTypePtr; - setKeyTypePtrIsSet(true); - this.valueTypePtr = valueTypePtr; - setValueTypePtrIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public TMapTypeEntry(TMapTypeEntry other) { - __isset_bitfield = other.__isset_bitfield; - this.keyTypePtr = other.keyTypePtr; - this.valueTypePtr = other.valueTypePtr; - } - - public TMapTypeEntry deepCopy() { - return new TMapTypeEntry(this); - } - - @Override - public void clear() { - setKeyTypePtrIsSet(false); - this.keyTypePtr = 0; - setValueTypePtrIsSet(false); - this.valueTypePtr = 0; - } - - public int getKeyTypePtr() { - return this.keyTypePtr; - } - - public void setKeyTypePtr(int keyTypePtr) { - this.keyTypePtr = keyTypePtr; - setKeyTypePtrIsSet(true); - } - - public void unsetKeyTypePtr() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEYTYPEPTR_ISSET_ID); - } - - /** Returns true if field keyTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetKeyTypePtr() { - return EncodingUtils.testBit(__isset_bitfield, __KEYTYPEPTR_ISSET_ID); - } - - public void setKeyTypePtrIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEYTYPEPTR_ISSET_ID, value); - } - - public int getValueTypePtr() { - return this.valueTypePtr; - } - - public void setValueTypePtr(int valueTypePtr) { - this.valueTypePtr = valueTypePtr; - setValueTypePtrIsSet(true); - } - - public void unsetValueTypePtr() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALUETYPEPTR_ISSET_ID); - } - - /** Returns true if field valueTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetValueTypePtr() { - return EncodingUtils.testBit(__isset_bitfield, __VALUETYPEPTR_ISSET_ID); - } - - public void setValueTypePtrIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALUETYPEPTR_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case KEY_TYPE_PTR: - if (value == null) { - unsetKeyTypePtr(); - } else { - setKeyTypePtr((Integer)value); - } - break; - - case VALUE_TYPE_PTR: - if (value == null) { - unsetValueTypePtr(); - } else { - setValueTypePtr((Integer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case KEY_TYPE_PTR: - return getKeyTypePtr(); - - case VALUE_TYPE_PTR: - return getValueTypePtr(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case KEY_TYPE_PTR: - return isSetKeyTypePtr(); - case VALUE_TYPE_PTR: - return isSetValueTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TMapTypeEntry) - return this.equals((TMapTypeEntry)that); - return false; - } - - public boolean equals(TMapTypeEntry that) { - if (that == null) - return false; - - boolean this_present_keyTypePtr = true; - boolean that_present_keyTypePtr = true; - if (this_present_keyTypePtr || that_present_keyTypePtr) { - if (!(this_present_keyTypePtr && that_present_keyTypePtr)) - return false; - if (this.keyTypePtr != that.keyTypePtr) - return false; - } - - boolean this_present_valueTypePtr = true; - boolean that_present_valueTypePtr = true; - if (this_present_valueTypePtr || that_present_valueTypePtr) { - if (!(this_present_valueTypePtr && that_present_valueTypePtr)) - return false; - if (this.valueTypePtr != that.valueTypePtr) - return 
false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_keyTypePtr = true; - list.add(present_keyTypePtr); - if (present_keyTypePtr) - list.add(keyTypePtr); - - boolean present_valueTypePtr = true; - list.add(present_valueTypePtr); - if (present_valueTypePtr) - list.add(valueTypePtr); - - return list.hashCode(); - } - - @Override - public int compareTo(TMapTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetKeyTypePtr()).compareTo(other.isSetKeyTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetKeyTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.keyTypePtr, other.keyTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetValueTypePtr()).compareTo(other.isSetValueTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValueTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.valueTypePtr, other.valueTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TMapTypeEntry("); - boolean first = true; - - sb.append("keyTypePtr:"); - sb.append(this.keyTypePtr); - first = false; - if (!first) sb.append(", "); - sb.append("valueTypePtr:"); - sb.append(this.valueTypePtr); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetKeyTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'keyTypePtr' is unset! Struct:" + toString()); - } - - if (!isSetValueTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'valueTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TMapTypeEntryStandardSchemeFactory implements SchemeFactory { - public TMapTypeEntryStandardScheme getScheme() { - return new TMapTypeEntryStandardScheme(); - } - } - - private static class TMapTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TMapTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // KEY_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.keyTypePtr = iprot.readI32(); - struct.setKeyTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // VALUE_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.valueTypePtr = iprot.readI32(); - struct.setValueTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TMapTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldBegin(KEY_TYPE_PTR_FIELD_DESC); - oprot.writeI32(struct.keyTypePtr); - oprot.writeFieldEnd(); - oprot.writeFieldBegin(VALUE_TYPE_PTR_FIELD_DESC); - oprot.writeI32(struct.valueTypePtr); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TMapTypeEntryTupleSchemeFactory implements SchemeFactory { - public TMapTypeEntryTupleScheme getScheme() { - return new TMapTypeEntryTupleScheme(); - } - } - - private static class TMapTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TMapTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.keyTypePtr); - oprot.writeI32(struct.valueTypePtr); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TMapTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.keyTypePtr = iprot.readI32(); - struct.setKeyTypePtrIsSet(true); - struct.valueTypePtr = iprot.readI32(); - struct.setValueTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOpenSessionReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOpenSessionReq.java deleted file mode 100644 index e47abbb862cf1..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOpenSessionReq.java +++ /dev/null @@ -1,778 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import 
org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TOpenSessionReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOpenSessionReq"); - - private static final org.apache.thrift.protocol.TField CLIENT_PROTOCOL_FIELD_DESC = new org.apache.thrift.protocol.TField("client_protocol", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField USERNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("username", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField PASSWORD_FIELD_DESC = new org.apache.thrift.protocol.TField("password", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC = new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TOpenSessionReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TOpenSessionReqTupleSchemeFactory()); - } - - private TProtocolVersion client_protocol; // required - private String username; // optional - private String password; // optional - private Map configuration; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see TProtocolVersion - */ - CLIENT_PROTOCOL((short)1, "client_protocol"), - USERNAME((short)2, "username"), - PASSWORD((short)3, "password"), - CONFIGURATION((short)4, "configuration"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // CLIENT_PROTOCOL - return CLIENT_PROTOCOL; - case 2: // USERNAME - return USERNAME; - case 3: // PASSWORD - return PASSWORD; - case 4: // CONFIGURATION - return CONFIGURATION; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.USERNAME,_Fields.PASSWORD,_Fields.CONFIGURATION}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.CLIENT_PROTOCOL, new org.apache.thrift.meta_data.FieldMetaData("client_protocol", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TProtocolVersion.class))); - tmpMap.put(_Fields.USERNAME, new org.apache.thrift.meta_data.FieldMetaData("username", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.PASSWORD, new org.apache.thrift.meta_data.FieldMetaData("password", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.CONFIGURATION, new org.apache.thrift.meta_data.FieldMetaData("configuration", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOpenSessionReq.class, metaDataMap); - } - - public TOpenSessionReq() { - this.client_protocol = org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10; - - } - - public TOpenSessionReq( - TProtocolVersion client_protocol) - { - this(); - this.client_protocol = client_protocol; - } - - /** - * Performs a deep copy on other. 
- */ - public TOpenSessionReq(TOpenSessionReq other) { - if (other.isSetClient_protocol()) { - this.client_protocol = other.client_protocol; - } - if (other.isSetUsername()) { - this.username = other.username; - } - if (other.isSetPassword()) { - this.password = other.password; - } - if (other.isSetConfiguration()) { - Map __this__configuration = new HashMap(other.configuration); - this.configuration = __this__configuration; - } - } - - public TOpenSessionReq deepCopy() { - return new TOpenSessionReq(this); - } - - @Override - public void clear() { - this.client_protocol = org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10; - - this.username = null; - this.password = null; - this.configuration = null; - } - - /** - * - * @see TProtocolVersion - */ - public TProtocolVersion getClient_protocol() { - return this.client_protocol; - } - - /** - * - * @see TProtocolVersion - */ - public void setClient_protocol(TProtocolVersion client_protocol) { - this.client_protocol = client_protocol; - } - - public void unsetClient_protocol() { - this.client_protocol = null; - } - - /** Returns true if field client_protocol is set (has been assigned a value) and false otherwise */ - public boolean isSetClient_protocol() { - return this.client_protocol != null; - } - - public void setClient_protocolIsSet(boolean value) { - if (!value) { - this.client_protocol = null; - } - } - - public String getUsername() { - return this.username; - } - - public void setUsername(String username) { - this.username = username; - } - - public void unsetUsername() { - this.username = null; - } - - /** Returns true if field username is set (has been assigned a value) and false otherwise */ - public boolean isSetUsername() { - return this.username != null; - } - - public void setUsernameIsSet(boolean value) { - if (!value) { - this.username = null; - } - } - - public String getPassword() { - return this.password; - } - - public void setPassword(String password) { - this.password = password; - } - - public void unsetPassword() { - this.password = null; - } - - /** Returns true if field password is set (has been assigned a value) and false otherwise */ - public boolean isSetPassword() { - return this.password != null; - } - - public void setPasswordIsSet(boolean value) { - if (!value) { - this.password = null; - } - } - - public int getConfigurationSize() { - return (this.configuration == null) ? 
0 : this.configuration.size(); - } - - public void putToConfiguration(String key, String val) { - if (this.configuration == null) { - this.configuration = new HashMap(); - } - this.configuration.put(key, val); - } - - public Map getConfiguration() { - return this.configuration; - } - - public void setConfiguration(Map configuration) { - this.configuration = configuration; - } - - public void unsetConfiguration() { - this.configuration = null; - } - - /** Returns true if field configuration is set (has been assigned a value) and false otherwise */ - public boolean isSetConfiguration() { - return this.configuration != null; - } - - public void setConfigurationIsSet(boolean value) { - if (!value) { - this.configuration = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case CLIENT_PROTOCOL: - if (value == null) { - unsetClient_protocol(); - } else { - setClient_protocol((TProtocolVersion)value); - } - break; - - case USERNAME: - if (value == null) { - unsetUsername(); - } else { - setUsername((String)value); - } - break; - - case PASSWORD: - if (value == null) { - unsetPassword(); - } else { - setPassword((String)value); - } - break; - - case CONFIGURATION: - if (value == null) { - unsetConfiguration(); - } else { - setConfiguration((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case CLIENT_PROTOCOL: - return getClient_protocol(); - - case USERNAME: - return getUsername(); - - case PASSWORD: - return getPassword(); - - case CONFIGURATION: - return getConfiguration(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case CLIENT_PROTOCOL: - return isSetClient_protocol(); - case USERNAME: - return isSetUsername(); - case PASSWORD: - return isSetPassword(); - case CONFIGURATION: - return isSetConfiguration(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TOpenSessionReq) - return this.equals((TOpenSessionReq)that); - return false; - } - - public boolean equals(TOpenSessionReq that) { - if (that == null) - return false; - - boolean this_present_client_protocol = true && this.isSetClient_protocol(); - boolean that_present_client_protocol = true && that.isSetClient_protocol(); - if (this_present_client_protocol || that_present_client_protocol) { - if (!(this_present_client_protocol && that_present_client_protocol)) - return false; - if (!this.client_protocol.equals(that.client_protocol)) - return false; - } - - boolean this_present_username = true && this.isSetUsername(); - boolean that_present_username = true && that.isSetUsername(); - if (this_present_username || that_present_username) { - if (!(this_present_username && that_present_username)) - return false; - if (!this.username.equals(that.username)) - return false; - } - - boolean this_present_password = true && this.isSetPassword(); - boolean that_present_password = true && that.isSetPassword(); - if (this_present_password || that_present_password) { - if (!(this_present_password && that_present_password)) - return false; - if (!this.password.equals(that.password)) - return false; - } - - boolean this_present_configuration = true && this.isSetConfiguration(); - boolean that_present_configuration 
= true && that.isSetConfiguration(); - if (this_present_configuration || that_present_configuration) { - if (!(this_present_configuration && that_present_configuration)) - return false; - if (!this.configuration.equals(that.configuration)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_client_protocol = true && (isSetClient_protocol()); - list.add(present_client_protocol); - if (present_client_protocol) - list.add(client_protocol.getValue()); - - boolean present_username = true && (isSetUsername()); - list.add(present_username); - if (present_username) - list.add(username); - - boolean present_password = true && (isSetPassword()); - list.add(present_password); - if (present_password) - list.add(password); - - boolean present_configuration = true && (isSetConfiguration()); - list.add(present_configuration); - if (present_configuration) - list.add(configuration); - - return list.hashCode(); - } - - @Override - public int compareTo(TOpenSessionReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetClient_protocol()).compareTo(other.isSetClient_protocol()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetClient_protocol()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.client_protocol, other.client_protocol); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetUsername()).compareTo(other.isSetUsername()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetUsername()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.username, other.username); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetPassword()).compareTo(other.isSetPassword()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetPassword()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.password, other.password); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetConfiguration()).compareTo(other.isSetConfiguration()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetConfiguration()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TOpenSessionReq("); - boolean first = true; - - sb.append("client_protocol:"); - if (this.client_protocol == null) { - sb.append("null"); - } else { - sb.append(this.client_protocol); - } - first = false; - if (isSetUsername()) { - if (!first) sb.append(", "); - sb.append("username:"); - if (this.username == null) { - sb.append("null"); - } else { - sb.append(this.username); - } - first = false; - } - if (isSetPassword()) { - if (!first) sb.append(", "); - 
sb.append("password:"); - if (this.password == null) { - sb.append("null"); - } else { - sb.append(this.password); - } - first = false; - } - if (isSetConfiguration()) { - if (!first) sb.append(", "); - sb.append("configuration:"); - if (this.configuration == null) { - sb.append("null"); - } else { - sb.append(this.configuration); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetClient_protocol()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'client_protocol' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TOpenSessionReqStandardSchemeFactory implements SchemeFactory { - public TOpenSessionReqStandardScheme getScheme() { - return new TOpenSessionReqStandardScheme(); - } - } - - private static class TOpenSessionReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TOpenSessionReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // CLIENT_PROTOCOL - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.client_protocol = org.apache.hive.service.rpc.thrift.TProtocolVersion.findByValue(iprot.readI32()); - struct.setClient_protocolIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // USERNAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.username = iprot.readString(); - struct.setUsernameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // PASSWORD - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.password = iprot.readString(); - struct.setPasswordIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // CONFIGURATION - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map142 = iprot.readMapBegin(); - struct.configuration = new HashMap(2*_map142.size); - String _key143; - String _val144; - for (int _i145 = 0; _i145 < _map142.size; ++_i145) - { - _key143 = iprot.readString(); - _val144 = iprot.readString(); - struct.configuration.put(_key143, _val144); - } - iprot.readMapEnd(); - } - struct.setConfigurationIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - 
iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TOpenSessionReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.client_protocol != null) { - oprot.writeFieldBegin(CLIENT_PROTOCOL_FIELD_DESC); - oprot.writeI32(struct.client_protocol.getValue()); - oprot.writeFieldEnd(); - } - if (struct.username != null) { - if (struct.isSetUsername()) { - oprot.writeFieldBegin(USERNAME_FIELD_DESC); - oprot.writeString(struct.username); - oprot.writeFieldEnd(); - } - } - if (struct.password != null) { - if (struct.isSetPassword()) { - oprot.writeFieldBegin(PASSWORD_FIELD_DESC); - oprot.writeString(struct.password); - oprot.writeFieldEnd(); - } - } - if (struct.configuration != null) { - if (struct.isSetConfiguration()) { - oprot.writeFieldBegin(CONFIGURATION_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.configuration.size())); - for (Map.Entry _iter146 : struct.configuration.entrySet()) - { - oprot.writeString(_iter146.getKey()); - oprot.writeString(_iter146.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TOpenSessionReqTupleSchemeFactory implements SchemeFactory { - public TOpenSessionReqTupleScheme getScheme() { - return new TOpenSessionReqTupleScheme(); - } - } - - private static class TOpenSessionReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TOpenSessionReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.client_protocol.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetUsername()) { - optionals.set(0); - } - if (struct.isSetPassword()) { - optionals.set(1); - } - if (struct.isSetConfiguration()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetUsername()) { - oprot.writeString(struct.username); - } - if (struct.isSetPassword()) { - oprot.writeString(struct.password); - } - if (struct.isSetConfiguration()) { - { - oprot.writeI32(struct.configuration.size()); - for (Map.Entry _iter147 : struct.configuration.entrySet()) - { - oprot.writeString(_iter147.getKey()); - oprot.writeString(_iter147.getValue()); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TOpenSessionReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.client_protocol = org.apache.hive.service.rpc.thrift.TProtocolVersion.findByValue(iprot.readI32()); - struct.setClient_protocolIsSet(true); - BitSet incoming = iprot.readBitSet(3); - if (incoming.get(0)) { - struct.username = iprot.readString(); - struct.setUsernameIsSet(true); - } - if (incoming.get(1)) { - struct.password = iprot.readString(); - struct.setPasswordIsSet(true); - } - if (incoming.get(2)) { - { - org.apache.thrift.protocol.TMap _map148 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.configuration = new HashMap(2*_map148.size); - String _key149; - String _val150; - for (int _i151 = 0; _i151 < _map148.size; ++_i151) - { - _key149 = iprot.readString(); - _val150 = iprot.readString(); - struct.configuration.put(_key149, _val150); - } - } - 
struct.setConfigurationIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOpenSessionResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOpenSessionResp.java deleted file mode 100644 index ee1c87bfd76fa..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOpenSessionResp.java +++ /dev/null @@ -1,783 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TOpenSessionResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOpenSessionResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField SERVER_PROTOCOL_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("serverProtocolVersion", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField CONFIGURATION_FIELD_DESC = new org.apache.thrift.protocol.TField("configuration", org.apache.thrift.protocol.TType.MAP, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TOpenSessionRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TOpenSessionRespTupleSchemeFactory()); - } - - private TStatus status; // required - private TProtocolVersion serverProtocolVersion; // required - private TSessionHandle sessionHandle; // optional - private Map configuration; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"), - /** - * - * @see TProtocolVersion - */ - SERVER_PROTOCOL_VERSION((short)2, "serverProtocolVersion"), - SESSION_HANDLE((short)3, "sessionHandle"), - CONFIGURATION((short)4, "configuration"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - case 2: // SERVER_PROTOCOL_VERSION - return SERVER_PROTOCOL_VERSION; - case 3: // SESSION_HANDLE - return SESSION_HANDLE; - case 4: // CONFIGURATION - return CONFIGURATION; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.SESSION_HANDLE,_Fields.CONFIGURATION}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - tmpMap.put(_Fields.SERVER_PROTOCOL_VERSION, new org.apache.thrift.meta_data.FieldMetaData("serverProtocolVersion", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TProtocolVersion.class))); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.CONFIGURATION, new org.apache.thrift.meta_data.FieldMetaData("configuration", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOpenSessionResp.class, metaDataMap); - } - - public TOpenSessionResp() { - this.serverProtocolVersion = 
org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10; - - } - - public TOpenSessionResp( - TStatus status, - TProtocolVersion serverProtocolVersion) - { - this(); - this.status = status; - this.serverProtocolVersion = serverProtocolVersion; - } - - /** - * Performs a deep copy on other. - */ - public TOpenSessionResp(TOpenSessionResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - if (other.isSetServerProtocolVersion()) { - this.serverProtocolVersion = other.serverProtocolVersion; - } - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetConfiguration()) { - Map __this__configuration = new HashMap(other.configuration); - this.configuration = __this__configuration; - } - } - - public TOpenSessionResp deepCopy() { - return new TOpenSessionResp(this); - } - - @Override - public void clear() { - this.status = null; - this.serverProtocolVersion = org.apache.hive.service.rpc.thrift.TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V10; - - this.sessionHandle = null; - this.configuration = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - /** - * - * @see TProtocolVersion - */ - public TProtocolVersion getServerProtocolVersion() { - return this.serverProtocolVersion; - } - - /** - * - * @see TProtocolVersion - */ - public void setServerProtocolVersion(TProtocolVersion serverProtocolVersion) { - this.serverProtocolVersion = serverProtocolVersion; - } - - public void unsetServerProtocolVersion() { - this.serverProtocolVersion = null; - } - - /** Returns true if field serverProtocolVersion is set (has been assigned a value) and false otherwise */ - public boolean isSetServerProtocolVersion() { - return this.serverProtocolVersion != null; - } - - public void setServerProtocolVersionIsSet(boolean value) { - if (!value) { - this.serverProtocolVersion = null; - } - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public int getConfigurationSize() { - return (this.configuration == null) ? 
0 : this.configuration.size(); - } - - public void putToConfiguration(String key, String val) { - if (this.configuration == null) { - this.configuration = new HashMap(); - } - this.configuration.put(key, val); - } - - public Map getConfiguration() { - return this.configuration; - } - - public void setConfiguration(Map configuration) { - this.configuration = configuration; - } - - public void unsetConfiguration() { - this.configuration = null; - } - - /** Returns true if field configuration is set (has been assigned a value) and false otherwise */ - public boolean isSetConfiguration() { - return this.configuration != null; - } - - public void setConfigurationIsSet(boolean value) { - if (!value) { - this.configuration = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - case SERVER_PROTOCOL_VERSION: - if (value == null) { - unsetServerProtocolVersion(); - } else { - setServerProtocolVersion((TProtocolVersion)value); - } - break; - - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case CONFIGURATION: - if (value == null) { - unsetConfiguration(); - } else { - setConfiguration((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - case SERVER_PROTOCOL_VERSION: - return getServerProtocolVersion(); - - case SESSION_HANDLE: - return getSessionHandle(); - - case CONFIGURATION: - return getConfiguration(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - case SERVER_PROTOCOL_VERSION: - return isSetServerProtocolVersion(); - case SESSION_HANDLE: - return isSetSessionHandle(); - case CONFIGURATION: - return isSetConfiguration(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TOpenSessionResp) - return this.equals((TOpenSessionResp)that); - return false; - } - - public boolean equals(TOpenSessionResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_serverProtocolVersion = true && this.isSetServerProtocolVersion(); - boolean that_present_serverProtocolVersion = true && that.isSetServerProtocolVersion(); - if (this_present_serverProtocolVersion || that_present_serverProtocolVersion) { - if (!(this_present_serverProtocolVersion && that_present_serverProtocolVersion)) - return false; - if (!this.serverProtocolVersion.equals(that.serverProtocolVersion)) - return false; - } - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if 
(!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_configuration = true && this.isSetConfiguration(); - boolean that_present_configuration = true && that.isSetConfiguration(); - if (this_present_configuration || that_present_configuration) { - if (!(this_present_configuration && that_present_configuration)) - return false; - if (!this.configuration.equals(that.configuration)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - boolean present_serverProtocolVersion = true && (isSetServerProtocolVersion()); - list.add(present_serverProtocolVersion); - if (present_serverProtocolVersion) - list.add(serverProtocolVersion.getValue()); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_configuration = true && (isSetConfiguration()); - list.add(present_configuration); - if (present_configuration) - list.add(configuration); - - return list.hashCode(); - } - - @Override - public int compareTo(TOpenSessionResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetServerProtocolVersion()).compareTo(other.isSetServerProtocolVersion()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetServerProtocolVersion()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serverProtocolVersion, other.serverProtocolVersion); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetConfiguration()).compareTo(other.isSetConfiguration()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetConfiguration()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.configuration, other.configuration); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TOpenSessionResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (!first) 
sb.append(", "); - sb.append("serverProtocolVersion:"); - if (this.serverProtocolVersion == null) { - sb.append("null"); - } else { - sb.append(this.serverProtocolVersion); - } - first = false; - if (isSetSessionHandle()) { - if (!first) sb.append(", "); - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - } - if (isSetConfiguration()) { - if (!first) sb.append(", "); - sb.append("configuration:"); - if (this.configuration == null) { - sb.append("null"); - } else { - sb.append(this.configuration); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - if (!isSetServerProtocolVersion()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'serverProtocolVersion' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TOpenSessionRespStandardSchemeFactory implements SchemeFactory { - public TOpenSessionRespStandardScheme getScheme() { - return new TOpenSessionRespStandardScheme(); - } - } - - private static class TOpenSessionRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TOpenSessionResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // SERVER_PROTOCOL_VERSION - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.serverProtocolVersion = org.apache.hive.service.rpc.thrift.TProtocolVersion.findByValue(iprot.readI32()); - struct.setServerProtocolVersionIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // CONFIGURATION - if (schemeField.type == 
org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map152 = iprot.readMapBegin(); - struct.configuration = new HashMap(2*_map152.size); - String _key153; - String _val154; - for (int _i155 = 0; _i155 < _map152.size; ++_i155) - { - _key153 = iprot.readString(); - _val154 = iprot.readString(); - struct.configuration.put(_key153, _val154); - } - iprot.readMapEnd(); - } - struct.setConfigurationIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TOpenSessionResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.serverProtocolVersion != null) { - oprot.writeFieldBegin(SERVER_PROTOCOL_VERSION_FIELD_DESC); - oprot.writeI32(struct.serverProtocolVersion.getValue()); - oprot.writeFieldEnd(); - } - if (struct.sessionHandle != null) { - if (struct.isSetSessionHandle()) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - } - if (struct.configuration != null) { - if (struct.isSetConfiguration()) { - oprot.writeFieldBegin(CONFIGURATION_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.configuration.size())); - for (Map.Entry _iter156 : struct.configuration.entrySet()) - { - oprot.writeString(_iter156.getKey()); - oprot.writeString(_iter156.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TOpenSessionRespTupleSchemeFactory implements SchemeFactory { - public TOpenSessionRespTupleScheme getScheme() { - return new TOpenSessionRespTupleScheme(); - } - } - - private static class TOpenSessionRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TOpenSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - oprot.writeI32(struct.serverProtocolVersion.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetSessionHandle()) { - optionals.set(0); - } - if (struct.isSetConfiguration()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetSessionHandle()) { - struct.sessionHandle.write(oprot); - } - if (struct.isSetConfiguration()) { - { - oprot.writeI32(struct.configuration.size()); - for (Map.Entry _iter157 : struct.configuration.entrySet()) - { - oprot.writeString(_iter157.getKey()); - oprot.writeString(_iter157.getValue()); - } - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TOpenSessionResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - struct.serverProtocolVersion = org.apache.hive.service.rpc.thrift.TProtocolVersion.findByValue(iprot.readI32()); - struct.setServerProtocolVersionIsSet(true); - BitSet incoming = iprot.readBitSet(2); - if 
(incoming.get(0)) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } - if (incoming.get(1)) { - { - org.apache.thrift.protocol.TMap _map158 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.configuration = new HashMap(2*_map158.size); - String _key159; - String _val160; - for (int _i161 = 0; _i161 < _map158.size; ++_i161) - { - _key159 = iprot.readString(); - _val160 = iprot.readString(); - struct.configuration.put(_key159, _val160); - } - } - struct.setConfigurationIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationHandle.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationHandle.java deleted file mode 100644 index 9eaf2be3ed5ea..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationHandle.java +++ /dev/null @@ -1,709 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TOperationHandle implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TOperationHandle"); - - private static final org.apache.thrift.protocol.TField OPERATION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("operationId", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField OPERATION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationType", org.apache.thrift.protocol.TType.I32, (short)2); - private static final org.apache.thrift.protocol.TField HAS_RESULT_SET_FIELD_DESC = new org.apache.thrift.protocol.TField("hasResultSet", org.apache.thrift.protocol.TType.BOOL, (short)3); - private static final org.apache.thrift.protocol.TField MODIFIED_ROW_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("modifiedRowCount", org.apache.thrift.protocol.TType.DOUBLE, (short)4); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TOperationHandleStandardSchemeFactory()); - schemes.put(TupleScheme.class, new 
TOperationHandleTupleSchemeFactory()); - } - - private THandleIdentifier operationId; // required - private TOperationType operationType; // required - private boolean hasResultSet; // required - private double modifiedRowCount; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - OPERATION_ID((short)1, "operationId"), - /** - * - * @see TOperationType - */ - OPERATION_TYPE((short)2, "operationType"), - HAS_RESULT_SET((short)3, "hasResultSet"), - MODIFIED_ROW_COUNT((short)4, "modifiedRowCount"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // OPERATION_ID - return OPERATION_ID; - case 2: // OPERATION_TYPE - return OPERATION_TYPE; - case 3: // HAS_RESULT_SET - return HAS_RESULT_SET; - case 4: // MODIFIED_ROW_COUNT - return MODIFIED_ROW_COUNT; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __HASRESULTSET_ISSET_ID = 0; - private static final int __MODIFIEDROWCOUNT_ISSET_ID = 1; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.MODIFIED_ROW_COUNT}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.OPERATION_ID, new org.apache.thrift.meta_data.FieldMetaData("operationId", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THandleIdentifier.class))); - tmpMap.put(_Fields.OPERATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("operationType", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TOperationType.class))); - tmpMap.put(_Fields.HAS_RESULT_SET, new org.apache.thrift.meta_data.FieldMetaData("hasResultSet", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); - tmpMap.put(_Fields.MODIFIED_ROW_COUNT, new org.apache.thrift.meta_data.FieldMetaData("modifiedRowCount", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TOperationHandle.class, metaDataMap); - } - - public TOperationHandle() { - } - - public TOperationHandle( - THandleIdentifier operationId, - TOperationType operationType, - boolean hasResultSet) - { - this(); - this.operationId = operationId; - this.operationType = operationType; - this.hasResultSet = hasResultSet; - setHasResultSetIsSet(true); - } - - /** - * Performs a deep copy on other. - */ - public TOperationHandle(TOperationHandle other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetOperationId()) { - this.operationId = new THandleIdentifier(other.operationId); - } - if (other.isSetOperationType()) { - this.operationType = other.operationType; - } - this.hasResultSet = other.hasResultSet; - this.modifiedRowCount = other.modifiedRowCount; - } - - public TOperationHandle deepCopy() { - return new TOperationHandle(this); - } - - @Override - public void clear() { - this.operationId = null; - this.operationType = null; - setHasResultSetIsSet(false); - this.hasResultSet = false; - setModifiedRowCountIsSet(false); - this.modifiedRowCount = 0.0; - } - - public THandleIdentifier getOperationId() { - return this.operationId; - } - - public void setOperationId(THandleIdentifier operationId) { - this.operationId = operationId; - } - - public void unsetOperationId() { - this.operationId = null; - } - - /** Returns true if field operationId is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationId() { - return this.operationId != null; - } - - public void setOperationIdIsSet(boolean value) { - if (!value) { - this.operationId = null; - } - } - - /** - * - * @see TOperationType - */ - public TOperationType getOperationType() { - return this.operationType; - } - - /** - * - * @see TOperationType - */ - public void setOperationType(TOperationType operationType) { - this.operationType = operationType; - } - - public void unsetOperationType() { - this.operationType = null; - } - - /** Returns true if field operationType is set (has been assigned a value) and false otherwise */ - public boolean isSetOperationType() { - return this.operationType != null; - } - - public void setOperationTypeIsSet(boolean value) { - if (!value) { - this.operationType = null; - } - } - - public boolean isHasResultSet() { - return this.hasResultSet; - } - - public void setHasResultSet(boolean hasResultSet) { - this.hasResultSet = hasResultSet; - setHasResultSetIsSet(true); - } - - public void unsetHasResultSet() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASRESULTSET_ISSET_ID); - } - - /** Returns true if field hasResultSet is set (has been assigned a value) and false otherwise */ - public boolean isSetHasResultSet() { - return EncodingUtils.testBit(__isset_bitfield, __HASRESULTSET_ISSET_ID); - } - - public void setHasResultSetIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASRESULTSET_ISSET_ID, value); - } - - public double getModifiedRowCount() { - return this.modifiedRowCount; - } - - public void setModifiedRowCount(double modifiedRowCount) { - this.modifiedRowCount = modifiedRowCount; - setModifiedRowCountIsSet(true); - } - - public void unsetModifiedRowCount() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MODIFIEDROWCOUNT_ISSET_ID); - } - - /** Returns true if field modifiedRowCount 
is set (has been assigned a value) and false otherwise */ - public boolean isSetModifiedRowCount() { - return EncodingUtils.testBit(__isset_bitfield, __MODIFIEDROWCOUNT_ISSET_ID); - } - - public void setModifiedRowCountIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MODIFIEDROWCOUNT_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case OPERATION_ID: - if (value == null) { - unsetOperationId(); - } else { - setOperationId((THandleIdentifier)value); - } - break; - - case OPERATION_TYPE: - if (value == null) { - unsetOperationType(); - } else { - setOperationType((TOperationType)value); - } - break; - - case HAS_RESULT_SET: - if (value == null) { - unsetHasResultSet(); - } else { - setHasResultSet((Boolean)value); - } - break; - - case MODIFIED_ROW_COUNT: - if (value == null) { - unsetModifiedRowCount(); - } else { - setModifiedRowCount((Double)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case OPERATION_ID: - return getOperationId(); - - case OPERATION_TYPE: - return getOperationType(); - - case HAS_RESULT_SET: - return isHasResultSet(); - - case MODIFIED_ROW_COUNT: - return getModifiedRowCount(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case OPERATION_ID: - return isSetOperationId(); - case OPERATION_TYPE: - return isSetOperationType(); - case HAS_RESULT_SET: - return isSetHasResultSet(); - case MODIFIED_ROW_COUNT: - return isSetModifiedRowCount(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TOperationHandle) - return this.equals((TOperationHandle)that); - return false; - } - - public boolean equals(TOperationHandle that) { - if (that == null) - return false; - - boolean this_present_operationId = true && this.isSetOperationId(); - boolean that_present_operationId = true && that.isSetOperationId(); - if (this_present_operationId || that_present_operationId) { - if (!(this_present_operationId && that_present_operationId)) - return false; - if (!this.operationId.equals(that.operationId)) - return false; - } - - boolean this_present_operationType = true && this.isSetOperationType(); - boolean that_present_operationType = true && that.isSetOperationType(); - if (this_present_operationType || that_present_operationType) { - if (!(this_present_operationType && that_present_operationType)) - return false; - if (!this.operationType.equals(that.operationType)) - return false; - } - - boolean this_present_hasResultSet = true; - boolean that_present_hasResultSet = true; - if (this_present_hasResultSet || that_present_hasResultSet) { - if (!(this_present_hasResultSet && that_present_hasResultSet)) - return false; - if (this.hasResultSet != that.hasResultSet) - return false; - } - - boolean this_present_modifiedRowCount = true && this.isSetModifiedRowCount(); - boolean that_present_modifiedRowCount = true && that.isSetModifiedRowCount(); - if (this_present_modifiedRowCount || that_present_modifiedRowCount) { - if (!(this_present_modifiedRowCount && that_present_modifiedRowCount)) - return false; - if (this.modifiedRowCount != that.modifiedRowCount) - return false; - } - - return true; - } - - @Override - 
public int hashCode() { - List list = new ArrayList(); - - boolean present_operationId = true && (isSetOperationId()); - list.add(present_operationId); - if (present_operationId) - list.add(operationId); - - boolean present_operationType = true && (isSetOperationType()); - list.add(present_operationType); - if (present_operationType) - list.add(operationType.getValue()); - - boolean present_hasResultSet = true; - list.add(present_hasResultSet); - if (present_hasResultSet) - list.add(hasResultSet); - - boolean present_modifiedRowCount = true && (isSetModifiedRowCount()); - list.add(present_modifiedRowCount); - if (present_modifiedRowCount) - list.add(modifiedRowCount); - - return list.hashCode(); - } - - @Override - public int compareTo(TOperationHandle other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetOperationId()).compareTo(other.isSetOperationId()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationId()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationId, other.operationId); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetOperationType()).compareTo(other.isSetOperationType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetOperationType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationType, other.operationType); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetHasResultSet()).compareTo(other.isSetHasResultSet()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHasResultSet()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasResultSet, other.hasResultSet); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetModifiedRowCount()).compareTo(other.isSetModifiedRowCount()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetModifiedRowCount()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.modifiedRowCount, other.modifiedRowCount); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TOperationHandle("); - boolean first = true; - - sb.append("operationId:"); - if (this.operationId == null) { - sb.append("null"); - } else { - sb.append(this.operationId); - } - first = false; - if (!first) sb.append(", "); - sb.append("operationType:"); - if (this.operationType == null) { - sb.append("null"); - } else { - sb.append(this.operationType); - } - first = false; - if (!first) sb.append(", "); - sb.append("hasResultSet:"); - sb.append(this.hasResultSet); - first = false; - if (isSetModifiedRowCount()) { - if (!first) sb.append(", "); - sb.append("modifiedRowCount:"); - sb.append(this.modifiedRowCount); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void 
validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetOperationId()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationId' is unset! Struct:" + toString()); - } - - if (!isSetOperationType()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'operationType' is unset! Struct:" + toString()); - } - - if (!isSetHasResultSet()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'hasResultSet' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (operationId != null) { - operationId.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TOperationHandleStandardSchemeFactory implements SchemeFactory { - public TOperationHandleStandardScheme getScheme() { - return new TOperationHandleStandardScheme(); - } - } - - private static class TOperationHandleStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TOperationHandle struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // OPERATION_ID - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.operationId = new THandleIdentifier(); - struct.operationId.read(iprot); - struct.setOperationIdIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // OPERATION_TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.operationType = org.apache.hive.service.rpc.thrift.TOperationType.findByValue(iprot.readI32()); - struct.setOperationTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // HAS_RESULT_SET - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.hasResultSet = iprot.readBool(); - struct.setHasResultSetIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // MODIFIED_ROW_COUNT - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.modifiedRowCount = iprot.readDouble(); - struct.setModifiedRowCountIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TOperationHandle struct) 
throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.operationId != null) { - oprot.writeFieldBegin(OPERATION_ID_FIELD_DESC); - struct.operationId.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.operationType != null) { - oprot.writeFieldBegin(OPERATION_TYPE_FIELD_DESC); - oprot.writeI32(struct.operationType.getValue()); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(HAS_RESULT_SET_FIELD_DESC); - oprot.writeBool(struct.hasResultSet); - oprot.writeFieldEnd(); - if (struct.isSetModifiedRowCount()) { - oprot.writeFieldBegin(MODIFIED_ROW_COUNT_FIELD_DESC); - oprot.writeDouble(struct.modifiedRowCount); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TOperationHandleTupleSchemeFactory implements SchemeFactory { - public TOperationHandleTupleScheme getScheme() { - return new TOperationHandleTupleScheme(); - } - } - - private static class TOperationHandleTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TOperationHandle struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.operationId.write(oprot); - oprot.writeI32(struct.operationType.getValue()); - oprot.writeBool(struct.hasResultSet); - BitSet optionals = new BitSet(); - if (struct.isSetModifiedRowCount()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetModifiedRowCount()) { - oprot.writeDouble(struct.modifiedRowCount); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TOperationHandle struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.operationId = new THandleIdentifier(); - struct.operationId.read(iprot); - struct.setOperationIdIsSet(true); - struct.operationType = org.apache.hive.service.rpc.thrift.TOperationType.findByValue(iprot.readI32()); - struct.setOperationTypeIsSet(true); - struct.hasResultSet = iprot.readBool(); - struct.setHasResultSetIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.modifiedRowCount = iprot.readDouble(); - struct.setModifiedRowCountIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationState.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationState.java deleted file mode 100644 index 4390b4b887583..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationState.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TOperationState implements org.apache.thrift.TEnum { - INITIALIZED_STATE(0), - RUNNING_STATE(1), - FINISHED_STATE(2), - CANCELED_STATE(3), - CLOSED_STATE(4), - ERROR_STATE(5), - UKNOWN_STATE(6), - PENDING_STATE(7), - TIMEDOUT_STATE(8); - - private final int value; - - private TOperationState(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. 
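For context on the TOperationHandle removal above: it is a Thrift-0.9.3-generated struct, so callers only ever build it, validate it, and hand it to a protocol. A minimal, hypothetical round-trip sketch using libthrift's TSerializer/TDeserializer follows; the THandleIdentifier two-ByteBuffer constructor and the zeroed guid/secret bytes are illustrative assumptions, not taken from this diff.

import java.nio.ByteBuffer;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.hive.service.rpc.thrift.THandleIdentifier;
import org.apache.hive.service.rpc.thrift.TOperationHandle;
import org.apache.hive.service.rpc.thrift.TOperationType;

public class OperationHandleRoundTrip {
  public static void main(String[] args) throws TException {
    // Placeholder identifier bytes; real handles carry a server-issued guid/secret.
    THandleIdentifier id = new THandleIdentifier(
        ByteBuffer.wrap(new byte[16]), ByteBuffer.wrap(new byte[16]));

    // Required fields come from the generated three-argument constructor; the
    // optional modifiedRowCount flips its isset bit when set explicitly.
    TOperationHandle handle =
        new TOperationHandle(id, TOperationType.EXECUTE_STATEMENT, true);
    handle.setModifiedRowCount(42.0);
    handle.validate(); // would throw TProtocolException if a required field were unset

    byte[] wire = new TSerializer(new TBinaryProtocol.Factory()).serialize(handle);

    TOperationHandle copy = new TOperationHandle();
    new TDeserializer(new TBinaryProtocol.Factory()).deserialize(copy, wire);
    System.out.println(copy.isSetModifiedRowCount() + " " + copy.getModifiedRowCount());
  }
}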
- * @return null if the value is not found. - */ - public static TOperationState findByValue(int value) { - switch (value) { - case 0: - return INITIALIZED_STATE; - case 1: - return RUNNING_STATE; - case 2: - return FINISHED_STATE; - case 3: - return CANCELED_STATE; - case 4: - return CLOSED_STATE; - case 5: - return ERROR_STATE; - case 6: - return UKNOWN_STATE; - case 7: - return PENDING_STATE; - case 8: - return TIMEDOUT_STATE; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationType.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationType.java deleted file mode 100644 index 08002ad1dc8e8..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TOperationType.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TOperationType implements org.apache.thrift.TEnum { - EXECUTE_STATEMENT(0), - GET_TYPE_INFO(1), - GET_CATALOGS(2), - GET_SCHEMAS(3), - GET_TABLES(4), - GET_TABLE_TYPES(5), - GET_COLUMNS(6), - GET_FUNCTIONS(7), - UNKNOWN(8); - - private final int value; - - private TOperationType(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. - */ - public static TOperationType findByValue(int value) { - switch (value) { - case 0: - return EXECUTE_STATEMENT; - case 1: - return GET_TYPE_INFO; - case 2: - return GET_CATALOGS; - case 3: - return GET_SCHEMAS; - case 4: - return GET_TABLES; - case 5: - return GET_TABLE_TYPES; - case 6: - return GET_COLUMNS; - case 7: - return GET_FUNCTIONS; - case 8: - return UNKNOWN; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TPrimitiveTypeEntry.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TPrimitiveTypeEntry.java deleted file mode 100644 index 910c90967f614..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TPrimitiveTypeEntry.java +++ /dev/null @@ -1,516 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import 
java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TPrimitiveTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TPrimitiveTypeEntry"); - - private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField TYPE_QUALIFIERS_FIELD_DESC = new org.apache.thrift.protocol.TField("typeQualifiers", org.apache.thrift.protocol.TType.STRUCT, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TPrimitiveTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TPrimitiveTypeEntryTupleSchemeFactory()); - } - - private TTypeId type; // required - private TTypeQualifiers typeQualifiers; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see TTypeId - */ - TYPE((short)1, "type"), - TYPE_QUALIFIERS((short)2, "typeQualifiers"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // TYPE - return TYPE; - case 2: // TYPE_QUALIFIERS - return TYPE_QUALIFIERS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.TYPE_QUALIFIERS}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TTypeId.class))); - tmpMap.put(_Fields.TYPE_QUALIFIERS, new org.apache.thrift.meta_data.FieldMetaData("typeQualifiers", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeQualifiers.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TPrimitiveTypeEntry.class, metaDataMap); - } - - public TPrimitiveTypeEntry() { - } - - public TPrimitiveTypeEntry( - TTypeId type) - { - this(); - this.type = type; - } - - /** - * Performs a deep copy on other. - */ - public TPrimitiveTypeEntry(TPrimitiveTypeEntry other) { - if (other.isSetType()) { - this.type = other.type; - } - if (other.isSetTypeQualifiers()) { - this.typeQualifiers = new TTypeQualifiers(other.typeQualifiers); - } - } - - public TPrimitiveTypeEntry deepCopy() { - return new TPrimitiveTypeEntry(this); - } - - @Override - public void clear() { - this.type = null; - this.typeQualifiers = null; - } - - /** - * - * @see TTypeId - */ - public TTypeId getType() { - return this.type; - } - - /** - * - * @see TTypeId - */ - public void setType(TTypeId type) { - this.type = type; - } - - public void unsetType() { - this.type = null; - } - - /** Returns true if field type is set (has been assigned a value) and false otherwise */ - public boolean isSetType() { - return this.type != null; - } - - public void setTypeIsSet(boolean value) { - if (!value) { - this.type = null; - } - } - - public TTypeQualifiers getTypeQualifiers() { - return this.typeQualifiers; - } - - public void setTypeQualifiers(TTypeQualifiers typeQualifiers) { - this.typeQualifiers = typeQualifiers; - } - - public void unsetTypeQualifiers() { - this.typeQualifiers = null; - } - - /** Returns true if field typeQualifiers is set (has been assigned a value) and false otherwise */ - public boolean isSetTypeQualifiers() { - return this.typeQualifiers != null; - } - - public void setTypeQualifiersIsSet(boolean value) { - if (!value) { - this.typeQualifiers = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case TYPE: - if (value == null) { - unsetType(); - } else { - setType((TTypeId)value); - } - break; - - case TYPE_QUALIFIERS: - if (value == null) { - unsetTypeQualifiers(); - } else { - setTypeQualifiers((TTypeQualifiers)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case TYPE: - return getType(); - - case TYPE_QUALIFIERS: - return getTypeQualifiers(); - - } - throw new 
IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case TYPE: - return isSetType(); - case TYPE_QUALIFIERS: - return isSetTypeQualifiers(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TPrimitiveTypeEntry) - return this.equals((TPrimitiveTypeEntry)that); - return false; - } - - public boolean equals(TPrimitiveTypeEntry that) { - if (that == null) - return false; - - boolean this_present_type = true && this.isSetType(); - boolean that_present_type = true && that.isSetType(); - if (this_present_type || that_present_type) { - if (!(this_present_type && that_present_type)) - return false; - if (!this.type.equals(that.type)) - return false; - } - - boolean this_present_typeQualifiers = true && this.isSetTypeQualifiers(); - boolean that_present_typeQualifiers = true && that.isSetTypeQualifiers(); - if (this_present_typeQualifiers || that_present_typeQualifiers) { - if (!(this_present_typeQualifiers && that_present_typeQualifiers)) - return false; - if (!this.typeQualifiers.equals(that.typeQualifiers)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_type = true && (isSetType()); - list.add(present_type); - if (present_type) - list.add(type.getValue()); - - boolean present_typeQualifiers = true && (isSetTypeQualifiers()); - list.add(present_typeQualifiers); - if (present_typeQualifiers) - list.add(typeQualifiers); - - return list.hashCode(); - } - - @Override - public int compareTo(TPrimitiveTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetType()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetTypeQualifiers()).compareTo(other.isSetTypeQualifiers()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTypeQualifiers()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.typeQualifiers, other.typeQualifiers); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TPrimitiveTypeEntry("); - boolean first = true; - - sb.append("type:"); - if (this.type == null) { - sb.append("null"); - } else { - sb.append(this.type); - } - first = false; - if (isSetTypeQualifiers()) { - if (!first) sb.append(", "); - sb.append("typeQualifiers:"); - if (this.typeQualifiers == null) { - sb.append("null"); - } else { - sb.append(this.typeQualifiers); - 
} - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetType()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - if (typeQualifiers != null) { - typeQualifiers.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TPrimitiveTypeEntryStandardSchemeFactory implements SchemeFactory { - public TPrimitiveTypeEntryStandardScheme getScheme() { - return new TPrimitiveTypeEntryStandardScheme(); - } - } - - private static class TPrimitiveTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TPrimitiveTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TYPE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.type = org.apache.hive.service.rpc.thrift.TTypeId.findByValue(iprot.readI32()); - struct.setTypeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // TYPE_QUALIFIERS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.typeQualifiers = new TTypeQualifiers(); - struct.typeQualifiers.read(iprot); - struct.setTypeQualifiersIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TPrimitiveTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.type != null) { - oprot.writeFieldBegin(TYPE_FIELD_DESC); - oprot.writeI32(struct.type.getValue()); - oprot.writeFieldEnd(); - } - if (struct.typeQualifiers != null) { - if (struct.isSetTypeQualifiers()) { - oprot.writeFieldBegin(TYPE_QUALIFIERS_FIELD_DESC); - struct.typeQualifiers.write(oprot); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TPrimitiveTypeEntryTupleSchemeFactory implements SchemeFactory { - public TPrimitiveTypeEntryTupleScheme getScheme() { - return new TPrimitiveTypeEntryTupleScheme(); - } - } - - private static class TPrimitiveTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TPrimitiveTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) 
prot; - oprot.writeI32(struct.type.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetTypeQualifiers()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetTypeQualifiers()) { - struct.typeQualifiers.write(oprot); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TPrimitiveTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.type = org.apache.hive.service.rpc.thrift.TTypeId.findByValue(iprot.readI32()); - struct.setTypeIsSet(true); - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.typeQualifiers = new TTypeQualifiers(); - struct.typeQualifiers.read(iprot); - struct.setTypeQualifiersIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TProgressUpdateResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TProgressUpdateResp.java deleted file mode 100644 index ecc413aad4cdc..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TProgressUpdateResp.java +++ /dev/null @@ -1,1033 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TProgressUpdateResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TProgressUpdateResp"); - - private static final org.apache.thrift.protocol.TField HEADER_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("headerNames", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2); - private static final org.apache.thrift.protocol.TField PROGRESSED_PERCENTAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("progressedPercentage", org.apache.thrift.protocol.TType.DOUBLE, (short)3); - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField FOOTER_SUMMARY_FIELD_DESC = new org.apache.thrift.protocol.TField("footerSummary", 
org.apache.thrift.protocol.TType.STRING, (short)5); - private static final org.apache.thrift.protocol.TField START_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("startTime", org.apache.thrift.protocol.TType.I64, (short)6); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TProgressUpdateRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TProgressUpdateRespTupleSchemeFactory()); - } - - private List headerNames; // required - private List> rows; // required - private double progressedPercentage; // required - private TJobExecutionStatus status; // required - private String footerSummary; // required - private long startTime; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - HEADER_NAMES((short)1, "headerNames"), - ROWS((short)2, "rows"), - PROGRESSED_PERCENTAGE((short)3, "progressedPercentage"), - /** - * - * @see TJobExecutionStatus - */ - STATUS((short)4, "status"), - FOOTER_SUMMARY((short)5, "footerSummary"), - START_TIME((short)6, "startTime"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // HEADER_NAMES - return HEADER_NAMES; - case 2: // ROWS - return ROWS; - case 3: // PROGRESSED_PERCENTAGE - return PROGRESSED_PERCENTAGE; - case 4: // STATUS - return STATUS; - case 5: // FOOTER_SUMMARY - return FOOTER_SUMMARY; - case 6: // START_TIME - return START_TIME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __PROGRESSEDPERCENTAGE_ISSET_ID = 0; - private static final int __STARTTIME_ISSET_ID = 1; - private byte __isset_bitfield = 0; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.HEADER_NAMES, new org.apache.thrift.meta_data.FieldMetaData("headerNames", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))))); - tmpMap.put(_Fields.PROGRESSED_PERCENTAGE, new org.apache.thrift.meta_data.FieldMetaData("progressedPercentage", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE))); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TJobExecutionStatus.class))); - tmpMap.put(_Fields.FOOTER_SUMMARY, new org.apache.thrift.meta_data.FieldMetaData("footerSummary", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.START_TIME, new org.apache.thrift.meta_data.FieldMetaData("startTime", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TProgressUpdateResp.class, metaDataMap); - } - - public TProgressUpdateResp() { - } - - public TProgressUpdateResp( - List headerNames, - List> rows, - double progressedPercentage, - TJobExecutionStatus status, - String footerSummary, - long startTime) - { - this(); - this.headerNames = headerNames; - this.rows = rows; - this.progressedPercentage = progressedPercentage; - setProgressedPercentageIsSet(true); - this.status = status; - this.footerSummary = footerSummary; - this.startTime = startTime; - setStartTimeIsSet(true); - } - - /** - * Performs a deep copy on other. 
- */ - public TProgressUpdateResp(TProgressUpdateResp other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetHeaderNames()) { - List __this__headerNames = new ArrayList(other.headerNames); - this.headerNames = __this__headerNames; - } - if (other.isSetRows()) { - List> __this__rows = new ArrayList>(other.rows.size()); - for (List other_element : other.rows) { - List __this__rows_copy = new ArrayList(other_element); - __this__rows.add(__this__rows_copy); - } - this.rows = __this__rows; - } - this.progressedPercentage = other.progressedPercentage; - if (other.isSetStatus()) { - this.status = other.status; - } - if (other.isSetFooterSummary()) { - this.footerSummary = other.footerSummary; - } - this.startTime = other.startTime; - } - - public TProgressUpdateResp deepCopy() { - return new TProgressUpdateResp(this); - } - - @Override - public void clear() { - this.headerNames = null; - this.rows = null; - setProgressedPercentageIsSet(false); - this.progressedPercentage = 0.0; - this.status = null; - this.footerSummary = null; - setStartTimeIsSet(false); - this.startTime = 0; - } - - public int getHeaderNamesSize() { - return (this.headerNames == null) ? 0 : this.headerNames.size(); - } - - public java.util.Iterator getHeaderNamesIterator() { - return (this.headerNames == null) ? null : this.headerNames.iterator(); - } - - public void addToHeaderNames(String elem) { - if (this.headerNames == null) { - this.headerNames = new ArrayList(); - } - this.headerNames.add(elem); - } - - public List getHeaderNames() { - return this.headerNames; - } - - public void setHeaderNames(List headerNames) { - this.headerNames = headerNames; - } - - public void unsetHeaderNames() { - this.headerNames = null; - } - - /** Returns true if field headerNames is set (has been assigned a value) and false otherwise */ - public boolean isSetHeaderNames() { - return this.headerNames != null; - } - - public void setHeaderNamesIsSet(boolean value) { - if (!value) { - this.headerNames = null; - } - } - - public int getRowsSize() { - return (this.rows == null) ? 0 : this.rows.size(); - } - - public java.util.Iterator> getRowsIterator() { - return (this.rows == null) ? 
null : this.rows.iterator(); - } - - public void addToRows(List elem) { - if (this.rows == null) { - this.rows = new ArrayList>(); - } - this.rows.add(elem); - } - - public List> getRows() { - return this.rows; - } - - public void setRows(List> rows) { - this.rows = rows; - } - - public void unsetRows() { - this.rows = null; - } - - /** Returns true if field rows is set (has been assigned a value) and false otherwise */ - public boolean isSetRows() { - return this.rows != null; - } - - public void setRowsIsSet(boolean value) { - if (!value) { - this.rows = null; - } - } - - public double getProgressedPercentage() { - return this.progressedPercentage; - } - - public void setProgressedPercentage(double progressedPercentage) { - this.progressedPercentage = progressedPercentage; - setProgressedPercentageIsSet(true); - } - - public void unsetProgressedPercentage() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PROGRESSEDPERCENTAGE_ISSET_ID); - } - - /** Returns true if field progressedPercentage is set (has been assigned a value) and false otherwise */ - public boolean isSetProgressedPercentage() { - return EncodingUtils.testBit(__isset_bitfield, __PROGRESSEDPERCENTAGE_ISSET_ID); - } - - public void setProgressedPercentageIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PROGRESSEDPERCENTAGE_ISSET_ID, value); - } - - /** - * - * @see TJobExecutionStatus - */ - public TJobExecutionStatus getStatus() { - return this.status; - } - - /** - * - * @see TJobExecutionStatus - */ - public void setStatus(TJobExecutionStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public String getFooterSummary() { - return this.footerSummary; - } - - public void setFooterSummary(String footerSummary) { - this.footerSummary = footerSummary; - } - - public void unsetFooterSummary() { - this.footerSummary = null; - } - - /** Returns true if field footerSummary is set (has been assigned a value) and false otherwise */ - public boolean isSetFooterSummary() { - return this.footerSummary != null; - } - - public void setFooterSummaryIsSet(boolean value) { - if (!value) { - this.footerSummary = null; - } - } - - public long getStartTime() { - return this.startTime; - } - - public void setStartTime(long startTime) { - this.startTime = startTime; - setStartTimeIsSet(true); - } - - public void unsetStartTime() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __STARTTIME_ISSET_ID); - } - - /** Returns true if field startTime is set (has been assigned a value) and false otherwise */ - public boolean isSetStartTime() { - return EncodingUtils.testBit(__isset_bitfield, __STARTTIME_ISSET_ID); - } - - public void setStartTimeIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __STARTTIME_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case HEADER_NAMES: - if (value == null) { - unsetHeaderNames(); - } else { - setHeaderNames((List)value); - } - break; - - case ROWS: - if (value == null) { - unsetRows(); - } else { - setRows((List>)value); - } - break; - - case PROGRESSED_PERCENTAGE: - if (value == null) { - unsetProgressedPercentage(); - } else { - 
setProgressedPercentage((Double)value); - } - break; - - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TJobExecutionStatus)value); - } - break; - - case FOOTER_SUMMARY: - if (value == null) { - unsetFooterSummary(); - } else { - setFooterSummary((String)value); - } - break; - - case START_TIME: - if (value == null) { - unsetStartTime(); - } else { - setStartTime((Long)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case HEADER_NAMES: - return getHeaderNames(); - - case ROWS: - return getRows(); - - case PROGRESSED_PERCENTAGE: - return getProgressedPercentage(); - - case STATUS: - return getStatus(); - - case FOOTER_SUMMARY: - return getFooterSummary(); - - case START_TIME: - return getStartTime(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case HEADER_NAMES: - return isSetHeaderNames(); - case ROWS: - return isSetRows(); - case PROGRESSED_PERCENTAGE: - return isSetProgressedPercentage(); - case STATUS: - return isSetStatus(); - case FOOTER_SUMMARY: - return isSetFooterSummary(); - case START_TIME: - return isSetStartTime(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TProgressUpdateResp) - return this.equals((TProgressUpdateResp)that); - return false; - } - - public boolean equals(TProgressUpdateResp that) { - if (that == null) - return false; - - boolean this_present_headerNames = true && this.isSetHeaderNames(); - boolean that_present_headerNames = true && that.isSetHeaderNames(); - if (this_present_headerNames || that_present_headerNames) { - if (!(this_present_headerNames && that_present_headerNames)) - return false; - if (!this.headerNames.equals(that.headerNames)) - return false; - } - - boolean this_present_rows = true && this.isSetRows(); - boolean that_present_rows = true && that.isSetRows(); - if (this_present_rows || that_present_rows) { - if (!(this_present_rows && that_present_rows)) - return false; - if (!this.rows.equals(that.rows)) - return false; - } - - boolean this_present_progressedPercentage = true; - boolean that_present_progressedPercentage = true; - if (this_present_progressedPercentage || that_present_progressedPercentage) { - if (!(this_present_progressedPercentage && that_present_progressedPercentage)) - return false; - if (this.progressedPercentage != that.progressedPercentage) - return false; - } - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return false; - if (!this.status.equals(that.status)) - return false; - } - - boolean this_present_footerSummary = true && this.isSetFooterSummary(); - boolean that_present_footerSummary = true && that.isSetFooterSummary(); - if (this_present_footerSummary || that_present_footerSummary) { - if (!(this_present_footerSummary && that_present_footerSummary)) - return false; - if (!this.footerSummary.equals(that.footerSummary)) - return false; - } - - boolean this_present_startTime = true; - boolean that_present_startTime = true; - if (this_present_startTime || that_present_startTime) { - if 
(!(this_present_startTime && that_present_startTime)) - return false; - if (this.startTime != that.startTime) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_headerNames = true && (isSetHeaderNames()); - list.add(present_headerNames); - if (present_headerNames) - list.add(headerNames); - - boolean present_rows = true && (isSetRows()); - list.add(present_rows); - if (present_rows) - list.add(rows); - - boolean present_progressedPercentage = true; - list.add(present_progressedPercentage); - if (present_progressedPercentage) - list.add(progressedPercentage); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status.getValue()); - - boolean present_footerSummary = true && (isSetFooterSummary()); - list.add(present_footerSummary); - if (present_footerSummary) - list.add(footerSummary); - - boolean present_startTime = true; - list.add(present_startTime); - if (present_startTime) - list.add(startTime); - - return list.hashCode(); - } - - @Override - public int compareTo(TProgressUpdateResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetHeaderNames()).compareTo(other.isSetHeaderNames()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetHeaderNames()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.headerNames, other.headerNames); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRows()).compareTo(other.isSetRows()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRows()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rows, other.rows); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetProgressedPercentage()).compareTo(other.isSetProgressedPercentage()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetProgressedPercentage()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.progressedPercentage, other.progressedPercentage); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetFooterSummary()).compareTo(other.isSetFooterSummary()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetFooterSummary()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.footerSummary, other.footerSummary); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetStartTime()).compareTo(other.isSetStartTime()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStartTime()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.startTime, other.startTime); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TProgressUpdateResp("); - boolean first = true; - - sb.append("headerNames:"); - if (this.headerNames == null) { - sb.append("null"); - } else { - sb.append(this.headerNames); - } - first = false; - if (!first) sb.append(", "); - sb.append("rows:"); - if (this.rows == null) { - sb.append("null"); - } else { - sb.append(this.rows); - } - first = false; - if (!first) sb.append(", "); - sb.append("progressedPercentage:"); - sb.append(this.progressedPercentage); - first = false; - if (!first) sb.append(", "); - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - if (!first) sb.append(", "); - sb.append("footerSummary:"); - if (this.footerSummary == null) { - sb.append("null"); - } else { - sb.append(this.footerSummary); - } - first = false; - if (!first) sb.append(", "); - sb.append("startTime:"); - sb.append(this.startTime); - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetHeaderNames()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'headerNames' is unset! Struct:" + toString()); - } - - if (!isSetRows()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'rows' is unset! Struct:" + toString()); - } - - if (!isSetProgressedPercentage()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'progressedPercentage' is unset! Struct:" + toString()); - } - - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); - } - - if (!isSetFooterSummary()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'footerSummary' is unset! Struct:" + toString()); - } - - if (!isSetStartTime()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'startTime' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TProgressUpdateRespStandardSchemeFactory implements SchemeFactory { - public TProgressUpdateRespStandardScheme getScheme() { - return new TProgressUpdateRespStandardScheme(); - } - } - - private static class TProgressUpdateRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TProgressUpdateResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // HEADER_NAMES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list180 = iprot.readListBegin(); - struct.headerNames = new ArrayList(_list180.size); - String _elem181; - for (int _i182 = 0; _i182 < _list180.size; ++_i182) - { - _elem181 = iprot.readString(); - struct.headerNames.add(_elem181); - } - iprot.readListEnd(); - } - struct.setHeaderNamesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // ROWS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list183 = iprot.readListBegin(); - struct.rows = new ArrayList>(_list183.size); - List _elem184; - for (int _i185 = 0; _i185 < _list183.size; ++_i185) - { - { - org.apache.thrift.protocol.TList _list186 = iprot.readListBegin(); - _elem184 = new ArrayList(_list186.size); - String _elem187; - for (int _i188 = 0; _i188 < _list186.size; ++_i188) - { - _elem187 = iprot.readString(); - _elem184.add(_elem187); - } - iprot.readListEnd(); - } - struct.rows.add(_elem184); - } - iprot.readListEnd(); - } - struct.setRowsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // PROGRESSED_PERCENTAGE - if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) { - struct.progressedPercentage = iprot.readDouble(); - struct.setProgressedPercentageIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.status = org.apache.hive.service.rpc.thrift.TJobExecutionStatus.findByValue(iprot.readI32()); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // FOOTER_SUMMARY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.footerSummary = iprot.readString(); - struct.setFooterSummaryIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 6: // START_TIME - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.startTime = iprot.readI64(); - struct.setStartTimeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol 
oprot, TProgressUpdateResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.headerNames != null) { - oprot.writeFieldBegin(HEADER_NAMES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.headerNames.size())); - for (String _iter189 : struct.headerNames) - { - oprot.writeString(_iter189); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.rows != null) { - oprot.writeFieldBegin(ROWS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, struct.rows.size())); - for (List _iter190 : struct.rows) - { - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter190.size())); - for (String _iter191 : _iter190) - { - oprot.writeString(_iter191); - } - oprot.writeListEnd(); - } - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(PROGRESSED_PERCENTAGE_FIELD_DESC); - oprot.writeDouble(struct.progressedPercentage); - oprot.writeFieldEnd(); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - oprot.writeI32(struct.status.getValue()); - oprot.writeFieldEnd(); - } - if (struct.footerSummary != null) { - oprot.writeFieldBegin(FOOTER_SUMMARY_FIELD_DESC); - oprot.writeString(struct.footerSummary); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(START_TIME_FIELD_DESC); - oprot.writeI64(struct.startTime); - oprot.writeFieldEnd(); - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TProgressUpdateRespTupleSchemeFactory implements SchemeFactory { - public TProgressUpdateRespTupleScheme getScheme() { - return new TProgressUpdateRespTupleScheme(); - } - } - - private static class TProgressUpdateRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TProgressUpdateResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.headerNames.size()); - for (String _iter192 : struct.headerNames) - { - oprot.writeString(_iter192); - } - } - { - oprot.writeI32(struct.rows.size()); - for (List _iter193 : struct.rows) - { - { - oprot.writeI32(_iter193.size()); - for (String _iter194 : _iter193) - { - oprot.writeString(_iter194); - } - } - } - } - oprot.writeDouble(struct.progressedPercentage); - oprot.writeI32(struct.status.getValue()); - oprot.writeString(struct.footerSummary); - oprot.writeI64(struct.startTime); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TProgressUpdateResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list195 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.headerNames = new ArrayList(_list195.size); - String _elem196; - for (int _i197 = 0; _i197 < _list195.size; ++_i197) - { - _elem196 = iprot.readString(); - struct.headerNames.add(_elem196); - } - } - struct.setHeaderNamesIsSet(true); - { - org.apache.thrift.protocol.TList _list198 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.rows = new ArrayList>(_list198.size); - List _elem199; - for (int _i200 = 0; _i200 < _list198.size; ++_i200) - { - { - org.apache.thrift.protocol.TList _list201 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - _elem199 = new ArrayList(_list201.size); - String _elem202; - for (int _i203 = 0; _i203 < _list201.size; ++_i203) - { - _elem202 = iprot.readString(); - _elem199.add(_elem202); - } - } - struct.rows.add(_elem199); - } - } - struct.setRowsIsSet(true); - struct.progressedPercentage = iprot.readDouble(); - struct.setProgressedPercentageIsSet(true); - struct.status = org.apache.hive.service.rpc.thrift.TJobExecutionStatus.findByValue(iprot.readI32()); - struct.setStatusIsSet(true); - struct.footerSummary = iprot.readString(); - struct.setFooterSummaryIsSet(true); - struct.startTime = iprot.readI64(); - struct.setStartTimeIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TProtocolVersion.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TProtocolVersion.java deleted file mode 100644 index 18a782513c500..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TProtocolVersion.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TProtocolVersion implements org.apache.thrift.TEnum { - HIVE_CLI_SERVICE_PROTOCOL_V1(0), - HIVE_CLI_SERVICE_PROTOCOL_V2(1), - HIVE_CLI_SERVICE_PROTOCOL_V3(2), - HIVE_CLI_SERVICE_PROTOCOL_V4(3), - HIVE_CLI_SERVICE_PROTOCOL_V5(4), - HIVE_CLI_SERVICE_PROTOCOL_V6(5), - HIVE_CLI_SERVICE_PROTOCOL_V7(6), - HIVE_CLI_SERVICE_PROTOCOL_V8(7), - HIVE_CLI_SERVICE_PROTOCOL_V9(8), - HIVE_CLI_SERVICE_PROTOCOL_V10(9); - - private final int value; - - private TProtocolVersion(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - public static TProtocolVersion findByValue(int value) { - switch (value) { - case 0: - return HIVE_CLI_SERVICE_PROTOCOL_V1; - case 1: - return HIVE_CLI_SERVICE_PROTOCOL_V2; - case 2: - return HIVE_CLI_SERVICE_PROTOCOL_V3; - case 3: - return HIVE_CLI_SERVICE_PROTOCOL_V4; - case 4: - return HIVE_CLI_SERVICE_PROTOCOL_V5; - case 5: - return HIVE_CLI_SERVICE_PROTOCOL_V6; - case 6: - return HIVE_CLI_SERVICE_PROTOCOL_V7; - case 7: - return HIVE_CLI_SERVICE_PROTOCOL_V8; - case 8: - return HIVE_CLI_SERVICE_PROTOCOL_V9; - case 9: - return HIVE_CLI_SERVICE_PROTOCOL_V10; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRenewDelegationTokenReq.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRenewDelegationTokenReq.java deleted file mode 100644 index 8957ebc8d2fff..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRenewDelegationTokenReq.java +++ /dev/null @@ -1,495 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TRenewDelegationTokenReq implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRenewDelegationTokenReq"); - - private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField DELEGATION_TOKEN_FIELD_DESC = new org.apache.thrift.protocol.TField("delegationToken", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRenewDelegationTokenReqStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRenewDelegationTokenReqTupleSchemeFactory()); - } - - private TSessionHandle sessionHandle; // required - private String delegationToken; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_HANDLE((short)1, "sessionHandle"), - DELEGATION_TOKEN((short)2, "delegationToken"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_HANDLE - return SESSION_HANDLE; - case 2: // DELEGATION_TOKEN - return DELEGATION_TOKEN; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); - tmpMap.put(_Fields.DELEGATION_TOKEN, new org.apache.thrift.meta_data.FieldMetaData("delegationToken", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRenewDelegationTokenReq.class, metaDataMap); - } - - public TRenewDelegationTokenReq() { - } - - public TRenewDelegationTokenReq( - TSessionHandle sessionHandle, - String delegationToken) - { - this(); - this.sessionHandle = sessionHandle; - this.delegationToken = delegationToken; - } - - /** - * Performs a deep copy on other. 
- */ - public TRenewDelegationTokenReq(TRenewDelegationTokenReq other) { - if (other.isSetSessionHandle()) { - this.sessionHandle = new TSessionHandle(other.sessionHandle); - } - if (other.isSetDelegationToken()) { - this.delegationToken = other.delegationToken; - } - } - - public TRenewDelegationTokenReq deepCopy() { - return new TRenewDelegationTokenReq(this); - } - - @Override - public void clear() { - this.sessionHandle = null; - this.delegationToken = null; - } - - public TSessionHandle getSessionHandle() { - return this.sessionHandle; - } - - public void setSessionHandle(TSessionHandle sessionHandle) { - this.sessionHandle = sessionHandle; - } - - public void unsetSessionHandle() { - this.sessionHandle = null; - } - - /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionHandle() { - return this.sessionHandle != null; - } - - public void setSessionHandleIsSet(boolean value) { - if (!value) { - this.sessionHandle = null; - } - } - - public String getDelegationToken() { - return this.delegationToken; - } - - public void setDelegationToken(String delegationToken) { - this.delegationToken = delegationToken; - } - - public void unsetDelegationToken() { - this.delegationToken = null; - } - - /** Returns true if field delegationToken is set (has been assigned a value) and false otherwise */ - public boolean isSetDelegationToken() { - return this.delegationToken != null; - } - - public void setDelegationTokenIsSet(boolean value) { - if (!value) { - this.delegationToken = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_HANDLE: - if (value == null) { - unsetSessionHandle(); - } else { - setSessionHandle((TSessionHandle)value); - } - break; - - case DELEGATION_TOKEN: - if (value == null) { - unsetDelegationToken(); - } else { - setDelegationToken((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_HANDLE: - return getSessionHandle(); - - case DELEGATION_TOKEN: - return getDelegationToken(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_HANDLE: - return isSetSessionHandle(); - case DELEGATION_TOKEN: - return isSetDelegationToken(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRenewDelegationTokenReq) - return this.equals((TRenewDelegationTokenReq)that); - return false; - } - - public boolean equals(TRenewDelegationTokenReq that) { - if (that == null) - return false; - - boolean this_present_sessionHandle = true && this.isSetSessionHandle(); - boolean that_present_sessionHandle = true && that.isSetSessionHandle(); - if (this_present_sessionHandle || that_present_sessionHandle) { - if (!(this_present_sessionHandle && that_present_sessionHandle)) - return false; - if (!this.sessionHandle.equals(that.sessionHandle)) - return false; - } - - boolean this_present_delegationToken = true && this.isSetDelegationToken(); - boolean that_present_delegationToken = true && that.isSetDelegationToken(); - if (this_present_delegationToken || that_present_delegationToken) { - if (!(this_present_delegationToken && that_present_delegationToken)) - return 
false; - if (!this.delegationToken.equals(that.delegationToken)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionHandle = true && (isSetSessionHandle()); - list.add(present_sessionHandle); - if (present_sessionHandle) - list.add(sessionHandle); - - boolean present_delegationToken = true && (isSetDelegationToken()); - list.add(present_delegationToken); - if (present_delegationToken) - list.add(delegationToken); - - return list.hashCode(); - } - - @Override - public int compareTo(TRenewDelegationTokenReq other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionHandle()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDelegationToken()).compareTo(other.isSetDelegationToken()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetDelegationToken()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delegationToken, other.delegationToken); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRenewDelegationTokenReq("); - boolean first = true; - - sb.append("sessionHandle:"); - if (this.sessionHandle == null) { - sb.append("null"); - } else { - sb.append(this.sessionHandle); - } - first = false; - if (!first) sb.append(", "); - sb.append("delegationToken:"); - if (this.delegationToken == null) { - sb.append("null"); - } else { - sb.append(this.delegationToken); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionHandle()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! Struct:" + toString()); - } - - if (!isSetDelegationToken()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'delegationToken' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionHandle != null) { - sessionHandle.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRenewDelegationTokenReqStandardSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenReqStandardScheme getScheme() { - return new TRenewDelegationTokenReqStandardScheme(); - } - } - - private static class TRenewDelegationTokenReqStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_HANDLE - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // DELEGATION_TOKEN - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionHandle != null) { - oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); - struct.sessionHandle.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.delegationToken != null) { - oprot.writeFieldBegin(DELEGATION_TOKEN_FIELD_DESC); - oprot.writeString(struct.delegationToken); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRenewDelegationTokenReqTupleSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenReqTupleScheme getScheme() { - return new TRenewDelegationTokenReqTupleScheme(); - } - } - - private static class TRenewDelegationTokenReqTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionHandle.write(oprot); - oprot.writeString(struct.delegationToken); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenReq struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = 
(TTupleProtocol) prot; - struct.sessionHandle = new TSessionHandle(); - struct.sessionHandle.read(iprot); - struct.setSessionHandleIsSet(true); - struct.delegationToken = iprot.readString(); - struct.setDelegationTokenIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRenewDelegationTokenResp.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRenewDelegationTokenResp.java deleted file mode 100644 index 6f5004ccc38e4..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRenewDelegationTokenResp.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TRenewDelegationTokenResp implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRenewDelegationTokenResp"); - - private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRenewDelegationTokenRespStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRenewDelegationTokenRespTupleSchemeFactory()); - } - - private TStatus status; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - STATUS((short)1, "status"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS - return STATUS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRenewDelegationTokenResp.class, metaDataMap); - } - - public TRenewDelegationTokenResp() { - } - - public TRenewDelegationTokenResp( - TStatus status) - { - this(); - this.status = status; - } - - /** - * Performs a deep copy on other. - */ - public TRenewDelegationTokenResp(TRenewDelegationTokenResp other) { - if (other.isSetStatus()) { - this.status = new TStatus(other.status); - } - } - - public TRenewDelegationTokenResp deepCopy() { - return new TRenewDelegationTokenResp(this); - } - - @Override - public void clear() { - this.status = null; - } - - public TStatus getStatus() { - return this.status; - } - - public void setStatus(TStatus status) { - this.status = status; - } - - public void unsetStatus() { - this.status = null; - } - - /** Returns true if field status is set (has been assigned a value) and false otherwise */ - public boolean isSetStatus() { - return this.status != null; - } - - public void setStatusIsSet(boolean value) { - if (!value) { - this.status = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS: - if (value == null) { - unsetStatus(); - } else { - setStatus((TStatus)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS: - return getStatus(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS: - return isSetStatus(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRenewDelegationTokenResp) - return this.equals((TRenewDelegationTokenResp)that); - return false; - } - - public boolean equals(TRenewDelegationTokenResp that) { - if (that == null) - return false; - - boolean this_present_status = true && this.isSetStatus(); - boolean that_present_status = true && that.isSetStatus(); - if (this_present_status || that_present_status) { - if (!(this_present_status && that_present_status)) - return 
false; - if (!this.status.equals(that.status)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_status = true && (isSetStatus()); - list.add(present_status); - if (present_status) - list.add(status); - - return list.hashCode(); - } - - @Override - public int compareTo(TRenewDelegationTokenResp other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatus()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRenewDelegationTokenResp("); - boolean first = true; - - sb.append("status:"); - if (this.status == null) { - sb.append("null"); - } else { - sb.append(this.status); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatus()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (status != null) { - status.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRenewDelegationTokenRespStandardSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenRespStandardScheme getScheme() { - return new TRenewDelegationTokenRespStandardScheme(); - } - } - - private static class TRenewDelegationTokenRespStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.status != null) { - oprot.writeFieldBegin(STATUS_FIELD_DESC); - struct.status.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRenewDelegationTokenRespTupleSchemeFactory implements SchemeFactory { - public TRenewDelegationTokenRespTupleScheme getScheme() { - return new TRenewDelegationTokenRespTupleScheme(); - } - } - - private static class TRenewDelegationTokenRespTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.status.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRenewDelegationTokenResp struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.status = new TStatus(); - struct.status.read(iprot); - struct.setStatusIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRow.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRow.java deleted file mode 100644 index e95299df97c3a..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRow.java +++ /dev/null @@ -1,443 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT 
EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TRow implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRow"); - - private static final org.apache.thrift.protocol.TField COL_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("colVals", org.apache.thrift.protocol.TType.LIST, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRowStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRowTupleSchemeFactory()); - } - - private List colVals; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - COL_VALS((short)1, "colVals"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // COL_VALS - return COL_VALS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. 
- */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.COL_VALS, new org.apache.thrift.meta_data.FieldMetaData("colVals", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnValue.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRow.class, metaDataMap); - } - - public TRow() { - } - - public TRow( - List colVals) - { - this(); - this.colVals = colVals; - } - - /** - * Performs a deep copy on other. - */ - public TRow(TRow other) { - if (other.isSetColVals()) { - List __this__colVals = new ArrayList(other.colVals.size()); - for (TColumnValue other_element : other.colVals) { - __this__colVals.add(new TColumnValue(other_element)); - } - this.colVals = __this__colVals; - } - } - - public TRow deepCopy() { - return new TRow(this); - } - - @Override - public void clear() { - this.colVals = null; - } - - public int getColValsSize() { - return (this.colVals == null) ? 0 : this.colVals.size(); - } - - public java.util.Iterator getColValsIterator() { - return (this.colVals == null) ? 
null : this.colVals.iterator(); - } - - public void addToColVals(TColumnValue elem) { - if (this.colVals == null) { - this.colVals = new ArrayList(); - } - this.colVals.add(elem); - } - - public List getColVals() { - return this.colVals; - } - - public void setColVals(List colVals) { - this.colVals = colVals; - } - - public void unsetColVals() { - this.colVals = null; - } - - /** Returns true if field colVals is set (has been assigned a value) and false otherwise */ - public boolean isSetColVals() { - return this.colVals != null; - } - - public void setColValsIsSet(boolean value) { - if (!value) { - this.colVals = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case COL_VALS: - if (value == null) { - unsetColVals(); - } else { - setColVals((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case COL_VALS: - return getColVals(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case COL_VALS: - return isSetColVals(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRow) - return this.equals((TRow)that); - return false; - } - - public boolean equals(TRow that) { - if (that == null) - return false; - - boolean this_present_colVals = true && this.isSetColVals(); - boolean that_present_colVals = true && that.isSetColVals(); - if (this_present_colVals || that_present_colVals) { - if (!(this_present_colVals && that_present_colVals)) - return false; - if (!this.colVals.equals(that.colVals)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_colVals = true && (isSetColVals()); - list.add(present_colVals); - if (present_colVals) - list.add(colVals); - - return list.hashCode(); - } - - @Override - public int compareTo(TRow other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetColVals()).compareTo(other.isSetColVals()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColVals()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colVals, other.colVals); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRow("); - boolean first = true; - - sb.append("colVals:"); - if (this.colVals == null) { - sb.append("null"); - } else { - sb.append(this.colVals); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetColVals()) { - throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'colVals' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRowStandardSchemeFactory implements SchemeFactory { - public TRowStandardScheme getScheme() { - return new TRowStandardScheme(); - } - } - - private static class TRowStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRow struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // COL_VALS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list46 = iprot.readListBegin(); - struct.colVals = new ArrayList(_list46.size); - TColumnValue _elem47; - for (int _i48 = 0; _i48 < _list46.size; ++_i48) - { - _elem47 = new TColumnValue(); - _elem47.read(iprot); - struct.colVals.add(_elem47); - } - iprot.readListEnd(); - } - struct.setColValsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TRow struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.colVals != null) { - oprot.writeFieldBegin(COL_VALS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colVals.size())); - for (TColumnValue _iter49 : struct.colVals) - { - _iter49.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRowTupleSchemeFactory implements SchemeFactory { - public TRowTupleScheme getScheme() { - return new TRowTupleScheme(); - } - } - - private static class TRowTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRow struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.colVals.size()); - for (TColumnValue _iter50 : struct.colVals) - { - _iter50.write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRow struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list51 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.colVals = new ArrayList(_list51.size); - 
TColumnValue _elem52; - for (int _i53 = 0; _i53 < _list51.size; ++_i53) - { - _elem52 = new TColumnValue(); - _elem52.read(iprot); - struct.colVals.add(_elem52); - } - } - struct.setColValsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRowSet.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRowSet.java deleted file mode 100644 index da3d9d3ca8820..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TRowSet.java +++ /dev/null @@ -1,920 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TRowSet implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TRowSet"); - - private static final org.apache.thrift.protocol.TField START_ROW_OFFSET_FIELD_DESC = new org.apache.thrift.protocol.TField("startRowOffset", org.apache.thrift.protocol.TType.I64, (short)1); - private static final org.apache.thrift.protocol.TField ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("rows", org.apache.thrift.protocol.TType.LIST, (short)2); - private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)3); - private static final org.apache.thrift.protocol.TField BINARY_COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("binaryColumns", org.apache.thrift.protocol.TType.STRING, (short)4); - private static final org.apache.thrift.protocol.TField COLUMN_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("columnCount", org.apache.thrift.protocol.TType.I32, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TRowSetStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TRowSetTupleSchemeFactory()); - } - - private long startRowOffset; // required - private List rows; // required - private List columns; // optional - private ByteBuffer binaryColumns; // optional - private int columnCount; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - START_ROW_OFFSET((short)1, "startRowOffset"), - ROWS((short)2, "rows"), - COLUMNS((short)3, "columns"), - BINARY_COLUMNS((short)4, "binaryColumns"), - COLUMN_COUNT((short)5, "columnCount"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // START_ROW_OFFSET - return START_ROW_OFFSET; - case 2: // ROWS - return ROWS; - case 3: // COLUMNS - return COLUMNS; - case 4: // BINARY_COLUMNS - return BINARY_COLUMNS; - case 5: // COLUMN_COUNT - return COLUMN_COUNT; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __STARTROWOFFSET_ISSET_ID = 0; - private static final int __COLUMNCOUNT_ISSET_ID = 1; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.COLUMNS,_Fields.BINARY_COLUMNS,_Fields.COLUMN_COUNT}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.START_ROW_OFFSET, new org.apache.thrift.meta_data.FieldMetaData("startRowOffset", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); - tmpMap.put(_Fields.ROWS, new org.apache.thrift.meta_data.FieldMetaData("rows", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TRow.class)))); - tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumn.class)))); - tmpMap.put(_Fields.BINARY_COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("binaryColumns", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - tmpMap.put(_Fields.COLUMN_COUNT, new org.apache.thrift.meta_data.FieldMetaData("columnCount", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TRowSet.class, metaDataMap); - } - - public TRowSet() { - } - - public TRowSet( - long startRowOffset, - List rows) - { - this(); - this.startRowOffset = startRowOffset; - setStartRowOffsetIsSet(true); - this.rows = rows; - } - - /** - * Performs a deep copy on other. - */ - public TRowSet(TRowSet other) { - __isset_bitfield = other.__isset_bitfield; - this.startRowOffset = other.startRowOffset; - if (other.isSetRows()) { - List __this__rows = new ArrayList(other.rows.size()); - for (TRow other_element : other.rows) { - __this__rows.add(new TRow(other_element)); - } - this.rows = __this__rows; - } - if (other.isSetColumns()) { - List __this__columns = new ArrayList(other.columns.size()); - for (TColumn other_element : other.columns) { - __this__columns.add(new TColumn(other_element)); - } - this.columns = __this__columns; - } - if (other.isSetBinaryColumns()) { - this.binaryColumns = org.apache.thrift.TBaseHelper.copyBinary(other.binaryColumns); - } - this.columnCount = other.columnCount; - } - - public TRowSet deepCopy() { - return new TRowSet(this); - } - - @Override - public void clear() { - setStartRowOffsetIsSet(false); - this.startRowOffset = 0; - this.rows = null; - this.columns = null; - this.binaryColumns = null; - setColumnCountIsSet(false); - this.columnCount = 0; - } - - public long getStartRowOffset() { - return this.startRowOffset; - } - - public void setStartRowOffset(long startRowOffset) { - this.startRowOffset = startRowOffset; - setStartRowOffsetIsSet(true); - } - - public void unsetStartRowOffset() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __STARTROWOFFSET_ISSET_ID); - } - - /** Returns true if field startRowOffset is set (has been assigned a value) and false otherwise */ - public boolean isSetStartRowOffset() { - return EncodingUtils.testBit(__isset_bitfield, __STARTROWOFFSET_ISSET_ID); - } - - public void setStartRowOffsetIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __STARTROWOFFSET_ISSET_ID, value); - } - - public int getRowsSize() { - return (this.rows == null) ? 0 : this.rows.size(); - } - - public java.util.Iterator getRowsIterator() { - return (this.rows == null) ? null : this.rows.iterator(); - } - - public void addToRows(TRow elem) { - if (this.rows == null) { - this.rows = new ArrayList(); - } - this.rows.add(elem); - } - - public List getRows() { - return this.rows; - } - - public void setRows(List rows) { - this.rows = rows; - } - - public void unsetRows() { - this.rows = null; - } - - /** Returns true if field rows is set (has been assigned a value) and false otherwise */ - public boolean isSetRows() { - return this.rows != null; - } - - public void setRowsIsSet(boolean value) { - if (!value) { - this.rows = null; - } - } - - public int getColumnsSize() { - return (this.columns == null) ? 0 : this.columns.size(); - } - - public java.util.Iterator getColumnsIterator() { - return (this.columns == null) ? 
null : this.columns.iterator(); - } - - public void addToColumns(TColumn elem) { - if (this.columns == null) { - this.columns = new ArrayList(); - } - this.columns.add(elem); - } - - public List getColumns() { - return this.columns; - } - - public void setColumns(List columns) { - this.columns = columns; - } - - public void unsetColumns() { - this.columns = null; - } - - /** Returns true if field columns is set (has been assigned a value) and false otherwise */ - public boolean isSetColumns() { - return this.columns != null; - } - - public void setColumnsIsSet(boolean value) { - if (!value) { - this.columns = null; - } - } - - public byte[] getBinaryColumns() { - setBinaryColumns(org.apache.thrift.TBaseHelper.rightSize(binaryColumns)); - return binaryColumns == null ? null : binaryColumns.array(); - } - - public ByteBuffer bufferForBinaryColumns() { - return org.apache.thrift.TBaseHelper.copyBinary(binaryColumns); - } - - public void setBinaryColumns(byte[] binaryColumns) { - this.binaryColumns = binaryColumns == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(binaryColumns, binaryColumns.length)); - } - - public void setBinaryColumns(ByteBuffer binaryColumns) { - this.binaryColumns = org.apache.thrift.TBaseHelper.copyBinary(binaryColumns); - } - - public void unsetBinaryColumns() { - this.binaryColumns = null; - } - - /** Returns true if field binaryColumns is set (has been assigned a value) and false otherwise */ - public boolean isSetBinaryColumns() { - return this.binaryColumns != null; - } - - public void setBinaryColumnsIsSet(boolean value) { - if (!value) { - this.binaryColumns = null; - } - } - - public int getColumnCount() { - return this.columnCount; - } - - public void setColumnCount(int columnCount) { - this.columnCount = columnCount; - setColumnCountIsSet(true); - } - - public void unsetColumnCount() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COLUMNCOUNT_ISSET_ID); - } - - /** Returns true if field columnCount is set (has been assigned a value) and false otherwise */ - public boolean isSetColumnCount() { - return EncodingUtils.testBit(__isset_bitfield, __COLUMNCOUNT_ISSET_ID); - } - - public void setColumnCountIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COLUMNCOUNT_ISSET_ID, value); - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case START_ROW_OFFSET: - if (value == null) { - unsetStartRowOffset(); - } else { - setStartRowOffset((Long)value); - } - break; - - case ROWS: - if (value == null) { - unsetRows(); - } else { - setRows((List)value); - } - break; - - case COLUMNS: - if (value == null) { - unsetColumns(); - } else { - setColumns((List)value); - } - break; - - case BINARY_COLUMNS: - if (value == null) { - unsetBinaryColumns(); - } else { - setBinaryColumns((ByteBuffer)value); - } - break; - - case COLUMN_COUNT: - if (value == null) { - unsetColumnCount(); - } else { - setColumnCount((Integer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case START_ROW_OFFSET: - return getStartRowOffset(); - - case ROWS: - return getRows(); - - case COLUMNS: - return getColumns(); - - case BINARY_COLUMNS: - return getBinaryColumns(); - - case COLUMN_COUNT: - return getColumnCount(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new 
IllegalArgumentException(); - } - - switch (field) { - case START_ROW_OFFSET: - return isSetStartRowOffset(); - case ROWS: - return isSetRows(); - case COLUMNS: - return isSetColumns(); - case BINARY_COLUMNS: - return isSetBinaryColumns(); - case COLUMN_COUNT: - return isSetColumnCount(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TRowSet) - return this.equals((TRowSet)that); - return false; - } - - public boolean equals(TRowSet that) { - if (that == null) - return false; - - boolean this_present_startRowOffset = true; - boolean that_present_startRowOffset = true; - if (this_present_startRowOffset || that_present_startRowOffset) { - if (!(this_present_startRowOffset && that_present_startRowOffset)) - return false; - if (this.startRowOffset != that.startRowOffset) - return false; - } - - boolean this_present_rows = true && this.isSetRows(); - boolean that_present_rows = true && that.isSetRows(); - if (this_present_rows || that_present_rows) { - if (!(this_present_rows && that_present_rows)) - return false; - if (!this.rows.equals(that.rows)) - return false; - } - - boolean this_present_columns = true && this.isSetColumns(); - boolean that_present_columns = true && that.isSetColumns(); - if (this_present_columns || that_present_columns) { - if (!(this_present_columns && that_present_columns)) - return false; - if (!this.columns.equals(that.columns)) - return false; - } - - boolean this_present_binaryColumns = true && this.isSetBinaryColumns(); - boolean that_present_binaryColumns = true && that.isSetBinaryColumns(); - if (this_present_binaryColumns || that_present_binaryColumns) { - if (!(this_present_binaryColumns && that_present_binaryColumns)) - return false; - if (!this.binaryColumns.equals(that.binaryColumns)) - return false; - } - - boolean this_present_columnCount = true && this.isSetColumnCount(); - boolean that_present_columnCount = true && that.isSetColumnCount(); - if (this_present_columnCount || that_present_columnCount) { - if (!(this_present_columnCount && that_present_columnCount)) - return false; - if (this.columnCount != that.columnCount) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_startRowOffset = true; - list.add(present_startRowOffset); - if (present_startRowOffset) - list.add(startRowOffset); - - boolean present_rows = true && (isSetRows()); - list.add(present_rows); - if (present_rows) - list.add(rows); - - boolean present_columns = true && (isSetColumns()); - list.add(present_columns); - if (present_columns) - list.add(columns); - - boolean present_binaryColumns = true && (isSetBinaryColumns()); - list.add(present_binaryColumns); - if (present_binaryColumns) - list.add(binaryColumns); - - boolean present_columnCount = true && (isSetColumnCount()); - list.add(present_columnCount); - if (present_columnCount) - list.add(columnCount); - - return list.hashCode(); - } - - @Override - public int compareTo(TRowSet other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStartRowOffset()).compareTo(other.isSetStartRowOffset()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStartRowOffset()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.startRowOffset, other.startRowOffset); - if (lastComparison != 0) 
{ - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetRows()).compareTo(other.isSetRows()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRows()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rows, other.rows); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetColumns()).compareTo(other.isSetColumns()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, other.columns); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetBinaryColumns()).compareTo(other.isSetBinaryColumns()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetBinaryColumns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.binaryColumns, other.binaryColumns); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetColumnCount()).compareTo(other.isSetColumnCount()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumnCount()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnCount, other.columnCount); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TRowSet("); - boolean first = true; - - sb.append("startRowOffset:"); - sb.append(this.startRowOffset); - first = false; - if (!first) sb.append(", "); - sb.append("rows:"); - if (this.rows == null) { - sb.append("null"); - } else { - sb.append(this.rows); - } - first = false; - if (isSetColumns()) { - if (!first) sb.append(", "); - sb.append("columns:"); - if (this.columns == null) { - sb.append("null"); - } else { - sb.append(this.columns); - } - first = false; - } - if (isSetBinaryColumns()) { - if (!first) sb.append(", "); - sb.append("binaryColumns:"); - if (this.binaryColumns == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.binaryColumns, sb); - } - first = false; - } - if (isSetColumnCount()) { - if (!first) sb.append(", "); - sb.append("columnCount:"); - sb.append(this.columnCount); - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStartRowOffset()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'startRowOffset' is unset! Struct:" + toString()); - } - - if (!isSetRows()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'rows' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TRowSetStandardSchemeFactory implements SchemeFactory { - public TRowSetStandardScheme getScheme() { - return new TRowSetStandardScheme(); - } - } - - private static class TRowSetStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TRowSet struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // START_ROW_OFFSET - if (schemeField.type == org.apache.thrift.protocol.TType.I64) { - struct.startRowOffset = iprot.readI64(); - struct.setStartRowOffsetIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // ROWS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list118 = iprot.readListBegin(); - struct.rows = new ArrayList(_list118.size); - TRow _elem119; - for (int _i120 = 0; _i120 < _list118.size; ++_i120) - { - _elem119 = new TRow(); - _elem119.read(iprot); - struct.rows.add(_elem119); - } - iprot.readListEnd(); - } - struct.setRowsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // COLUMNS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list121 = iprot.readListBegin(); - struct.columns = new ArrayList(_list121.size); - TColumn _elem122; - for (int _i123 = 0; _i123 < _list121.size; ++_i123) - { - _elem122 = new TColumn(); - _elem122.read(iprot); - struct.columns.add(_elem122); - } - iprot.readListEnd(); - } - struct.setColumnsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // BINARY_COLUMNS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.binaryColumns = iprot.readBinary(); - struct.setBinaryColumnsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // COLUMN_COUNT - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.columnCount = iprot.readI32(); - struct.setColumnCountIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol 
oprot, TRowSet struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - oprot.writeFieldBegin(START_ROW_OFFSET_FIELD_DESC); - oprot.writeI64(struct.startRowOffset); - oprot.writeFieldEnd(); - if (struct.rows != null) { - oprot.writeFieldBegin(ROWS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.rows.size())); - for (TRow _iter124 : struct.rows) - { - _iter124.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.columns != null) { - if (struct.isSetColumns()) { - oprot.writeFieldBegin(COLUMNS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); - for (TColumn _iter125 : struct.columns) - { - _iter125.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - } - if (struct.binaryColumns != null) { - if (struct.isSetBinaryColumns()) { - oprot.writeFieldBegin(BINARY_COLUMNS_FIELD_DESC); - oprot.writeBinary(struct.binaryColumns); - oprot.writeFieldEnd(); - } - } - if (struct.isSetColumnCount()) { - oprot.writeFieldBegin(COLUMN_COUNT_FIELD_DESC); - oprot.writeI32(struct.columnCount); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TRowSetTupleSchemeFactory implements SchemeFactory { - public TRowSetTupleScheme getScheme() { - return new TRowSetTupleScheme(); - } - } - - private static class TRowSetTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TRowSet struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI64(struct.startRowOffset); - { - oprot.writeI32(struct.rows.size()); - for (TRow _iter126 : struct.rows) - { - _iter126.write(oprot); - } - } - BitSet optionals = new BitSet(); - if (struct.isSetColumns()) { - optionals.set(0); - } - if (struct.isSetBinaryColumns()) { - optionals.set(1); - } - if (struct.isSetColumnCount()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); - if (struct.isSetColumns()) { - { - oprot.writeI32(struct.columns.size()); - for (TColumn _iter127 : struct.columns) - { - _iter127.write(oprot); - } - } - } - if (struct.isSetBinaryColumns()) { - oprot.writeBinary(struct.binaryColumns); - } - if (struct.isSetColumnCount()) { - oprot.writeI32(struct.columnCount); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TRowSet struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.startRowOffset = iprot.readI64(); - struct.setStartRowOffsetIsSet(true); - { - org.apache.thrift.protocol.TList _list128 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.rows = new ArrayList(_list128.size); - TRow _elem129; - for (int _i130 = 0; _i130 < _list128.size; ++_i130) - { - _elem129 = new TRow(); - _elem129.read(iprot); - struct.rows.add(_elem129); - } - } - struct.setRowsIsSet(true); - BitSet incoming = iprot.readBitSet(3); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list131 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.columns = new ArrayList(_list131.size); - TColumn _elem132; - for (int _i133 = 0; _i133 < _list131.size; ++_i133) - { - _elem132 = new TColumn(); - _elem132.read(iprot); - 
struct.columns.add(_elem132); - } - } - struct.setColumnsIsSet(true); - } - if (incoming.get(1)) { - struct.binaryColumns = iprot.readBinary(); - struct.setBinaryColumnsIsSet(true); - } - if (incoming.get(2)) { - struct.columnCount = iprot.readI32(); - struct.setColumnCountIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TSessionHandle.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TSessionHandle.java deleted file mode 100644 index b5cb6e7b15aa6..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TSessionHandle.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TSessionHandle implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TSessionHandle"); - - private static final org.apache.thrift.protocol.TField SESSION_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionId", org.apache.thrift.protocol.TType.STRUCT, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TSessionHandleStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TSessionHandleTupleSchemeFactory()); - } - - private THandleIdentifier sessionId; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SESSION_ID((short)1, "sessionId"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // SESSION_ID - return SESSION_ID; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SESSION_ID, new org.apache.thrift.meta_data.FieldMetaData("sessionId", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, THandleIdentifier.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TSessionHandle.class, metaDataMap); - } - - public TSessionHandle() { - } - - public TSessionHandle( - THandleIdentifier sessionId) - { - this(); - this.sessionId = sessionId; - } - - /** - * Performs a deep copy on other. - */ - public TSessionHandle(TSessionHandle other) { - if (other.isSetSessionId()) { - this.sessionId = new THandleIdentifier(other.sessionId); - } - } - - public TSessionHandle deepCopy() { - return new TSessionHandle(this); - } - - @Override - public void clear() { - this.sessionId = null; - } - - public THandleIdentifier getSessionId() { - return this.sessionId; - } - - public void setSessionId(THandleIdentifier sessionId) { - this.sessionId = sessionId; - } - - public void unsetSessionId() { - this.sessionId = null; - } - - /** Returns true if field sessionId is set (has been assigned a value) and false otherwise */ - public boolean isSetSessionId() { - return this.sessionId != null; - } - - public void setSessionIdIsSet(boolean value) { - if (!value) { - this.sessionId = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case SESSION_ID: - if (value == null) { - unsetSessionId(); - } else { - setSessionId((THandleIdentifier)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case SESSION_ID: - return getSessionId(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case SESSION_ID: - return isSetSessionId(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TSessionHandle) - return this.equals((TSessionHandle)that); - return false; - } - - public boolean equals(TSessionHandle that) { - if (that == null) - return false; - - boolean this_present_sessionId = true && this.isSetSessionId(); - boolean that_present_sessionId = true && that.isSetSessionId(); - if (this_present_sessionId || that_present_sessionId) { - 
if (!(this_present_sessionId && that_present_sessionId)) - return false; - if (!this.sessionId.equals(that.sessionId)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_sessionId = true && (isSetSessionId()); - list.add(present_sessionId); - if (present_sessionId) - list.add(sessionId); - - return list.hashCode(); - } - - @Override - public int compareTo(TSessionHandle other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetSessionId()).compareTo(other.isSetSessionId()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSessionId()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionId, other.sessionId); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TSessionHandle("); - boolean first = true; - - sb.append("sessionId:"); - if (this.sessionId == null) { - sb.append("null"); - } else { - sb.append(this.sessionId); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetSessionId()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionId' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - if (sessionId != null) { - sessionId.validate(); - } - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TSessionHandleStandardSchemeFactory implements SchemeFactory { - public TSessionHandleStandardScheme getScheme() { - return new TSessionHandleStandardScheme(); - } - } - - private static class TSessionHandleStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TSessionHandle struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // SESSION_ID - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.sessionId = new THandleIdentifier(); - struct.sessionId.read(iprot); - struct.setSessionIdIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TSessionHandle struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.sessionId != null) { - oprot.writeFieldBegin(SESSION_ID_FIELD_DESC); - struct.sessionId.write(oprot); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TSessionHandleTupleSchemeFactory implements SchemeFactory { - public TSessionHandleTupleScheme getScheme() { - return new TSessionHandleTupleScheme(); - } - } - - private static class TSessionHandleTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TSessionHandle struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - struct.sessionId.write(oprot); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TSessionHandle struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.sessionId = new THandleIdentifier(); - struct.sessionId.read(iprot); - struct.setSessionIdIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStatus.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStatus.java deleted file mode 100644 index 50f4531b0a209..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStatus.java +++ /dev/null @@ -1,875 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * 
@generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TStatus implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStatus"); - - private static final org.apache.thrift.protocol.TField STATUS_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("statusCode", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField INFO_MESSAGES_FIELD_DESC = new org.apache.thrift.protocol.TField("infoMessages", org.apache.thrift.protocol.TType.LIST, (short)2); - private static final org.apache.thrift.protocol.TField SQL_STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("sqlState", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I32, (short)4); - private static final org.apache.thrift.protocol.TField ERROR_MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorMessage", org.apache.thrift.protocol.TType.STRING, (short)5); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStatusStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TStatusTupleSchemeFactory()); - } - - private TStatusCode statusCode; // required - private List infoMessages; // optional - private String sqlState; // optional - private int errorCode; // optional - private String errorMessage; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - /** - * - * @see TStatusCode - */ - STATUS_CODE((short)1, "statusCode"), - INFO_MESSAGES((short)2, "infoMessages"), - SQL_STATE((short)3, "sqlState"), - ERROR_CODE((short)4, "errorCode"), - ERROR_MESSAGE((short)5, "errorMessage"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // STATUS_CODE - return STATUS_CODE; - case 2: // INFO_MESSAGES - return INFO_MESSAGES; - case 3: // SQL_STATE - return SQL_STATE; - case 4: // ERROR_CODE - return ERROR_CODE; - case 5: // ERROR_MESSAGE - return ERROR_MESSAGE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final int __ERRORCODE_ISSET_ID = 0; - private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.INFO_MESSAGES,_Fields.SQL_STATE,_Fields.ERROR_CODE,_Fields.ERROR_MESSAGE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.STATUS_CODE, new org.apache.thrift.meta_data.FieldMetaData("statusCode", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TStatusCode.class))); - tmpMap.put(_Fields.INFO_MESSAGES, new org.apache.thrift.meta_data.FieldMetaData("infoMessages", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.SQL_STATE, new org.apache.thrift.meta_data.FieldMetaData("sqlState", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.ERROR_MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("errorMessage", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStatus.class, metaDataMap); - } - - public TStatus() { - } - - public TStatus( - TStatusCode statusCode) - { - this(); - this.statusCode = statusCode; - } - - /** - * Performs a deep copy on other. 
- */ - public TStatus(TStatus other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetStatusCode()) { - this.statusCode = other.statusCode; - } - if (other.isSetInfoMessages()) { - List __this__infoMessages = new ArrayList(other.infoMessages); - this.infoMessages = __this__infoMessages; - } - if (other.isSetSqlState()) { - this.sqlState = other.sqlState; - } - this.errorCode = other.errorCode; - if (other.isSetErrorMessage()) { - this.errorMessage = other.errorMessage; - } - } - - public TStatus deepCopy() { - return new TStatus(this); - } - - @Override - public void clear() { - this.statusCode = null; - this.infoMessages = null; - this.sqlState = null; - setErrorCodeIsSet(false); - this.errorCode = 0; - this.errorMessage = null; - } - - /** - * - * @see TStatusCode - */ - public TStatusCode getStatusCode() { - return this.statusCode; - } - - /** - * - * @see TStatusCode - */ - public void setStatusCode(TStatusCode statusCode) { - this.statusCode = statusCode; - } - - public void unsetStatusCode() { - this.statusCode = null; - } - - /** Returns true if field statusCode is set (has been assigned a value) and false otherwise */ - public boolean isSetStatusCode() { - return this.statusCode != null; - } - - public void setStatusCodeIsSet(boolean value) { - if (!value) { - this.statusCode = null; - } - } - - public int getInfoMessagesSize() { - return (this.infoMessages == null) ? 0 : this.infoMessages.size(); - } - - public java.util.Iterator getInfoMessagesIterator() { - return (this.infoMessages == null) ? null : this.infoMessages.iterator(); - } - - public void addToInfoMessages(String elem) { - if (this.infoMessages == null) { - this.infoMessages = new ArrayList(); - } - this.infoMessages.add(elem); - } - - public List getInfoMessages() { - return this.infoMessages; - } - - public void setInfoMessages(List infoMessages) { - this.infoMessages = infoMessages; - } - - public void unsetInfoMessages() { - this.infoMessages = null; - } - - /** Returns true if field infoMessages is set (has been assigned a value) and false otherwise */ - public boolean isSetInfoMessages() { - return this.infoMessages != null; - } - - public void setInfoMessagesIsSet(boolean value) { - if (!value) { - this.infoMessages = null; - } - } - - public String getSqlState() { - return this.sqlState; - } - - public void setSqlState(String sqlState) { - this.sqlState = sqlState; - } - - public void unsetSqlState() { - this.sqlState = null; - } - - /** Returns true if field sqlState is set (has been assigned a value) and false otherwise */ - public boolean isSetSqlState() { - return this.sqlState != null; - } - - public void setSqlStateIsSet(boolean value) { - if (!value) { - this.sqlState = null; - } - } - - public int getErrorCode() { - return this.errorCode; - } - - public void setErrorCode(int errorCode) { - this.errorCode = errorCode; - setErrorCodeIsSet(true); - } - - public void unsetErrorCode() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorCode() { - return EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID); - } - - public void setErrorCodeIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value); - } - - public String getErrorMessage() { - return this.errorMessage; - } - - public void setErrorMessage(String errorMessage) { - this.errorMessage = errorMessage; - } - - 
public void unsetErrorMessage() { - this.errorMessage = null; - } - - /** Returns true if field errorMessage is set (has been assigned a value) and false otherwise */ - public boolean isSetErrorMessage() { - return this.errorMessage != null; - } - - public void setErrorMessageIsSet(boolean value) { - if (!value) { - this.errorMessage = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case STATUS_CODE: - if (value == null) { - unsetStatusCode(); - } else { - setStatusCode((TStatusCode)value); - } - break; - - case INFO_MESSAGES: - if (value == null) { - unsetInfoMessages(); - } else { - setInfoMessages((List)value); - } - break; - - case SQL_STATE: - if (value == null) { - unsetSqlState(); - } else { - setSqlState((String)value); - } - break; - - case ERROR_CODE: - if (value == null) { - unsetErrorCode(); - } else { - setErrorCode((Integer)value); - } - break; - - case ERROR_MESSAGE: - if (value == null) { - unsetErrorMessage(); - } else { - setErrorMessage((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case STATUS_CODE: - return getStatusCode(); - - case INFO_MESSAGES: - return getInfoMessages(); - - case SQL_STATE: - return getSqlState(); - - case ERROR_CODE: - return getErrorCode(); - - case ERROR_MESSAGE: - return getErrorMessage(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case STATUS_CODE: - return isSetStatusCode(); - case INFO_MESSAGES: - return isSetInfoMessages(); - case SQL_STATE: - return isSetSqlState(); - case ERROR_CODE: - return isSetErrorCode(); - case ERROR_MESSAGE: - return isSetErrorMessage(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStatus) - return this.equals((TStatus)that); - return false; - } - - public boolean equals(TStatus that) { - if (that == null) - return false; - - boolean this_present_statusCode = true && this.isSetStatusCode(); - boolean that_present_statusCode = true && that.isSetStatusCode(); - if (this_present_statusCode || that_present_statusCode) { - if (!(this_present_statusCode && that_present_statusCode)) - return false; - if (!this.statusCode.equals(that.statusCode)) - return false; - } - - boolean this_present_infoMessages = true && this.isSetInfoMessages(); - boolean that_present_infoMessages = true && that.isSetInfoMessages(); - if (this_present_infoMessages || that_present_infoMessages) { - if (!(this_present_infoMessages && that_present_infoMessages)) - return false; - if (!this.infoMessages.equals(that.infoMessages)) - return false; - } - - boolean this_present_sqlState = true && this.isSetSqlState(); - boolean that_present_sqlState = true && that.isSetSqlState(); - if (this_present_sqlState || that_present_sqlState) { - if (!(this_present_sqlState && that_present_sqlState)) - return false; - if (!this.sqlState.equals(that.sqlState)) - return false; - } - - boolean this_present_errorCode = true && this.isSetErrorCode(); - boolean that_present_errorCode = true && that.isSetErrorCode(); - if (this_present_errorCode || that_present_errorCode) { - if (!(this_present_errorCode && that_present_errorCode)) - return false; - if (this.errorCode != that.errorCode) - return false; - } - - 
boolean this_present_errorMessage = true && this.isSetErrorMessage(); - boolean that_present_errorMessage = true && that.isSetErrorMessage(); - if (this_present_errorMessage || that_present_errorMessage) { - if (!(this_present_errorMessage && that_present_errorMessage)) - return false; - if (!this.errorMessage.equals(that.errorMessage)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_statusCode = true && (isSetStatusCode()); - list.add(present_statusCode); - if (present_statusCode) - list.add(statusCode.getValue()); - - boolean present_infoMessages = true && (isSetInfoMessages()); - list.add(present_infoMessages); - if (present_infoMessages) - list.add(infoMessages); - - boolean present_sqlState = true && (isSetSqlState()); - list.add(present_sqlState); - if (present_sqlState) - list.add(sqlState); - - boolean present_errorCode = true && (isSetErrorCode()); - list.add(present_errorCode); - if (present_errorCode) - list.add(errorCode); - - boolean present_errorMessage = true && (isSetErrorMessage()); - list.add(present_errorMessage); - if (present_errorMessage) - list.add(errorMessage); - - return list.hashCode(); - } - - @Override - public int compareTo(TStatus other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetStatusCode()).compareTo(other.isSetStatusCode()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetStatusCode()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statusCode, other.statusCode); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetInfoMessages()).compareTo(other.isSetInfoMessages()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetInfoMessages()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.infoMessages, other.infoMessages); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetSqlState()).compareTo(other.isSetSqlState()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSqlState()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sqlState, other.sqlState); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorCode()).compareTo(other.isSetErrorCode()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorCode()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, other.errorCode); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetErrorMessage()).compareTo(other.isSetErrorMessage()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetErrorMessage()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorMessage, other.errorMessage); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - 
public String toString() { - StringBuilder sb = new StringBuilder("TStatus("); - boolean first = true; - - sb.append("statusCode:"); - if (this.statusCode == null) { - sb.append("null"); - } else { - sb.append(this.statusCode); - } - first = false; - if (isSetInfoMessages()) { - if (!first) sb.append(", "); - sb.append("infoMessages:"); - if (this.infoMessages == null) { - sb.append("null"); - } else { - sb.append(this.infoMessages); - } - first = false; - } - if (isSetSqlState()) { - if (!first) sb.append(", "); - sb.append("sqlState:"); - if (this.sqlState == null) { - sb.append("null"); - } else { - sb.append(this.sqlState); - } - first = false; - } - if (isSetErrorCode()) { - if (!first) sb.append(", "); - sb.append("errorCode:"); - sb.append(this.errorCode); - first = false; - } - if (isSetErrorMessage()) { - if (!first) sb.append(", "); - sb.append("errorMessage:"); - if (this.errorMessage == null) { - sb.append("null"); - } else { - sb.append(this.errorMessage); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetStatusCode()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'statusCode' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
- __isset_bitfield = 0; - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStatusStandardSchemeFactory implements SchemeFactory { - public TStatusStandardScheme getScheme() { - return new TStatusStandardScheme(); - } - } - - private static class TStatusStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStatus struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // STATUS_CODE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.statusCode = org.apache.hive.service.rpc.thrift.TStatusCode.findByValue(iprot.readI32()); - struct.setStatusCodeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // INFO_MESSAGES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list134 = iprot.readListBegin(); - struct.infoMessages = new ArrayList(_list134.size); - String _elem135; - for (int _i136 = 0; _i136 < _list134.size; ++_i136) - { - _elem135 = iprot.readString(); - struct.infoMessages.add(_elem135); - } - iprot.readListEnd(); - } - struct.setInfoMessagesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 3: // SQL_STATE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // ERROR_CODE - if (schemeField.type == org.apache.thrift.protocol.TType.I32) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 5: // ERROR_MESSAGE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStatus struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.statusCode != null) { - oprot.writeFieldBegin(STATUS_CODE_FIELD_DESC); - oprot.writeI32(struct.statusCode.getValue()); - oprot.writeFieldEnd(); - } - if (struct.infoMessages != null) { - if (struct.isSetInfoMessages()) { - oprot.writeFieldBegin(INFO_MESSAGES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.infoMessages.size())); - for (String _iter137 : struct.infoMessages) - { - oprot.writeString(_iter137); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - } - if (struct.sqlState != null) { - if (struct.isSetSqlState()) { - oprot.writeFieldBegin(SQL_STATE_FIELD_DESC); - oprot.writeString(struct.sqlState); 
- oprot.writeFieldEnd(); - } - } - if (struct.isSetErrorCode()) { - oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC); - oprot.writeI32(struct.errorCode); - oprot.writeFieldEnd(); - } - if (struct.errorMessage != null) { - if (struct.isSetErrorMessage()) { - oprot.writeFieldBegin(ERROR_MESSAGE_FIELD_DESC); - oprot.writeString(struct.errorMessage); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStatusTupleSchemeFactory implements SchemeFactory { - public TStatusTupleScheme getScheme() { - return new TStatusTupleScheme(); - } - } - - private static class TStatusTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TStatus struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeI32(struct.statusCode.getValue()); - BitSet optionals = new BitSet(); - if (struct.isSetInfoMessages()) { - optionals.set(0); - } - if (struct.isSetSqlState()) { - optionals.set(1); - } - if (struct.isSetErrorCode()) { - optionals.set(2); - } - if (struct.isSetErrorMessage()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); - if (struct.isSetInfoMessages()) { - { - oprot.writeI32(struct.infoMessages.size()); - for (String _iter138 : struct.infoMessages) - { - oprot.writeString(_iter138); - } - } - } - if (struct.isSetSqlState()) { - oprot.writeString(struct.sqlState); - } - if (struct.isSetErrorCode()) { - oprot.writeI32(struct.errorCode); - } - if (struct.isSetErrorMessage()) { - oprot.writeString(struct.errorMessage); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStatus struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.statusCode = org.apache.hive.service.rpc.thrift.TStatusCode.findByValue(iprot.readI32()); - struct.setStatusCodeIsSet(true); - BitSet incoming = iprot.readBitSet(4); - if (incoming.get(0)) { - { - org.apache.thrift.protocol.TList _list139 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.infoMessages = new ArrayList(_list139.size); - String _elem140; - for (int _i141 = 0; _i141 < _list139.size; ++_i141) - { - _elem140 = iprot.readString(); - struct.infoMessages.add(_elem140); - } - } - struct.setInfoMessagesIsSet(true); - } - if (incoming.get(1)) { - struct.sqlState = iprot.readString(); - struct.setSqlStateIsSet(true); - } - if (incoming.get(2)) { - struct.errorCode = iprot.readI32(); - struct.setErrorCodeIsSet(true); - } - if (incoming.get(3)) { - struct.errorMessage = iprot.readString(); - struct.setErrorMessageIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStatusCode.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStatusCode.java deleted file mode 100644 index fbf14184fa9a8..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStatusCode.java +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TStatusCode implements org.apache.thrift.TEnum { - SUCCESS_STATUS(0), - SUCCESS_WITH_INFO_STATUS(1), - STILL_EXECUTING_STATUS(2), - ERROR_STATUS(3), - 
INVALID_HANDLE_STATUS(4); - - private final int value; - - private TStatusCode(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. - */ - public static TStatusCode findByValue(int value) { - switch (value) { - case 0: - return SUCCESS_STATUS; - case 1: - return SUCCESS_WITH_INFO_STATUS; - case 2: - return STILL_EXECUTING_STATUS; - case 3: - return ERROR_STATUS; - case 4: - return INVALID_HANDLE_STATUS; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStringColumn.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStringColumn.java deleted file mode 100644 index c83a1fd0de3c2..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStringColumn.java +++ /dev/null @@ -1,548 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TStringColumn implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStringColumn"); - - private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1); - private static final org.apache.thrift.protocol.TField NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("nulls", org.apache.thrift.protocol.TType.STRING, (short)2); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStringColumnStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TStringColumnTupleSchemeFactory()); - } - - private List values; // required - private ByteBuffer nulls; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUES((short)1, "values"), - NULLS((short)2, "nulls"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUES - return VALUES; - case 2: // NULLS - return NULLS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); - tmpMap.put(_Fields.NULLS, new org.apache.thrift.meta_data.FieldMetaData("nulls", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , true))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStringColumn.class, metaDataMap); - } - - public TStringColumn() { - } - - public TStringColumn( - List values, - ByteBuffer nulls) - { - this(); - this.values = values; - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - /** - * Performs a deep copy on other. - */ - public TStringColumn(TStringColumn other) { - if (other.isSetValues()) { - List __this__values = new ArrayList(other.values); - this.values = __this__values; - } - if (other.isSetNulls()) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(other.nulls); - } - } - - public TStringColumn deepCopy() { - return new TStringColumn(this); - } - - @Override - public void clear() { - this.values = null; - this.nulls = null; - } - - public int getValuesSize() { - return (this.values == null) ? 0 : this.values.size(); - } - - public java.util.Iterator getValuesIterator() { - return (this.values == null) ? 
null : this.values.iterator(); - } - - public void addToValues(String elem) { - if (this.values == null) { - this.values = new ArrayList(); - } - this.values.add(elem); - } - - public List getValues() { - return this.values; - } - - public void setValues(List values) { - this.values = values; - } - - public void unsetValues() { - this.values = null; - } - - /** Returns true if field values is set (has been assigned a value) and false otherwise */ - public boolean isSetValues() { - return this.values != null; - } - - public void setValuesIsSet(boolean value) { - if (!value) { - this.values = null; - } - } - - public byte[] getNulls() { - setNulls(org.apache.thrift.TBaseHelper.rightSize(nulls)); - return nulls == null ? null : nulls.array(); - } - - public ByteBuffer bufferForNulls() { - return org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void setNulls(byte[] nulls) { - this.nulls = nulls == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(nulls, nulls.length)); - } - - public void setNulls(ByteBuffer nulls) { - this.nulls = org.apache.thrift.TBaseHelper.copyBinary(nulls); - } - - public void unsetNulls() { - this.nulls = null; - } - - /** Returns true if field nulls is set (has been assigned a value) and false otherwise */ - public boolean isSetNulls() { - return this.nulls != null; - } - - public void setNullsIsSet(boolean value) { - if (!value) { - this.nulls = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUES: - if (value == null) { - unsetValues(); - } else { - setValues((List)value); - } - break; - - case NULLS: - if (value == null) { - unsetNulls(); - } else { - setNulls((ByteBuffer)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUES: - return getValues(); - - case NULLS: - return getNulls(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUES: - return isSetValues(); - case NULLS: - return isSetNulls(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStringColumn) - return this.equals((TStringColumn)that); - return false; - } - - public boolean equals(TStringColumn that) { - if (that == null) - return false; - - boolean this_present_values = true && this.isSetValues(); - boolean that_present_values = true && that.isSetValues(); - if (this_present_values || that_present_values) { - if (!(this_present_values && that_present_values)) - return false; - if (!this.values.equals(that.values)) - return false; - } - - boolean this_present_nulls = true && this.isSetNulls(); - boolean that_present_nulls = true && that.isSetNulls(); - if (this_present_nulls || that_present_nulls) { - if (!(this_present_nulls && that_present_nulls)) - return false; - if (!this.nulls.equals(that.nulls)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_values = true && (isSetValues()); - list.add(present_values); - if (present_values) - list.add(values); - - boolean present_nulls = true && (isSetNulls()); - list.add(present_nulls); - if (present_nulls) - list.add(nulls); - - return list.hashCode(); - } - - @Override - public int 
compareTo(TStringColumn other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValues()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetNulls()).compareTo(other.isSetNulls()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNulls()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nulls, other.nulls); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TStringColumn("); - boolean first = true; - - sb.append("values:"); - if (this.values == null) { - sb.append("null"); - } else { - sb.append(this.values); - } - first = false; - if (!first) sb.append(", "); - sb.append("nulls:"); - if (this.nulls == null) { - sb.append("null"); - } else { - org.apache.thrift.TBaseHelper.toString(this.nulls, sb); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetValues()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString()); - } - - if (!isSetNulls()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nulls' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStringColumnStandardSchemeFactory implements SchemeFactory { - public TStringColumnStandardScheme getScheme() { - return new TStringColumnStandardScheme(); - } - } - - private static class TStringColumnStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStringColumn struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list102 = iprot.readListBegin(); - struct.values = new ArrayList(_list102.size); - String _elem103; - for (int _i104 = 0; _i104 < _list102.size; ++_i104) - { - _elem103 = iprot.readString(); - struct.values.add(_elem103); - } - iprot.readListEnd(); - } - struct.setValuesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // NULLS - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStringColumn struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.values != null) { - oprot.writeFieldBegin(VALUES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size())); - for (String _iter105 : struct.values) - { - oprot.writeString(_iter105); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - if (struct.nulls != null) { - oprot.writeFieldBegin(NULLS_FIELD_DESC); - oprot.writeBinary(struct.nulls); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStringColumnTupleSchemeFactory implements SchemeFactory { - public TStringColumnTupleScheme getScheme() { - return new TStringColumnTupleScheme(); - } - } - - private static class TStringColumnTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TStringColumn struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.values.size()); - for (String _iter106 : struct.values) - { - oprot.writeString(_iter106); - } - } - 
oprot.writeBinary(struct.nulls); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStringColumn struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.values = new ArrayList(_list107.size); - String _elem108; - for (int _i109 = 0; _i109 < _list107.size; ++_i109) - { - _elem108 = iprot.readString(); - struct.values.add(_elem108); - } - } - struct.setValuesIsSet(true); - struct.nulls = iprot.readBinary(); - struct.setNullsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStringValue.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStringValue.java deleted file mode 100644 index 13874e5516632..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStringValue.java +++ /dev/null @@ -1,393 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TStringValue implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStringValue"); - - private static final org.apache.thrift.protocol.TField VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("value", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStringValueStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TStringValueTupleSchemeFactory()); - } - - private String value; // optional - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - VALUE((short)1, "value"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // VALUE - return VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - private static final _Fields optionals[] = {_Fields.VALUE}; - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.VALUE, new org.apache.thrift.meta_data.FieldMetaData("value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStringValue.class, metaDataMap); - } - - public TStringValue() { - } - - /** - * Performs a deep copy on other. - */ - public TStringValue(TStringValue other) { - if (other.isSetValue()) { - this.value = other.value; - } - } - - public TStringValue deepCopy() { - return new TStringValue(this); - } - - @Override - public void clear() { - this.value = null; - } - - public String getValue() { - return this.value; - } - - public void setValue(String value) { - this.value = value; - } - - public void unsetValue() { - this.value = null; - } - - /** Returns true if field value is set (has been assigned a value) and false otherwise */ - public boolean isSetValue() { - return this.value != null; - } - - public void setValueIsSet(boolean value) { - if (!value) { - this.value = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case VALUE: - if (value == null) { - unsetValue(); - } else { - setValue((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case VALUE: - return getValue(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case VALUE: - return isSetValue(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStringValue) - return this.equals((TStringValue)that); - return false; - } - - public boolean equals(TStringValue that) { - if (that == null) - return false; - - boolean this_present_value = true && this.isSetValue(); - boolean that_present_value = true && that.isSetValue(); - if (this_present_value || that_present_value) { - if 
(!(this_present_value && that_present_value)) - return false; - if (!this.value.equals(that.value)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_value = true && (isSetValue()); - list.add(present_value); - if (present_value) - list.add(value); - - return list.hashCode(); - } - - @Override - public int compareTo(TStringValue other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetValue()).compareTo(other.isSetValue()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetValue()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.value, other.value); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TStringValue("); - boolean first = true; - - if (isSetValue()) { - sb.append("value:"); - if (this.value == null) { - sb.append("null"); - } else { - sb.append(this.value); - } - first = false; - } - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStringValueStandardSchemeFactory implements SchemeFactory { - public TStringValueStandardScheme getScheme() { - return new TStringValueStandardScheme(); - } - } - - private static class TStringValueStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStringValue struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // VALUE - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStringValue 
struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.value != null) { - if (struct.isSetValue()) { - oprot.writeFieldBegin(VALUE_FIELD_DESC); - oprot.writeString(struct.value); - oprot.writeFieldEnd(); - } - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStringValueTupleSchemeFactory implements SchemeFactory { - public TStringValueTupleScheme getScheme() { - return new TStringValueTupleScheme(); - } - } - - private static class TStringValueTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TStringValue struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetValue()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetValue()) { - oprot.writeString(struct.value); - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStringValue struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.value = iprot.readString(); - struct.setValueIsSet(true); - } - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStructTypeEntry.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStructTypeEntry.java deleted file mode 100644 index 6c2c4f5dd2ddf..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TStructTypeEntry.java +++ /dev/null @@ -1,452 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TStructTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TStructTypeEntry"); - - private static final org.apache.thrift.protocol.TField NAME_TO_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("nameToTypePtr", org.apache.thrift.protocol.TType.MAP, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TStructTypeEntryStandardSchemeFactory()); - 
schemes.put(TupleScheme.class, new TStructTypeEntryTupleSchemeFactory()); - } - - private Map nameToTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NAME_TO_TYPE_PTR((short)1, "nameToTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // NAME_TO_TYPE_PTR - return NAME_TO_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAME_TO_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("nameToTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr")))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TStructTypeEntry.class, metaDataMap); - } - - public TStructTypeEntry() { - } - - public TStructTypeEntry( - Map nameToTypePtr) - { - this(); - this.nameToTypePtr = nameToTypePtr; - } - - /** - * Performs a deep copy on other. - */ - public TStructTypeEntry(TStructTypeEntry other) { - if (other.isSetNameToTypePtr()) { - Map __this__nameToTypePtr = new HashMap(other.nameToTypePtr.size()); - for (Map.Entry other_element : other.nameToTypePtr.entrySet()) { - - String other_element_key = other_element.getKey(); - Integer other_element_value = other_element.getValue(); - - String __this__nameToTypePtr_copy_key = other_element_key; - - Integer __this__nameToTypePtr_copy_value = other_element_value; - - __this__nameToTypePtr.put(__this__nameToTypePtr_copy_key, __this__nameToTypePtr_copy_value); - } - this.nameToTypePtr = __this__nameToTypePtr; - } - } - - public TStructTypeEntry deepCopy() { - return new TStructTypeEntry(this); - } - - @Override - public void clear() { - this.nameToTypePtr = null; - } - - public int getNameToTypePtrSize() { - return (this.nameToTypePtr == null) ? 
0 : this.nameToTypePtr.size(); - } - - public void putToNameToTypePtr(String key, int val) { - if (this.nameToTypePtr == null) { - this.nameToTypePtr = new HashMap(); - } - this.nameToTypePtr.put(key, val); - } - - public Map getNameToTypePtr() { - return this.nameToTypePtr; - } - - public void setNameToTypePtr(Map nameToTypePtr) { - this.nameToTypePtr = nameToTypePtr; - } - - public void unsetNameToTypePtr() { - this.nameToTypePtr = null; - } - - /** Returns true if field nameToTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetNameToTypePtr() { - return this.nameToTypePtr != null; - } - - public void setNameToTypePtrIsSet(boolean value) { - if (!value) { - this.nameToTypePtr = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case NAME_TO_TYPE_PTR: - if (value == null) { - unsetNameToTypePtr(); - } else { - setNameToTypePtr((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case NAME_TO_TYPE_PTR: - return getNameToTypePtr(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case NAME_TO_TYPE_PTR: - return isSetNameToTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TStructTypeEntry) - return this.equals((TStructTypeEntry)that); - return false; - } - - public boolean equals(TStructTypeEntry that) { - if (that == null) - return false; - - boolean this_present_nameToTypePtr = true && this.isSetNameToTypePtr(); - boolean that_present_nameToTypePtr = true && that.isSetNameToTypePtr(); - if (this_present_nameToTypePtr || that_present_nameToTypePtr) { - if (!(this_present_nameToTypePtr && that_present_nameToTypePtr)) - return false; - if (!this.nameToTypePtr.equals(that.nameToTypePtr)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_nameToTypePtr = true && (isSetNameToTypePtr()); - list.add(present_nameToTypePtr); - if (present_nameToTypePtr) - list.add(nameToTypePtr); - - return list.hashCode(); - } - - @Override - public int compareTo(TStructTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetNameToTypePtr()).compareTo(other.isSetNameToTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNameToTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nameToTypePtr, other.nameToTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TStructTypeEntry("); - boolean first = true; - - 
sb.append("nameToTypePtr:"); - if (this.nameToTypePtr == null) { - sb.append("null"); - } else { - sb.append(this.nameToTypePtr); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNameToTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nameToTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TStructTypeEntryStandardSchemeFactory implements SchemeFactory { - public TStructTypeEntryStandardScheme getScheme() { - return new TStructTypeEntryStandardScheme(); - } - } - - private static class TStructTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TStructTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // NAME_TO_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map10 = iprot.readMapBegin(); - struct.nameToTypePtr = new HashMap(2*_map10.size); - String _key11; - int _val12; - for (int _i13 = 0; _i13 < _map10.size; ++_i13) - { - _key11 = iprot.readString(); - _val12 = iprot.readI32(); - struct.nameToTypePtr.put(_key11, _val12); - } - iprot.readMapEnd(); - } - struct.setNameToTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TStructTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.nameToTypePtr != null) { - oprot.writeFieldBegin(NAME_TO_TYPE_PTR_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.nameToTypePtr.size())); - for (Map.Entry _iter14 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter14.getKey()); - oprot.writeI32(_iter14.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TStructTypeEntryTupleSchemeFactory implements SchemeFactory { - public TStructTypeEntryTupleScheme getScheme() { - return new TStructTypeEntryTupleScheme(); - } - } - - private static class TStructTypeEntryTupleScheme extends TupleScheme { - - @Override - public void 
write(org.apache.thrift.protocol.TProtocol prot, TStructTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.nameToTypePtr.size()); - for (Map.Entry _iter15 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter15.getKey()); - oprot.writeI32(_iter15.getValue()); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TStructTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TMap _map16 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.nameToTypePtr = new HashMap(2*_map16.size); - String _key17; - int _val18; - for (int _i19 = 0; _i19 < _map16.size; ++_i19) - { - _key17 = iprot.readString(); - _val18 = iprot.readI32(); - struct.nameToTypePtr.put(_key17, _val18); - } - } - struct.setNameToTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTableSchema.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTableSchema.java deleted file mode 100644 index 007b1603546ac..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTableSchema.java +++ /dev/null @@ -1,443 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TTableSchema implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTableSchema"); - - private static final org.apache.thrift.protocol.TField COLUMNS_FIELD_DESC = new org.apache.thrift.protocol.TField("columns", org.apache.thrift.protocol.TType.LIST, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TTableSchemaStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TTableSchemaTupleSchemeFactory()); - } - - private List columns; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - COLUMNS((short)1, "columns"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // COLUMNS - return COLUMNS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.COLUMNS, new org.apache.thrift.meta_data.FieldMetaData("columns", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TColumnDesc.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTableSchema.class, metaDataMap); - } - - public TTableSchema() { - } - - public TTableSchema( - List columns) - { - this(); - this.columns = columns; - } - - /** - * Performs a deep copy on other. - */ - public TTableSchema(TTableSchema other) { - if (other.isSetColumns()) { - List __this__columns = new ArrayList(other.columns.size()); - for (TColumnDesc other_element : other.columns) { - __this__columns.add(new TColumnDesc(other_element)); - } - this.columns = __this__columns; - } - } - - public TTableSchema deepCopy() { - return new TTableSchema(this); - } - - @Override - public void clear() { - this.columns = null; - } - - public int getColumnsSize() { - return (this.columns == null) ? 0 : this.columns.size(); - } - - public java.util.Iterator getColumnsIterator() { - return (this.columns == null) ? 
null : this.columns.iterator(); - } - - public void addToColumns(TColumnDesc elem) { - if (this.columns == null) { - this.columns = new ArrayList(); - } - this.columns.add(elem); - } - - public List getColumns() { - return this.columns; - } - - public void setColumns(List columns) { - this.columns = columns; - } - - public void unsetColumns() { - this.columns = null; - } - - /** Returns true if field columns is set (has been assigned a value) and false otherwise */ - public boolean isSetColumns() { - return this.columns != null; - } - - public void setColumnsIsSet(boolean value) { - if (!value) { - this.columns = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case COLUMNS: - if (value == null) { - unsetColumns(); - } else { - setColumns((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case COLUMNS: - return getColumns(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case COLUMNS: - return isSetColumns(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TTableSchema) - return this.equals((TTableSchema)that); - return false; - } - - public boolean equals(TTableSchema that) { - if (that == null) - return false; - - boolean this_present_columns = true && this.isSetColumns(); - boolean that_present_columns = true && that.isSetColumns(); - if (this_present_columns || that_present_columns) { - if (!(this_present_columns && that_present_columns)) - return false; - if (!this.columns.equals(that.columns)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_columns = true && (isSetColumns()); - list.add(present_columns); - if (present_columns) - list.add(columns); - - return list.hashCode(); - } - - @Override - public int compareTo(TTableSchema other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetColumns()).compareTo(other.isSetColumns()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetColumns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columns, other.columns); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TTableSchema("); - boolean first = true; - - sb.append("columns:"); - if (this.columns == null) { - sb.append("null"); - } else { - sb.append(this.columns); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetColumns()) { 
- throw new org.apache.thrift.protocol.TProtocolException("Required field 'columns' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TTableSchemaStandardSchemeFactory implements SchemeFactory { - public TTableSchemaStandardScheme getScheme() { - return new TTableSchemaStandardScheme(); - } - } - - private static class TTableSchemaStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TTableSchema struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // COLUMNS - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list38 = iprot.readListBegin(); - struct.columns = new ArrayList(_list38.size); - TColumnDesc _elem39; - for (int _i40 = 0; _i40 < _list38.size; ++_i40) - { - _elem39 = new TColumnDesc(); - _elem39.read(iprot); - struct.columns.add(_elem39); - } - iprot.readListEnd(); - } - struct.setColumnsIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TTableSchema struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.columns != null) { - oprot.writeFieldBegin(COLUMNS_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.columns.size())); - for (TColumnDesc _iter41 : struct.columns) - { - _iter41.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TTableSchemaTupleSchemeFactory implements SchemeFactory { - public TTableSchemaTupleScheme getScheme() { - return new TTableSchemaTupleScheme(); - } - } - - private static class TTableSchemaTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TTableSchema struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.columns.size()); - for (TColumnDesc _iter42 : struct.columns) - { - _iter42.write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TTableSchema struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list43 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.columns = new ArrayList(_list43.size); - TColumnDesc _elem44; - for (int _i45 = 0; _i45 < _list43.size; ++_i45) - { - _elem44 = new TColumnDesc(); - _elem44.read(iprot); - struct.columns.add(_elem44); - } - } - struct.setColumnsIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeDesc.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeDesc.java deleted file mode 100644 index 055a14d06a2d6..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeDesc.java +++ /dev/null @@ -1,443 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TTypeDesc implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeDesc"); - - private static final org.apache.thrift.protocol.TField TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("types", org.apache.thrift.protocol.TType.LIST, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TTypeDescStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TTypeDescTupleSchemeFactory()); - } - - private List types; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TYPES((short)1, "types"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // TYPES - return TYPES; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. 
- */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TYPES, new org.apache.thrift.meta_data.FieldMetaData("types", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeEntry.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeDesc.class, metaDataMap); - } - - public TTypeDesc() { - } - - public TTypeDesc( - List types) - { - this(); - this.types = types; - } - - /** - * Performs a deep copy on other. - */ - public TTypeDesc(TTypeDesc other) { - if (other.isSetTypes()) { - List __this__types = new ArrayList(other.types.size()); - for (TTypeEntry other_element : other.types) { - __this__types.add(new TTypeEntry(other_element)); - } - this.types = __this__types; - } - } - - public TTypeDesc deepCopy() { - return new TTypeDesc(this); - } - - @Override - public void clear() { - this.types = null; - } - - public int getTypesSize() { - return (this.types == null) ? 0 : this.types.size(); - } - - public java.util.Iterator getTypesIterator() { - return (this.types == null) ? 
null : this.types.iterator(); - } - - public void addToTypes(TTypeEntry elem) { - if (this.types == null) { - this.types = new ArrayList(); - } - this.types.add(elem); - } - - public List getTypes() { - return this.types; - } - - public void setTypes(List types) { - this.types = types; - } - - public void unsetTypes() { - this.types = null; - } - - /** Returns true if field types is set (has been assigned a value) and false otherwise */ - public boolean isSetTypes() { - return this.types != null; - } - - public void setTypesIsSet(boolean value) { - if (!value) { - this.types = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case TYPES: - if (value == null) { - unsetTypes(); - } else { - setTypes((List)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case TYPES: - return getTypes(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case TYPES: - return isSetTypes(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TTypeDesc) - return this.equals((TTypeDesc)that); - return false; - } - - public boolean equals(TTypeDesc that) { - if (that == null) - return false; - - boolean this_present_types = true && this.isSetTypes(); - boolean that_present_types = true && that.isSetTypes(); - if (this_present_types || that_present_types) { - if (!(this_present_types && that_present_types)) - return false; - if (!this.types.equals(that.types)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_types = true && (isSetTypes()); - list.add(present_types); - if (present_types) - list.add(types); - - return list.hashCode(); - } - - @Override - public int compareTo(TTypeDesc other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetTypes()).compareTo(other.isSetTypes()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTypes()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.types, other.types); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TTypeDesc("); - boolean first = true; - - sb.append("types:"); - if (this.types == null) { - sb.append("null"); - } else { - sb.append(this.types); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetTypes()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'types' is unset! 
Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TTypeDescStandardSchemeFactory implements SchemeFactory { - public TTypeDescStandardScheme getScheme() { - return new TTypeDescStandardScheme(); - } - } - - private static class TTypeDescStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TTypeDesc struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TYPES - if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { - { - org.apache.thrift.protocol.TList _list30 = iprot.readListBegin(); - struct.types = new ArrayList(_list30.size); - TTypeEntry _elem31; - for (int _i32 = 0; _i32 < _list30.size; ++_i32) - { - _elem31 = new TTypeEntry(); - _elem31.read(iprot); - struct.types.add(_elem31); - } - iprot.readListEnd(); - } - struct.setTypesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TTypeDesc struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.types != null) { - oprot.writeFieldBegin(TYPES_FIELD_DESC); - { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.types.size())); - for (TTypeEntry _iter33 : struct.types) - { - _iter33.write(oprot); - } - oprot.writeListEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TTypeDescTupleSchemeFactory implements SchemeFactory { - public TTypeDescTupleScheme getScheme() { - return new TTypeDescTupleScheme(); - } - } - - private static class TTypeDescTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TTypeDesc struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.types.size()); - for (TTypeEntry _iter34 : struct.types) - { - _iter34.write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TTypeDesc struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TList _list35 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.types = new ArrayList(_list35.size); - TTypeEntry _elem36; - for (int _i37 = 0; _i37 < 
_list35.size; ++_i37) - { - _elem36 = new TTypeEntry(); - _elem36.read(iprot); - struct.types.add(_elem36); - } - } - struct.setTypesIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeEntry.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeEntry.java deleted file mode 100644 index b609151b8fbee..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeEntry.java +++ /dev/null @@ -1,614 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -public class TTypeEntry extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeEntry"); - private static final org.apache.thrift.protocol.TField PRIMITIVE_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("primitiveEntry", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField ARRAY_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("arrayEntry", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField MAP_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("mapEntry", org.apache.thrift.protocol.TType.STRUCT, (short)3); - private static final org.apache.thrift.protocol.TField STRUCT_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("structEntry", org.apache.thrift.protocol.TType.STRUCT, (short)4); - private static final org.apache.thrift.protocol.TField UNION_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("unionEntry", org.apache.thrift.protocol.TType.STRUCT, (short)5); - private static final org.apache.thrift.protocol.TField USER_DEFINED_TYPE_ENTRY_FIELD_DESC = new org.apache.thrift.protocol.TField("userDefinedTypeEntry", org.apache.thrift.protocol.TType.STRUCT, (short)6); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - PRIMITIVE_ENTRY((short)1, "primitiveEntry"), - ARRAY_ENTRY((short)2, "arrayEntry"), - MAP_ENTRY((short)3, "mapEntry"), - STRUCT_ENTRY((short)4, "structEntry"), - UNION_ENTRY((short)5, "unionEntry"), - USER_DEFINED_TYPE_ENTRY((short)6, "userDefinedTypeEntry"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // PRIMITIVE_ENTRY - return PRIMITIVE_ENTRY; - case 2: // ARRAY_ENTRY - return ARRAY_ENTRY; - case 3: // MAP_ENTRY - return MAP_ENTRY; - case 4: // STRUCT_ENTRY - return STRUCT_ENTRY; - case 5: // UNION_ENTRY - return UNION_ENTRY; - case 6: // USER_DEFINED_TYPE_ENTRY - return USER_DEFINED_TYPE_ENTRY; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.PRIMITIVE_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("primitiveEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TPrimitiveTypeEntry.class))); - tmpMap.put(_Fields.ARRAY_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("arrayEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TArrayTypeEntry.class))); - tmpMap.put(_Fields.MAP_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("mapEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TMapTypeEntry.class))); - tmpMap.put(_Fields.STRUCT_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("structEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStructTypeEntry.class))); - tmpMap.put(_Fields.UNION_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("unionEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TUnionTypeEntry.class))); - tmpMap.put(_Fields.USER_DEFINED_TYPE_ENTRY, new org.apache.thrift.meta_data.FieldMetaData("userDefinedTypeEntry", org.apache.thrift.TFieldRequirementType.DEFAULT, - new 
org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TUserDefinedTypeEntry.class))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeEntry.class, metaDataMap); - } - - public TTypeEntry() { - super(); - } - - public TTypeEntry(TTypeEntry._Fields setField, Object value) { - super(setField, value); - } - - public TTypeEntry(TTypeEntry other) { - super(other); - } - public TTypeEntry deepCopy() { - return new TTypeEntry(this); - } - - public static TTypeEntry primitiveEntry(TPrimitiveTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setPrimitiveEntry(value); - return x; - } - - public static TTypeEntry arrayEntry(TArrayTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setArrayEntry(value); - return x; - } - - public static TTypeEntry mapEntry(TMapTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setMapEntry(value); - return x; - } - - public static TTypeEntry structEntry(TStructTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setStructEntry(value); - return x; - } - - public static TTypeEntry unionEntry(TUnionTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setUnionEntry(value); - return x; - } - - public static TTypeEntry userDefinedTypeEntry(TUserDefinedTypeEntry value) { - TTypeEntry x = new TTypeEntry(); - x.setUserDefinedTypeEntry(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case PRIMITIVE_ENTRY: - if (value instanceof TPrimitiveTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TPrimitiveTypeEntry for field 'primitiveEntry', but got " + value.getClass().getSimpleName()); - case ARRAY_ENTRY: - if (value instanceof TArrayTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TArrayTypeEntry for field 'arrayEntry', but got " + value.getClass().getSimpleName()); - case MAP_ENTRY: - if (value instanceof TMapTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TMapTypeEntry for field 'mapEntry', but got " + value.getClass().getSimpleName()); - case STRUCT_ENTRY: - if (value instanceof TStructTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TStructTypeEntry for field 'structEntry', but got " + value.getClass().getSimpleName()); - case UNION_ENTRY: - if (value instanceof TUnionTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TUnionTypeEntry for field 'unionEntry', but got " + value.getClass().getSimpleName()); - case USER_DEFINED_TYPE_ENTRY: - if (value instanceof TUserDefinedTypeEntry) { - break; - } - throw new ClassCastException("Was expecting value of type TUserDefinedTypeEntry for field 'userDefinedTypeEntry', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case PRIMITIVE_ENTRY: - if (field.type == PRIMITIVE_ENTRY_FIELD_DESC.type) { - TPrimitiveTypeEntry primitiveEntry; - primitiveEntry = new TPrimitiveTypeEntry(); - primitiveEntry.read(iprot); - return primitiveEntry; - } else { - 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case ARRAY_ENTRY: - if (field.type == ARRAY_ENTRY_FIELD_DESC.type) { - TArrayTypeEntry arrayEntry; - arrayEntry = new TArrayTypeEntry(); - arrayEntry.read(iprot); - return arrayEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case MAP_ENTRY: - if (field.type == MAP_ENTRY_FIELD_DESC.type) { - TMapTypeEntry mapEntry; - mapEntry = new TMapTypeEntry(); - mapEntry.read(iprot); - return mapEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRUCT_ENTRY: - if (field.type == STRUCT_ENTRY_FIELD_DESC.type) { - TStructTypeEntry structEntry; - structEntry = new TStructTypeEntry(); - structEntry.read(iprot); - return structEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case UNION_ENTRY: - if (field.type == UNION_ENTRY_FIELD_DESC.type) { - TUnionTypeEntry unionEntry; - unionEntry = new TUnionTypeEntry(); - unionEntry.read(iprot); - return unionEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case USER_DEFINED_TYPE_ENTRY: - if (field.type == USER_DEFINED_TYPE_ENTRY_FIELD_DESC.type) { - TUserDefinedTypeEntry userDefinedTypeEntry; - userDefinedTypeEntry = new TUserDefinedTypeEntry(); - userDefinedTypeEntry.read(iprot); - return userDefinedTypeEntry; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case PRIMITIVE_ENTRY: - TPrimitiveTypeEntry primitiveEntry = (TPrimitiveTypeEntry)value_; - primitiveEntry.write(oprot); - return; - case ARRAY_ENTRY: - TArrayTypeEntry arrayEntry = (TArrayTypeEntry)value_; - arrayEntry.write(oprot); - return; - case MAP_ENTRY: - TMapTypeEntry mapEntry = (TMapTypeEntry)value_; - mapEntry.write(oprot); - return; - case STRUCT_ENTRY: - TStructTypeEntry structEntry = (TStructTypeEntry)value_; - structEntry.write(oprot); - return; - case UNION_ENTRY: - TUnionTypeEntry unionEntry = (TUnionTypeEntry)value_; - unionEntry.write(oprot); - return; - case USER_DEFINED_TYPE_ENTRY: - TUserDefinedTypeEntry userDefinedTypeEntry = (TUserDefinedTypeEntry)value_; - userDefinedTypeEntry.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case PRIMITIVE_ENTRY: - TPrimitiveTypeEntry primitiveEntry; - primitiveEntry = new TPrimitiveTypeEntry(); - primitiveEntry.read(iprot); - return primitiveEntry; - case ARRAY_ENTRY: - TArrayTypeEntry arrayEntry; - arrayEntry = new TArrayTypeEntry(); - arrayEntry.read(iprot); - return arrayEntry; - case MAP_ENTRY: - TMapTypeEntry mapEntry; - mapEntry = new TMapTypeEntry(); - mapEntry.read(iprot); - return mapEntry; - case STRUCT_ENTRY: - TStructTypeEntry structEntry; - structEntry = new 
TStructTypeEntry(); - structEntry.read(iprot); - return structEntry; - case UNION_ENTRY: - TUnionTypeEntry unionEntry; - unionEntry = new TUnionTypeEntry(); - unionEntry.read(iprot); - return unionEntry; - case USER_DEFINED_TYPE_ENTRY: - TUserDefinedTypeEntry userDefinedTypeEntry; - userDefinedTypeEntry = new TUserDefinedTypeEntry(); - userDefinedTypeEntry.read(iprot); - return userDefinedTypeEntry; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case PRIMITIVE_ENTRY: - TPrimitiveTypeEntry primitiveEntry = (TPrimitiveTypeEntry)value_; - primitiveEntry.write(oprot); - return; - case ARRAY_ENTRY: - TArrayTypeEntry arrayEntry = (TArrayTypeEntry)value_; - arrayEntry.write(oprot); - return; - case MAP_ENTRY: - TMapTypeEntry mapEntry = (TMapTypeEntry)value_; - mapEntry.write(oprot); - return; - case STRUCT_ENTRY: - TStructTypeEntry structEntry = (TStructTypeEntry)value_; - structEntry.write(oprot); - return; - case UNION_ENTRY: - TUnionTypeEntry unionEntry = (TUnionTypeEntry)value_; - unionEntry.write(oprot); - return; - case USER_DEFINED_TYPE_ENTRY: - TUserDefinedTypeEntry userDefinedTypeEntry = (TUserDefinedTypeEntry)value_; - userDefinedTypeEntry.write(oprot); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case PRIMITIVE_ENTRY: - return PRIMITIVE_ENTRY_FIELD_DESC; - case ARRAY_ENTRY: - return ARRAY_ENTRY_FIELD_DESC; - case MAP_ENTRY: - return MAP_ENTRY_FIELD_DESC; - case STRUCT_ENTRY: - return STRUCT_ENTRY_FIELD_DESC; - case UNION_ENTRY: - return UNION_ENTRY_FIELD_DESC; - case USER_DEFINED_TYPE_ENTRY: - return USER_DEFINED_TYPE_ENTRY_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public TPrimitiveTypeEntry getPrimitiveEntry() { - if (getSetField() == _Fields.PRIMITIVE_ENTRY) { - return (TPrimitiveTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'primitiveEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setPrimitiveEntry(TPrimitiveTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.PRIMITIVE_ENTRY; - value_ = value; - } - - public TArrayTypeEntry getArrayEntry() { - if (getSetField() == _Fields.ARRAY_ENTRY) { - return (TArrayTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'arrayEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setArrayEntry(TArrayTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.ARRAY_ENTRY; - value_ = value; - } - - public TMapTypeEntry getMapEntry() { - if (getSetField() == _Fields.MAP_ENTRY) { - return (TMapTypeEntry)getFieldValue(); - } else 
{ - throw new RuntimeException("Cannot get field 'mapEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setMapEntry(TMapTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.MAP_ENTRY; - value_ = value; - } - - public TStructTypeEntry getStructEntry() { - if (getSetField() == _Fields.STRUCT_ENTRY) { - return (TStructTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'structEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStructEntry(TStructTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.STRUCT_ENTRY; - value_ = value; - } - - public TUnionTypeEntry getUnionEntry() { - if (getSetField() == _Fields.UNION_ENTRY) { - return (TUnionTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'unionEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setUnionEntry(TUnionTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.UNION_ENTRY; - value_ = value; - } - - public TUserDefinedTypeEntry getUserDefinedTypeEntry() { - if (getSetField() == _Fields.USER_DEFINED_TYPE_ENTRY) { - return (TUserDefinedTypeEntry)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'userDefinedTypeEntry' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setUserDefinedTypeEntry(TUserDefinedTypeEntry value) { - if (value == null) throw new NullPointerException(); - setField_ = _Fields.USER_DEFINED_TYPE_ENTRY; - value_ = value; - } - - public boolean isSetPrimitiveEntry() { - return setField_ == _Fields.PRIMITIVE_ENTRY; - } - - - public boolean isSetArrayEntry() { - return setField_ == _Fields.ARRAY_ENTRY; - } - - - public boolean isSetMapEntry() { - return setField_ == _Fields.MAP_ENTRY; - } - - - public boolean isSetStructEntry() { - return setField_ == _Fields.STRUCT_ENTRY; - } - - - public boolean isSetUnionEntry() { - return setField_ == _Fields.UNION_ENTRY; - } - - - public boolean isSetUserDefinedTypeEntry() { - return setField_ == _Fields.USER_DEFINED_TYPE_ENTRY; - } - - - public boolean equals(Object other) { - if (other instanceof TTypeEntry) { - return equals((TTypeEntry)other); - } else { - return false; - } - } - - public boolean equals(TTypeEntry other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TTypeEntry other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - List list = new ArrayList(); - list.add(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - list.add(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - list.add(value); - } - } - return list.hashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeId.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeId.java deleted file mode 100644 index a3735ebf3ec07..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeId.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - - -import java.util.Map; -import java.util.HashMap; -import org.apache.thrift.TEnum; - -public enum TTypeId implements org.apache.thrift.TEnum { - BOOLEAN_TYPE(0), - TINYINT_TYPE(1), - SMALLINT_TYPE(2), - INT_TYPE(3), - BIGINT_TYPE(4), - FLOAT_TYPE(5), - DOUBLE_TYPE(6), - STRING_TYPE(7), - TIMESTAMP_TYPE(8), - BINARY_TYPE(9), - ARRAY_TYPE(10), - MAP_TYPE(11), - STRUCT_TYPE(12), - UNION_TYPE(13), - USER_DEFINED_TYPE(14), - DECIMAL_TYPE(15), - NULL_TYPE(16), - DATE_TYPE(17), - VARCHAR_TYPE(18), - CHAR_TYPE(19), - INTERVAL_YEAR_MONTH_TYPE(20), - INTERVAL_DAY_TIME_TYPE(21); - - private final int value; - - private TTypeId(int value) { - this.value = value; - } - - /** - * Get the integer value of this enum value, as defined in the Thrift IDL. - */ - public int getValue() { - return value; - } - - /** - * Find a the enum type by its integer value, as defined in the Thrift IDL. - * @return null if the value is not found. 
- */ - public static TTypeId findByValue(int value) { - switch (value) { - case 0: - return BOOLEAN_TYPE; - case 1: - return TINYINT_TYPE; - case 2: - return SMALLINT_TYPE; - case 3: - return INT_TYPE; - case 4: - return BIGINT_TYPE; - case 5: - return FLOAT_TYPE; - case 6: - return DOUBLE_TYPE; - case 7: - return STRING_TYPE; - case 8: - return TIMESTAMP_TYPE; - case 9: - return BINARY_TYPE; - case 10: - return ARRAY_TYPE; - case 11: - return MAP_TYPE; - case 12: - return STRUCT_TYPE; - case 13: - return UNION_TYPE; - case 14: - return USER_DEFINED_TYPE; - case 15: - return DECIMAL_TYPE; - case 16: - return NULL_TYPE; - case 17: - return DATE_TYPE; - case 18: - return VARCHAR_TYPE; - case 19: - return CHAR_TYPE; - case 20: - return INTERVAL_YEAR_MONTH_TYPE; - case 21: - return INTERVAL_DAY_TIME_TYPE; - default: - return null; - } - } -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeQualifierValue.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeQualifierValue.java deleted file mode 100644 index 1720c0e9a72c2..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeQualifierValue.java +++ /dev/null @@ -1,365 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -public class TTypeQualifierValue extends org.apache.thrift.TUnion { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeQualifierValue"); - private static final org.apache.thrift.protocol.TField I32_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("i32Value", org.apache.thrift.protocol.TType.I32, (short)1); - private static final org.apache.thrift.protocol.TField STRING_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("stringValue", org.apache.thrift.protocol.TType.STRING, (short)2); - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - I32_VALUE((short)1, "i32Value"), - STRING_VALUE((short)2, "stringValue"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. 
- */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // I32_VALUE - return I32_VALUE; - case 2: // STRING_VALUE - return STRING_VALUE; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.I32_VALUE, new org.apache.thrift.meta_data.FieldMetaData("i32Value", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); - tmpMap.put(_Fields.STRING_VALUE, new org.apache.thrift.meta_data.FieldMetaData("stringValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeQualifierValue.class, metaDataMap); - } - - public TTypeQualifierValue() { - super(); - } - - public TTypeQualifierValue(TTypeQualifierValue._Fields setField, Object value) { - super(setField, value); - } - - public TTypeQualifierValue(TTypeQualifierValue other) { - super(other); - } - public TTypeQualifierValue deepCopy() { - return new TTypeQualifierValue(this); - } - - public static TTypeQualifierValue i32Value(int value) { - TTypeQualifierValue x = new TTypeQualifierValue(); - x.setI32Value(value); - return x; - } - - public static TTypeQualifierValue stringValue(String value) { - TTypeQualifierValue x = new TTypeQualifierValue(); - x.setStringValue(value); - return x; - } - - - @Override - protected void checkType(_Fields setField, Object value) throws ClassCastException { - switch (setField) { - case I32_VALUE: - if (value instanceof Integer) { - break; - } - throw new ClassCastException("Was expecting value of type Integer for field 'i32Value', but got " + value.getClass().getSimpleName()); - case STRING_VALUE: - if (value instanceof String) { - break; - } - throw new ClassCastException("Was expecting value of type String for field 'stringValue', but got " + value.getClass().getSimpleName()); - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(field.id); - if (setField != null) { - switch (setField) { - case I32_VALUE: - if (field.type == I32_VALUE_FIELD_DESC.type) { - Integer i32Value; - i32Value = 
iprot.readI32(); - return i32Value; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - case STRING_VALUE: - if (field.type == STRING_VALUE_FIELD_DESC.type) { - String stringValue; - stringValue = iprot.readString(); - return stringValue; - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type); - return null; - } - } - - @Override - protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case I32_VALUE: - Integer i32Value = (Integer)value_; - oprot.writeI32(i32Value); - return; - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException { - _Fields setField = _Fields.findByThriftId(fieldID); - if (setField != null) { - switch (setField) { - case I32_VALUE: - Integer i32Value; - i32Value = iprot.readI32(); - return i32Value; - case STRING_VALUE: - String stringValue; - stringValue = iprot.readString(); - return stringValue; - default: - throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!"); - } - } else { - throw new TProtocolException("Couldn't find a field with field id " + fieldID); - } - } - - @Override - protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - switch (setField_) { - case I32_VALUE: - Integer i32Value = (Integer)value_; - oprot.writeI32(i32Value); - return; - case STRING_VALUE: - String stringValue = (String)value_; - oprot.writeString(stringValue); - return; - default: - throw new IllegalStateException("Cannot write union with unknown field " + setField_); - } - } - - @Override - protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) { - switch (setField) { - case I32_VALUE: - return I32_VALUE_FIELD_DESC; - case STRING_VALUE: - return STRING_VALUE_FIELD_DESC; - default: - throw new IllegalArgumentException("Unknown field id " + setField); - } - } - - @Override - protected org.apache.thrift.protocol.TStruct getStructDesc() { - return STRUCT_DESC; - } - - @Override - protected _Fields enumForId(short id) { - return _Fields.findByThriftIdOrThrow(id); - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - - public int getI32Value() { - if (getSetField() == _Fields.I32_VALUE) { - return (Integer)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'i32Value' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setI32Value(int value) { - setField_ = _Fields.I32_VALUE; - value_ = value; - } - - public String getStringValue() { - if (getSetField() == _Fields.STRING_VALUE) { - return (String)getFieldValue(); - } else { - throw new RuntimeException("Cannot get field 'stringValue' because union is currently set to " + getFieldDesc(getSetField()).name); - } - } - - public void setStringValue(String value) { - if (value == null) throw new NullPointerException(); - setField_ = 
_Fields.STRING_VALUE; - value_ = value; - } - - public boolean isSetI32Value() { - return setField_ == _Fields.I32_VALUE; - } - - - public boolean isSetStringValue() { - return setField_ == _Fields.STRING_VALUE; - } - - - public boolean equals(Object other) { - if (other instanceof TTypeQualifierValue) { - return equals((TTypeQualifierValue)other); - } else { - return false; - } - } - - public boolean equals(TTypeQualifierValue other) { - return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue()); - } - - @Override - public int compareTo(TTypeQualifierValue other) { - int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField()); - if (lastComparison == 0) { - return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue()); - } - return lastComparison; - } - - - @Override - public int hashCode() { - List list = new ArrayList(); - list.add(this.getClass().getName()); - org.apache.thrift.TFieldIdEnum setField = getSetField(); - if (setField != null) { - list.add(setField.getThriftFieldId()); - Object value = getFieldValue(); - if (value instanceof org.apache.thrift.TEnum) { - list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue()); - } else { - list.add(value); - } - } - return list.hashCode(); - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - -} diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeQualifiers.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeQualifiers.java deleted file mode 100644 index f46d2ceb79caa..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TTypeQualifiers.java +++ /dev/null @@ -1,454 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = 
"Autogenerated by Thrift Compiler (0.9.3)") -public class TTypeQualifiers implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TTypeQualifiers"); - - private static final org.apache.thrift.protocol.TField QUALIFIERS_FIELD_DESC = new org.apache.thrift.protocol.TField("qualifiers", org.apache.thrift.protocol.TType.MAP, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TTypeQualifiersStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TTypeQualifiersTupleSchemeFactory()); - } - - private Map qualifiers; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - QUALIFIERS((short)1, "qualifiers"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // QUALIFIERS - return QUALIFIERS; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.QUALIFIERS, new org.apache.thrift.meta_data.FieldMetaData("qualifiers", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TTypeQualifierValue.class)))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TTypeQualifiers.class, metaDataMap); - } - - public TTypeQualifiers() { - } - - public TTypeQualifiers( - Map qualifiers) - { - this(); - this.qualifiers = qualifiers; - } - - /** - * Performs a deep copy on other. 
- */ - public TTypeQualifiers(TTypeQualifiers other) { - if (other.isSetQualifiers()) { - Map __this__qualifiers = new HashMap(other.qualifiers.size()); - for (Map.Entry other_element : other.qualifiers.entrySet()) { - - String other_element_key = other_element.getKey(); - TTypeQualifierValue other_element_value = other_element.getValue(); - - String __this__qualifiers_copy_key = other_element_key; - - TTypeQualifierValue __this__qualifiers_copy_value = new TTypeQualifierValue(other_element_value); - - __this__qualifiers.put(__this__qualifiers_copy_key, __this__qualifiers_copy_value); - } - this.qualifiers = __this__qualifiers; - } - } - - public TTypeQualifiers deepCopy() { - return new TTypeQualifiers(this); - } - - @Override - public void clear() { - this.qualifiers = null; - } - - public int getQualifiersSize() { - return (this.qualifiers == null) ? 0 : this.qualifiers.size(); - } - - public void putToQualifiers(String key, TTypeQualifierValue val) { - if (this.qualifiers == null) { - this.qualifiers = new HashMap(); - } - this.qualifiers.put(key, val); - } - - public Map getQualifiers() { - return this.qualifiers; - } - - public void setQualifiers(Map qualifiers) { - this.qualifiers = qualifiers; - } - - public void unsetQualifiers() { - this.qualifiers = null; - } - - /** Returns true if field qualifiers is set (has been assigned a value) and false otherwise */ - public boolean isSetQualifiers() { - return this.qualifiers != null; - } - - public void setQualifiersIsSet(boolean value) { - if (!value) { - this.qualifiers = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case QUALIFIERS: - if (value == null) { - unsetQualifiers(); - } else { - setQualifiers((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case QUALIFIERS: - return getQualifiers(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case QUALIFIERS: - return isSetQualifiers(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TTypeQualifiers) - return this.equals((TTypeQualifiers)that); - return false; - } - - public boolean equals(TTypeQualifiers that) { - if (that == null) - return false; - - boolean this_present_qualifiers = true && this.isSetQualifiers(); - boolean that_present_qualifiers = true && that.isSetQualifiers(); - if (this_present_qualifiers || that_present_qualifiers) { - if (!(this_present_qualifiers && that_present_qualifiers)) - return false; - if (!this.qualifiers.equals(that.qualifiers)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_qualifiers = true && (isSetQualifiers()); - list.add(present_qualifiers); - if (present_qualifiers) - list.add(qualifiers); - - return list.hashCode(); - } - - @Override - public int compareTo(TTypeQualifiers other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetQualifiers()).compareTo(other.isSetQualifiers()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetQualifiers()) { - 
lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.qualifiers, other.qualifiers); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TTypeQualifiers("); - boolean first = true; - - sb.append("qualifiers:"); - if (this.qualifiers == null) { - sb.append("null"); - } else { - sb.append(this.qualifiers); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetQualifiers()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'qualifiers' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TTypeQualifiersStandardSchemeFactory implements SchemeFactory { - public TTypeQualifiersStandardScheme getScheme() { - return new TTypeQualifiersStandardScheme(); - } - } - - private static class TTypeQualifiersStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TTypeQualifiers struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // QUALIFIERS - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map0 = iprot.readMapBegin(); - struct.qualifiers = new HashMap(2*_map0.size); - String _key1; - TTypeQualifierValue _val2; - for (int _i3 = 0; _i3 < _map0.size; ++_i3) - { - _key1 = iprot.readString(); - _val2 = new TTypeQualifierValue(); - _val2.read(iprot); - struct.qualifiers.put(_key1, _val2); - } - iprot.readMapEnd(); - } - struct.setQualifiersIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TTypeQualifiers struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.qualifiers != null) { - oprot.writeFieldBegin(QUALIFIERS_FIELD_DESC); - { - oprot.writeMapBegin(new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.qualifiers.size())); - for (Map.Entry _iter4 : struct.qualifiers.entrySet()) - { - oprot.writeString(_iter4.getKey()); - _iter4.getValue().write(oprot); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TTypeQualifiersTupleSchemeFactory implements SchemeFactory { - public TTypeQualifiersTupleScheme getScheme() { - return new TTypeQualifiersTupleScheme(); - } - } - - private static class TTypeQualifiersTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TTypeQualifiers struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.qualifiers.size()); - for (Map.Entry _iter5 : struct.qualifiers.entrySet()) - { - oprot.writeString(_iter5.getKey()); - _iter5.getValue().write(oprot); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TTypeQualifiers struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TMap _map6 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.qualifiers = new HashMap(2*_map6.size); - String _key7; - TTypeQualifierValue _val8; - for (int _i9 = 0; _i9 < _map6.size; ++_i9) - { - _key7 = iprot.readString(); - _val8 = new TTypeQualifierValue(); - _val8.read(iprot); - struct.qualifiers.put(_key7, _val8); - } - } - struct.setQualifiersIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TUnionTypeEntry.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TUnionTypeEntry.java deleted file mode 100644 index d53f74cb8eff1..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TUnionTypeEntry.java +++ /dev/null @@ -1,452 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public class TUnionTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("TUnionTypeEntry"); - - private static final org.apache.thrift.protocol.TField NAME_TO_TYPE_PTR_FIELD_DESC = new org.apache.thrift.protocol.TField("nameToTypePtr", org.apache.thrift.protocol.TType.MAP, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TUnionTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TUnionTypeEntryTupleSchemeFactory()); - } - - private Map nameToTypePtr; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - NAME_TO_TYPE_PTR((short)1, "nameToTypePtr"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // NAME_TO_TYPE_PTR - return NAME_TO_TYPE_PTR; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.NAME_TO_TYPE_PTR, new org.apache.thrift.meta_data.FieldMetaData("nameToTypePtr", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "TTypeEntryPtr")))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TUnionTypeEntry.class, metaDataMap); - } - - public TUnionTypeEntry() { - } - - public TUnionTypeEntry( - Map nameToTypePtr) - { - this(); - this.nameToTypePtr = nameToTypePtr; - } - - /** - * Performs a deep copy on other. 
- */ - public TUnionTypeEntry(TUnionTypeEntry other) { - if (other.isSetNameToTypePtr()) { - Map __this__nameToTypePtr = new HashMap(other.nameToTypePtr.size()); - for (Map.Entry other_element : other.nameToTypePtr.entrySet()) { - - String other_element_key = other_element.getKey(); - Integer other_element_value = other_element.getValue(); - - String __this__nameToTypePtr_copy_key = other_element_key; - - Integer __this__nameToTypePtr_copy_value = other_element_value; - - __this__nameToTypePtr.put(__this__nameToTypePtr_copy_key, __this__nameToTypePtr_copy_value); - } - this.nameToTypePtr = __this__nameToTypePtr; - } - } - - public TUnionTypeEntry deepCopy() { - return new TUnionTypeEntry(this); - } - - @Override - public void clear() { - this.nameToTypePtr = null; - } - - public int getNameToTypePtrSize() { - return (this.nameToTypePtr == null) ? 0 : this.nameToTypePtr.size(); - } - - public void putToNameToTypePtr(String key, int val) { - if (this.nameToTypePtr == null) { - this.nameToTypePtr = new HashMap(); - } - this.nameToTypePtr.put(key, val); - } - - public Map getNameToTypePtr() { - return this.nameToTypePtr; - } - - public void setNameToTypePtr(Map nameToTypePtr) { - this.nameToTypePtr = nameToTypePtr; - } - - public void unsetNameToTypePtr() { - this.nameToTypePtr = null; - } - - /** Returns true if field nameToTypePtr is set (has been assigned a value) and false otherwise */ - public boolean isSetNameToTypePtr() { - return this.nameToTypePtr != null; - } - - public void setNameToTypePtrIsSet(boolean value) { - if (!value) { - this.nameToTypePtr = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case NAME_TO_TYPE_PTR: - if (value == null) { - unsetNameToTypePtr(); - } else { - setNameToTypePtr((Map)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case NAME_TO_TYPE_PTR: - return getNameToTypePtr(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case NAME_TO_TYPE_PTR: - return isSetNameToTypePtr(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TUnionTypeEntry) - return this.equals((TUnionTypeEntry)that); - return false; - } - - public boolean equals(TUnionTypeEntry that) { - if (that == null) - return false; - - boolean this_present_nameToTypePtr = true && this.isSetNameToTypePtr(); - boolean that_present_nameToTypePtr = true && that.isSetNameToTypePtr(); - if (this_present_nameToTypePtr || that_present_nameToTypePtr) { - if (!(this_present_nameToTypePtr && that_present_nameToTypePtr)) - return false; - if (!this.nameToTypePtr.equals(that.nameToTypePtr)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_nameToTypePtr = true && (isSetNameToTypePtr()); - list.add(present_nameToTypePtr); - if (present_nameToTypePtr) - list.add(nameToTypePtr); - - return list.hashCode(); - } - - @Override - public int compareTo(TUnionTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = 
Boolean.valueOf(isSetNameToTypePtr()).compareTo(other.isSetNameToTypePtr()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetNameToTypePtr()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nameToTypePtr, other.nameToTypePtr); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("TUnionTypeEntry("); - boolean first = true; - - sb.append("nameToTypePtr:"); - if (this.nameToTypePtr == null) { - sb.append("null"); - } else { - sb.append(this.nameToTypePtr); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetNameToTypePtr()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'nameToTypePtr' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TUnionTypeEntryStandardSchemeFactory implements SchemeFactory { - public TUnionTypeEntryStandardScheme getScheme() { - return new TUnionTypeEntryStandardScheme(); - } - } - - private static class TUnionTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TUnionTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // NAME_TO_TYPE_PTR - if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { - { - org.apache.thrift.protocol.TMap _map20 = iprot.readMapBegin(); - struct.nameToTypePtr = new HashMap(2*_map20.size); - String _key21; - int _val22; - for (int _i23 = 0; _i23 < _map20.size; ++_i23) - { - _key21 = iprot.readString(); - _val22 = iprot.readI32(); - struct.nameToTypePtr.put(_key21, _val22); - } - iprot.readMapEnd(); - } - struct.setNameToTypePtrIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TUnionTypeEntry struct) throws org.apache.thrift.TException { - 
struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.nameToTypePtr != null) { - oprot.writeFieldBegin(NAME_TO_TYPE_PTR_FIELD_DESC); - { - oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, struct.nameToTypePtr.size())); - for (Map.Entry _iter24 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter24.getKey()); - oprot.writeI32(_iter24.getValue()); - } - oprot.writeMapEnd(); - } - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TUnionTypeEntryTupleSchemeFactory implements SchemeFactory { - public TUnionTypeEntryTupleScheme getScheme() { - return new TUnionTypeEntryTupleScheme(); - } - } - - private static class TUnionTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TUnionTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - { - oprot.writeI32(struct.nameToTypePtr.size()); - for (Map.Entry _iter25 : struct.nameToTypePtr.entrySet()) - { - oprot.writeString(_iter25.getKey()); - oprot.writeI32(_iter25.getValue()); - } - } - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TUnionTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - { - org.apache.thrift.protocol.TMap _map26 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.nameToTypePtr = new HashMap(2*_map26.size); - String _key27; - int _val28; - for (int _i29 = 0; _i29 < _map26.size; ++_i29) - { - _key27 = iprot.readString(); - _val28 = iprot.readI32(); - struct.nameToTypePtr.put(_key27, _val28); - } - } - struct.setNameToTypePtrIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TUserDefinedTypeEntry.java b/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TUserDefinedTypeEntry.java deleted file mode 100644 index b80c4dd5c6302..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/gen/java/org/apache/hive/service/rpc/thrift/TUserDefinedTypeEntry.java +++ /dev/null @@ -1,389 +0,0 @@ -/** - * Autogenerated by Thrift Compiler (0.9.3) - * - * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - * @generated - */ -package org.apache.hive.service.rpc.thrift; - -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) -@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") -public 
class TUserDefinedTypeEntry implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TUserDefinedTypeEntry"); - - private static final org.apache.thrift.protocol.TField TYPE_CLASS_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("typeClassName", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new TUserDefinedTypeEntryStandardSchemeFactory()); - schemes.put(TupleScheme.class, new TUserDefinedTypeEntryTupleSchemeFactory()); - } - - private String typeClassName; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TYPE_CLASS_NAME((short)1, "typeClassName"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // TYPE_CLASS_NAME - return TYPE_CLASS_NAME; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; - } - - public String getFieldName() { - return _fieldName; - } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TYPE_CLASS_NAME, new org.apache.thrift.meta_data.FieldMetaData("typeClassName", org.apache.thrift.TFieldRequirementType.REQUIRED, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TUserDefinedTypeEntry.class, metaDataMap); - } - - public TUserDefinedTypeEntry() { - } - - public TUserDefinedTypeEntry( - String typeClassName) - { - this(); - this.typeClassName = typeClassName; - } - - /** - * Performs a deep copy on other. 
- */ - public TUserDefinedTypeEntry(TUserDefinedTypeEntry other) { - if (other.isSetTypeClassName()) { - this.typeClassName = other.typeClassName; - } - } - - public TUserDefinedTypeEntry deepCopy() { - return new TUserDefinedTypeEntry(this); - } - - @Override - public void clear() { - this.typeClassName = null; - } - - public String getTypeClassName() { - return this.typeClassName; - } - - public void setTypeClassName(String typeClassName) { - this.typeClassName = typeClassName; - } - - public void unsetTypeClassName() { - this.typeClassName = null; - } - - /** Returns true if field typeClassName is set (has been assigned a value) and false otherwise */ - public boolean isSetTypeClassName() { - return this.typeClassName != null; - } - - public void setTypeClassNameIsSet(boolean value) { - if (!value) { - this.typeClassName = null; - } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case TYPE_CLASS_NAME: - if (value == null) { - unsetTypeClassName(); - } else { - setTypeClassName((String)value); - } - break; - - } - } - - public Object getFieldValue(_Fields field) { - switch (field) { - case TYPE_CLASS_NAME: - return getTypeClassName(); - - } - throw new IllegalStateException(); - } - - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); - } - - switch (field) { - case TYPE_CLASS_NAME: - return isSetTypeClassName(); - } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof TUserDefinedTypeEntry) - return this.equals((TUserDefinedTypeEntry)that); - return false; - } - - public boolean equals(TUserDefinedTypeEntry that) { - if (that == null) - return false; - - boolean this_present_typeClassName = true && this.isSetTypeClassName(); - boolean that_present_typeClassName = true && that.isSetTypeClassName(); - if (this_present_typeClassName || that_present_typeClassName) { - if (!(this_present_typeClassName && that_present_typeClassName)) - return false; - if (!this.typeClassName.equals(that.typeClassName)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_typeClassName = true && (isSetTypeClassName()); - list.add(present_typeClassName); - if (present_typeClassName) - list.add(typeClassName); - - return list.hashCode(); - } - - @Override - public int compareTo(TUserDefinedTypeEntry other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetTypeClassName()).compareTo(other.isSetTypeClassName()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetTypeClassName()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.typeClassName, other.typeClassName); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - 
@Override - public String toString() { - StringBuilder sb = new StringBuilder("TUserDefinedTypeEntry("); - boolean first = true; - - sb.append("typeClassName:"); - if (this.typeClassName == null) { - sb.append("null"); - } else { - sb.append(this.typeClassName); - } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - if (!isSetTypeClassName()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'typeClassName' is unset! Struct:" + toString()); - } - - // check for sub-struct validity - } - - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); - } - } - - private static class TUserDefinedTypeEntryStandardSchemeFactory implements SchemeFactory { - public TUserDefinedTypeEntryStandardScheme getScheme() { - return new TUserDefinedTypeEntryStandardScheme(); - } - } - - private static class TUserDefinedTypeEntryStandardScheme extends StandardScheme { - - public void read(org.apache.thrift.protocol.TProtocol iprot, TUserDefinedTypeEntry struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; - } - switch (schemeField.id) { - case 1: // TYPE_CLASS_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.typeClassName = iprot.readString(); - struct.setTypeClassNameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, TUserDefinedTypeEntry struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.typeClassName != null) { - oprot.writeFieldBegin(TYPE_CLASS_NAME_FIELD_DESC); - oprot.writeString(struct.typeClassName); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class TUserDefinedTypeEntryTupleSchemeFactory implements SchemeFactory { - public TUserDefinedTypeEntryTupleScheme getScheme() { - return new TUserDefinedTypeEntryTupleScheme(); - } - } - - private static class TUserDefinedTypeEntryTupleScheme extends TupleScheme { - - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, TUserDefinedTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - oprot.writeString(struct.typeClassName); - } - - @Override - public void read(org.apache.thrift.protocol.TProtocol prot, TUserDefinedTypeEntry struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - struct.typeClassName = 
iprot.readString(); - struct.setTypeClassNameIsSet(true); - } - } - -} - diff --git a/sql/hive-thriftserver/v2.3/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala b/sql/hive-thriftserver/v2.3/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala deleted file mode 100644 index c8ac5226b296e..0000000000000 --- a/sql/hive-thriftserver/v2.3/src/main/scala/org/apache/spark/sql/hive/thriftserver/ThriftserverShimUtils.scala +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.hive.thriftserver - -import org.apache.hadoop.hive.ql.session.SessionState -import org.apache.hadoop.hive.serde2.thrift.Type -import org.apache.hadoop.hive.serde2.thrift.Type._ -import org.apache.hive.service.cli.{RowSet, RowSetFactory, TableSchema} -import org.apache.hive.service.rpc.thrift.TProtocolVersion._ -import org.slf4j.LoggerFactory - -/** - * Various utilities for hive-thriftserver used to upgrade the built-in Hive. 
- */ -private[thriftserver] object ThriftserverShimUtils { - - private[thriftserver] object TOperationType { - val GET_TYPE_INFO = org.apache.hive.service.rpc.thrift.TOperationType.GET_TYPE_INFO - } - - private[thriftserver] type TProtocolVersion = org.apache.hive.service.rpc.thrift.TProtocolVersion - private[thriftserver] type Client = org.apache.hive.service.rpc.thrift.TCLIService.Client - private[thriftserver] type TOpenSessionReq = org.apache.hive.service.rpc.thrift.TOpenSessionReq - private[thriftserver] type TGetSchemasReq = org.apache.hive.service.rpc.thrift.TGetSchemasReq - private[thriftserver] type TGetTablesReq = org.apache.hive.service.rpc.thrift.TGetTablesReq - private[thriftserver] type TGetColumnsReq = org.apache.hive.service.rpc.thrift.TGetColumnsReq - private[thriftserver] type TGetInfoReq = org.apache.hive.service.rpc.thrift.TGetInfoReq - private[thriftserver] type TExecuteStatementReq = - org.apache.hive.service.rpc.thrift.TExecuteStatementReq - private[thriftserver] type THandleIdentifier = - org.apache.hive.service.rpc.thrift.THandleIdentifier - private[thriftserver] type TOperationType = org.apache.hive.service.rpc.thrift.TOperationType - private[thriftserver] type TOperationHandle = org.apache.hive.service.rpc.thrift.TOperationHandle - - private[thriftserver] def getConsole: SessionState.LogHelper = { - val LOG = LoggerFactory.getLogger(classOf[SparkSQLCLIDriver]) - new SessionState.LogHelper(LOG) - } - - private[thriftserver] def resultRowSet( - getResultSetSchema: TableSchema, - getProtocolVersion: TProtocolVersion): RowSet = { - RowSetFactory.create(getResultSetSchema, getProtocolVersion, false) - } - - private[thriftserver] def supportedType(): Seq[Type] = { - Seq(NULL_TYPE, BOOLEAN_TYPE, STRING_TYPE, BINARY_TYPE, - TINYINT_TYPE, SMALLINT_TYPE, INT_TYPE, BIGINT_TYPE, - FLOAT_TYPE, DOUBLE_TYPE, DECIMAL_TYPE, - DATE_TYPE, TIMESTAMP_TYPE, - ARRAY_TYPE, MAP_TYPE, STRUCT_TYPE) - } - - private[thriftserver] val testedProtocolVersions = Seq( - HIVE_CLI_SERVICE_PROTOCOL_V1, - HIVE_CLI_SERVICE_PROTOCOL_V2, - HIVE_CLI_SERVICE_PROTOCOL_V3, - HIVE_CLI_SERVICE_PROTOCOL_V4, - HIVE_CLI_SERVICE_PROTOCOL_V5, - HIVE_CLI_SERVICE_PROTOCOL_V6, - HIVE_CLI_SERVICE_PROTOCOL_V7, - HIVE_CLI_SERVICE_PROTOCOL_V8, - HIVE_CLI_SERVICE_PROTOCOL_V9, - HIVE_CLI_SERVICE_PROTOCOL_V10) -} diff --git a/sql/hive/benchmarks/InsertIntoHiveTableBenchmark-hive1.2-results.txt b/sql/hive/benchmarks/InsertIntoHiveTableBenchmark-hive1.2-results.txt deleted file mode 100644 index 85884a1aaf739..0000000000000 --- a/sql/hive/benchmarks/InsertIntoHiveTableBenchmark-hive1.2-results.txt +++ /dev/null @@ -1,11 +0,0 @@ -Java HotSpot(TM) 64-Bit Server VM 1.8.0_251-b08 on Mac OS X 10.15.4 -Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz -insert hive table benchmark: Best Time(ms) Avg Time(ms) Stdev(ms) Rate(M/s) Per Row(ns) Relative ------------------------------------------------------------------------------------------------------------------------- -INSERT INTO DYNAMIC 6812 7043 328 0.0 665204.8 1.0X -INSERT INTO HYBRID 817 852 32 0.0 79783.6 8.3X -INSERT INTO STATIC 231 246 21 0.0 22568.2 29.5X -INSERT OVERWRITE DYNAMIC 25947 26671 1024 0.0 2533910.2 0.3X -INSERT OVERWRITE HYBRID 2846 2884 54 0.0 277908.7 2.4X -INSERT OVERWRITE STATIC 232 247 26 0.0 22659.9 29.4X - diff --git a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala index b7ea0630dd85f..e7c702baba752 
100644 --- a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala +++ b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala @@ -22,7 +22,6 @@ import java.io.File import org.scalatest.BeforeAndAfter import org.apache.spark.sql.catalyst.rules.RuleExecutor -import org.apache.spark.sql.hive.HiveUtils import org.apache.spark.sql.hive.test.TestHive import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf.StoreAssignmentPolicy @@ -41,8 +40,8 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { private val originalInMemoryPartitionPruning = TestHive.conf.inMemoryPartitionPruning private val originalCrossJoinEnabled = TestHive.conf.crossJoinEnabled private val originalSessionLocalTimeZone = TestHive.conf.sessionLocalTimeZone - private val originalLegacyAllowCastNumericToTimestamp = - TestHive.conf.legacyAllowCastNumericToTimestamp + private val originalCreateHiveTable = + TestHive.conf.getConf(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT) def testCases: Seq[(String, File)] = { hiveQueryDir.listFiles.map(f => f.getName.stripSuffix(".q") -> f) @@ -57,13 +56,12 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { TestHive.setConf(SQLConf.IN_MEMORY_PARTITION_PRUNING, true) // Ensures that cross joins are enabled so that we can test them TestHive.setConf(SQLConf.CROSS_JOINS_ENABLED, true) - // Ensures that the table insertion behaivor is consistent with Hive + // Ensures that the table insertion behavior is consistent with Hive TestHive.setConf(SQLConf.STORE_ASSIGNMENT_POLICY, StoreAssignmentPolicy.LEGACY.toString) // Fix session local timezone to America/Los_Angeles for those timezone sensitive tests // (timestamp_*) TestHive.setConf(SQLConf.SESSION_LOCAL_TIMEZONE, "America/Los_Angeles") - // Ensures that cast numeric to timestamp enabled so that we can test them - TestHive.setConf(SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP, true) + TestHive.setConf(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT, true) RuleExecutor.resetMetrics() } @@ -74,8 +72,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { TestHive.setConf(SQLConf.IN_MEMORY_PARTITION_PRUNING, originalInMemoryPartitionPruning) TestHive.setConf(SQLConf.CROSS_JOINS_ENABLED, originalCrossJoinEnabled) TestHive.setConf(SQLConf.SESSION_LOCAL_TIMEZONE, originalSessionLocalTimeZone) - TestHive.setConf(SQLConf.LEGACY_ALLOW_CAST_NUMERIC_TO_TIMESTAMP, - originalLegacyAllowCastNumericToTimestamp) + TestHive.setConf(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT, originalCreateHiveTable) // For debugging dump some statistics about how much time was spent in various optimizer rules logWarning(RuleExecutor.dumpTimeSpent()) @@ -308,7 +305,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { // Unsupported underscore syntax. "inputddl5", - // Thift is broken... + // Thrift is broken... "inputddl8", // Hive changed ordering of ddl: @@ -499,7 +496,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "drop_partitions_filter2", "drop_partitions_filter3", - // The following failes due to truncate table + // The following fails due to truncate table "truncate_table", // We do not support DFS command. 
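The beforeAll/afterAll hunks above save a session conf into a field, override it for the duration of the suite, and restore it afterwards. A minimal generic sketch of that save/override/restore pattern, assuming a plain SparkSession and a hypothetical helper name (the suite itself works directly against TestHive and SQLConf entries):

  import org.apache.spark.sql.SparkSession

  // Run `body` with `key` temporarily set to `value`, then restore the original setting.
  def withTempSQLConf[T](spark: SparkSession)(key: String, value: String)(body: => T): T = {
    val original = spark.conf.getOption(key)   // remember the current value, if any
    spark.conf.set(key, value)
    try body
    finally original match {
      case Some(v) => spark.conf.set(key, v)   // restore the previous value
      case None    => spark.conf.unset(key)    // or clear it if it was unset before
    }
  }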
@@ -528,6 +525,9 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "udf_xpath_short", "udf_xpath_string", + // [SPARK-33428][SQL] CONV UDF use BigInt to avoid Long value overflow + "udf_conv", + // These tests DROP TABLE that don't exist (but do not specify IF EXISTS) "alter_rename_partition1", "date_1", @@ -719,7 +719,7 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "groupby_multi_insert_common_distinct", "groupby_multi_single_reducer2", "groupby_multi_single_reducer3", - "groupby_mutli_insert_common_distinct", + "groupby_multi_insert_common_distinct", "groupby_neg_float", "groupby_ppd", "groupby_ppr", @@ -961,8 +961,8 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "subq2", "subquery_exists", "subquery_exists_having", - "subquery_notexists", - "subquery_notexists_having", + "subquery_nonexistent", + "subquery_nonexistent_having", "subquery_in_having", "tablename_with_select", "timestamp_comparison", @@ -1006,7 +1006,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { "udf_concat_insert1", "udf_concat_insert2", "udf_concat_ws", - "udf_conv", "udf_cos", "udf_count", "udf_date_add", @@ -1145,11 +1144,8 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with BeforeAndAfter { * The set of tests that are believed to be working in catalyst. Tests not on includeList or * excludeList are implicitly marked as ignored. */ - override def includeList: Seq[String] = if (HiveUtils.isHive23) { + override def includeList: Seq[String] = commonIncludeList ++ Seq( "decimal_1_1" ) - } else { - commonIncludeList - } } diff --git a/sql/hive/pom.xml b/sql/hive/pom.xml index 474c6066ed040..27d2756c741ef 100644 --- a/sql/hive/pom.xml +++ b/sql/hive/pom.xml @@ -22,7 +22,7 @@ org.apache.spark spark-parent_2.12 - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT ../../pom.xml @@ -35,11 +35,6 @@ - - - ${hive.parquet.group} - parquet-hadoop-bundle - org.apache.spark spark-core_${scala.binary.version} diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala index f01a03996821a..eeffe4f25d4c6 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala @@ -40,8 +40,7 @@ import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils._ import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap -import org.apache.spark.sql.connector.catalog.TableCatalog +import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, DateTimeUtils} import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.{PartitioningUtils, SourceOptions} import org.apache.spark.sql.hive.client.HiveClient @@ -430,18 +429,8 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat val properties = new mutable.HashMap[String, String] properties.put(CREATED_SPARK_VERSION, table.createVersion) - - // Serialized JSON schema string may be too long to be stored into a single metastore table - // property. In this case, we split the JSON string and store each part as a separate table - // property. 
- val threshold = conf.get(SCHEMA_STRING_LENGTH_THRESHOLD) - val schemaJsonString = schema.json - // Split the JSON string. - val parts = schemaJsonString.grouped(threshold).toSeq - properties.put(DATASOURCE_SCHEMA_NUMPARTS, parts.size.toString) - parts.zipWithIndex.foreach { case (part, index) => - properties.put(s"$DATASOURCE_SCHEMA_PART_PREFIX$index", part) - } + CatalogTable.splitLargeTableProp( + DATASOURCE_SCHEMA, schema.json, properties.put, conf.get(SCHEMA_STRING_LENGTH_THRESHOLD)) if (partitionColumns.nonEmpty) { properties.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partitionColumns.length.toString) @@ -745,8 +734,8 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat case None if table.tableType == VIEW => // If this is a view created by Spark 2.2 or higher versions, we should restore its schema // from table properties. - if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) { - table = table.copy(schema = getSchemaFromTableProperties(table)) + CatalogTable.readLargeTableProp(table.properties, DATASOURCE_SCHEMA).foreach { schemaJson => + table = table.copy(schema = DataType.fromJson(schemaJson).asInstanceOf[StructType]) } // No provider in table properties, which means this is a Hive serde table. @@ -796,8 +785,9 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat // If this is a Hive serde table created by Spark 2.1 or higher versions, we should restore its // schema from table properties. - if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) { - val schemaFromTableProps = getSchemaFromTableProperties(table) + val schemaJson = CatalogTable.readLargeTableProp(table.properties, DATASOURCE_SCHEMA) + if (schemaJson.isDefined) { + val schemaFromTableProps = DataType.fromJson(schemaJson.get).asInstanceOf[StructType] val partColumnNames = getPartitionColumnsFromTableProperties(table) val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames) @@ -837,7 +827,8 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat storageWithLocation.properties.filterKeys(!HIVE_GENERATED_STORAGE_PROPERTIES(_)).toMap) val partitionProvider = table.properties.get(TABLE_PARTITION_PROVIDER) - val schemaFromTableProps = getSchemaFromTableProperties(table) + val schemaFromTableProps = CatalogTable.readLargeTableProp(table.properties, DATASOURCE_SCHEMA) + .map(json => DataType.fromJson(json).asInstanceOf[StructType]).getOrElse(new StructType()) val partColumnNames = getPartitionColumnsFromTableProperties(table) val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames) @@ -951,9 +942,10 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat // Hive metastore is not case preserving and the partition columns are always lower cased. We need // to lower case the column names in partition specification before calling partition related Hive // APIs, to match this behaviour. 
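The hunk above (and the matching read-side hunks below) replaces the hand-rolled schema splitting with the new CatalogTable.splitLargeTableProp / readLargeTableProp helpers. A minimal standalone sketch of the same round trip, using hypothetical helper and property names rather than the real CatalogTable API:

  // Split a long property value into fixed-size numbered parts, plus a part count.
  def splitLargeProp(key: String, value: String, threshold: Int): Map[String, String] = {
    val parts = value.grouped(threshold).toSeq
    val indexed = parts.zipWithIndex.map { case (part, i) => s"$key.part.$i" -> part }
    (indexed :+ (s"$key.numParts" -> parts.size.toString)).toMap
  }

  // Reassemble the value if the part count is present; None means it was never stored.
  def readLargeProp(props: Map[String, String], key: String): Option[String] =
    props.get(s"$key.numParts").map { n =>
      (0 until n.toInt).map(i => props(s"$key.part.$i")).mkString
    }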
- private def lowerCasePartitionSpec(spec: TablePartitionSpec): TablePartitionSpec = { + private def toMetaStorePartitionSpec(spec: TablePartitionSpec): TablePartitionSpec = { // scalastyle:off caselocale - spec.map { case (k, v) => k.toLowerCase -> v } + val lowNames = spec.map { case (k, v) => k.toLowerCase -> v } + ExternalCatalogUtils.convertNullPartitionValues(lowNames) // scalastyle:on caselocale } @@ -1002,8 +994,9 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat } p.copy(storage = p.storage.copy(locationUri = Some(partitionPath.toUri))) } - val lowerCasedParts = partsWithLocation.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec))) - client.createPartitions(db, table, lowerCasedParts, ignoreIfExists) + val metaStoreParts = partsWithLocation + .map(p => p.copy(spec = toMetaStorePartitionSpec(p.spec))) + client.createPartitions(db, table, metaStoreParts, ignoreIfExists) } override def dropPartitions( @@ -1015,7 +1008,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat retainData: Boolean): Unit = withClient { requireTableExists(db, table) client.dropPartitions( - db, table, parts.map(lowerCasePartitionSpec), ignoreIfNotExists, purge, retainData) + db, table, parts.map(toMetaStorePartitionSpec), ignoreIfNotExists, purge, retainData) } override def renamePartitions( @@ -1024,7 +1017,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat specs: Seq[TablePartitionSpec], newSpecs: Seq[TablePartitionSpec]): Unit = withClient { client.renamePartitions( - db, table, specs.map(lowerCasePartitionSpec), newSpecs.map(lowerCasePartitionSpec)) + db, table, specs.map(toMetaStorePartitionSpec), newSpecs.map(toMetaStorePartitionSpec)) val tableMeta = getTable(db, table) val partitionColumnNames = tableMeta.partitionColumnNames @@ -1040,7 +1033,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat val fs = tablePath.getFileSystem(hadoopConf) val newParts = newSpecs.map { spec => val rightPath = renamePartitionDirectory(fs, tablePath, partitionColumnNames, spec) - val partition = client.getPartition(db, table, lowerCasePartitionSpec(spec)) + val partition = client.getPartition(db, table, toMetaStorePartitionSpec(spec)) partition.copy(storage = partition.storage.copy(locationUri = Some(rightPath.toUri))) } alterPartitions(db, table, newParts) @@ -1150,12 +1143,12 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat db: String, table: String, newParts: Seq[CatalogTablePartition]): Unit = withClient { - val lowerCasedParts = newParts.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec))) + val metaStoreParts = newParts.map(p => p.copy(spec = toMetaStorePartitionSpec(p.spec))) val rawTable = getRawTable(db, table) // convert partition statistics to properties so that we can persist them through hive api - val withStatsProps = lowerCasedParts.map { p => + val withStatsProps = metaStoreParts.map { p => if (p.stats.isDefined) { val statsProperties = statsToProperties(p.stats.get) p.copy(parameters = p.parameters ++ statsProperties) @@ -1171,7 +1164,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat db: String, table: String, spec: TablePartitionSpec): CatalogTablePartition = withClient { - val part = client.getPartition(db, table, lowerCasePartitionSpec(spec)) + val part = client.getPartition(db, table, toMetaStorePartitionSpec(spec)) restorePartitionMetadata(part, getTable(db, table)) } @@ -1209,7 +1202,7 @@ 
private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat db: String, table: String, spec: TablePartitionSpec): Option[CatalogTablePartition] = withClient { - client.getPartitionOption(db, table, lowerCasePartitionSpec(spec)).map { part => + client.getPartitionOption(db, table, toMetaStorePartitionSpec(spec)).map { part => restorePartitionMetadata(part, getTable(db, table)) } } @@ -1224,7 +1217,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat val catalogTable = getTable(db, table) val partColNameMap = buildLowerCasePartColNameMap(catalogTable).mapValues(escapePathName) val clientPartitionNames = - client.getPartitionNames(catalogTable, partialSpec.map(lowerCasePartitionSpec)) + client.getPartitionNames(catalogTable, partialSpec.map(toMetaStorePartitionSpec)) clientPartitionNames.map { partitionPath => val partSpec = PartitioningUtils.parsePathFragmentAsSeq(partitionPath) partSpec.map { case (partName, partValue) => @@ -1243,11 +1236,12 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat table: String, partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = withClient { val partColNameMap = buildLowerCasePartColNameMap(getTable(db, table)) - val res = client.getPartitions(db, table, partialSpec.map(lowerCasePartitionSpec)).map { part => - part.copy(spec = restorePartitionSpec(part.spec, partColNameMap)) + val metaStoreSpec = partialSpec.map(toMetaStorePartitionSpec) + val res = client.getPartitions(db, table, metaStoreSpec) + .map { part => part.copy(spec = restorePartitionSpec(part.spec, partColNameMap)) } - partialSpec match { + metaStoreSpec match { // This might be a bug of Hive: When the partition value inside the partial partition spec // contains dot, and we ask Hive to list partitions w.r.t. the partial partition spec, Hive // treats dot as matching any single character and may return more partitions than we @@ -1265,11 +1259,13 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat defaultTimeZoneId: String): Seq[CatalogTablePartition] = withClient { val rawTable = getRawTable(db, table) val catalogTable = restoreTableMetadata(rawTable) + val timeZoneId = CaseInsensitiveMap(catalogTable.storage.properties).getOrElse( + DateTimeUtils.TIMEZONE_OPTION, defaultTimeZoneId) val partColNameMap = buildLowerCasePartColNameMap(catalogTable) val clientPrunedPartitions = - client.getPartitionsByFilter(rawTable, predicates).map { part => + client.getPartitionsByFilter(rawTable, predicates, timeZoneId).map { part => part.copy(spec = restorePartitionSpec(part.spec, partColNameMap)) } prunePartitionsByFilter(catalogTable, clientPrunedPartitions, predicates, defaultTimeZoneId) @@ -1339,7 +1335,6 @@ object HiveExternalCatalog { val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider" val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema" val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "." - val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts" val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols" val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols" val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets" @@ -1372,40 +1367,6 @@ object HiveExternalCatalog { val EMPTY_DATA_SCHEMA = new StructType() .add("col", "array", nullable = true, comment = "from deserializer") - // A persisted data source table always store its schema in the catalog. 
- private def getSchemaFromTableProperties(metadata: CatalogTable): StructType = { - val errorMessage = "Could not read schema from the hive metastore because it is corrupted." - val props = metadata.properties - val schema = props.get(DATASOURCE_SCHEMA) - if (schema.isDefined) { - // Originally, we used `spark.sql.sources.schema` to store the schema of a data source table. - // After SPARK-6024, we removed this flag. - // Although we are not using `spark.sql.sources.schema` any more, we need to still support. - DataType.fromJson(schema.get).asInstanceOf[StructType] - } else if (props.filterKeys(_.startsWith(DATASOURCE_SCHEMA_PREFIX)).isEmpty) { - // If there is no schema information in table properties, it means the schema of this table - // was empty when saving into metastore, which is possible in older version(prior to 2.1) of - // Spark. We should respect it. - new StructType() - } else { - val numSchemaParts = props.get(DATASOURCE_SCHEMA_NUMPARTS) - if (numSchemaParts.isDefined) { - val parts = (0 until numSchemaParts.get.toInt).map { index => - val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull - if (part == null) { - throw new AnalysisException(errorMessage + - s" (missing part $index of the schema, ${numSchemaParts.get} parts are expected).") - } - part - } - // Stick all parts back to a single schema string. - DataType.fromJson(parts.mkString).asInstanceOf[StructType] - } else { - throw new AnalysisException(errorMessage) - } - } - } - private def getColumnNamesByType( props: Map[String, String], colType: String, diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala index 8ab6e28366753..9213173bbc9ba 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveInspectors.scala @@ -1039,6 +1039,7 @@ private[hive] trait HiveInspectors { private def decimalTypeInfo(decimalType: DecimalType): TypeInfo = decimalType match { case DecimalType.Fixed(precision, scale) => new DecimalTypeInfo(precision, scale) + case dt => throw new AnalysisException(s"${dt.catalogString} is not supported.") } def toTypeInfo: TypeInfo = dt match { diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala index a89243c331c7b..e02589e5cad00 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveMetastoreCatalog.scala @@ -332,7 +332,7 @@ private[hive] object HiveMetastoreCatalog { metastoreSchema: StructType, inferredSchema: StructType): StructType = try { // scalastyle:off caselocale - // Find any nullable fields in mestastore schema that are missing from the inferred schema. + // Find any nullable fields in metastore schema that are missing from the inferred schema. 
val metastoreFields = metastoreSchema.map(f => f.name.toLowerCase -> f).toMap val missingNullables = metastoreFields .filterKeys(!inferredSchema.map(_.name.toLowerCase).contains(_)) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala index bc7760c982aab..f60bad180a710 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionCatalog.scala @@ -30,11 +30,10 @@ import org.apache.hadoop.hive.ql.udf.generic.{AbstractGenericUDAFResolver, Gener import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.FunctionIdentifier import org.apache.spark.sql.catalyst.analysis.FunctionRegistry -import org.apache.spark.sql.catalyst.catalog.{CatalogFunction, ExternalCatalog, FunctionResourceLoader, GlobalTempViewManager, SessionCatalog} +import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.expressions.{Cast, Expression} import org.apache.spark.sql.catalyst.parser.ParserInterface import org.apache.spark.sql.hive.HiveShim.HiveFunctionWrapper -import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{DecimalType, DoubleType} import org.apache.spark.util.Utils @@ -44,7 +43,6 @@ private[sql] class HiveSessionCatalog( globalTempViewManagerBuilder: () => GlobalTempViewManager, val metastoreCatalog: HiveMetastoreCatalog, functionRegistry: FunctionRegistry, - conf: SQLConf, hadoopConf: Configuration, parser: ParserInterface, functionResourceLoader: FunctionResourceLoader) @@ -52,11 +50,60 @@ private[sql] class HiveSessionCatalog( externalCatalogBuilder, globalTempViewManagerBuilder, functionRegistry, - conf, hadoopConf, parser, functionResourceLoader) { + private def makeHiveFunctionExpression( + name: String, + clazz: Class[_], + input: Seq[Expression]): Expression = { + var udfExpr: Option[Expression] = None + try { + // When we instantiate hive UDF wrapper class, we may throw exception if the input + // expressions don't satisfy the hive UDF, such as type mismatch, input number + // mismatch, etc. Here we catch the exception and throw AnalysisException instead. + if (classOf[UDF].isAssignableFrom(clazz)) { + udfExpr = Some(HiveSimpleUDF(name, new HiveFunctionWrapper(clazz.getName), input)) + udfExpr.get.dataType // Force it to check input data types. + } else if (classOf[GenericUDF].isAssignableFrom(clazz)) { + udfExpr = Some(HiveGenericUDF(name, new HiveFunctionWrapper(clazz.getName), input)) + udfExpr.get.dataType // Force it to check input data types. + } else if (classOf[AbstractGenericUDAFResolver].isAssignableFrom(clazz)) { + udfExpr = Some(HiveUDAFFunction(name, new HiveFunctionWrapper(clazz.getName), input)) + udfExpr.get.dataType // Force it to check input data types. + } else if (classOf[UDAF].isAssignableFrom(clazz)) { + udfExpr = Some(HiveUDAFFunction( + name, + new HiveFunctionWrapper(clazz.getName), + input, + isUDAFBridgeRequired = true)) + udfExpr.get.dataType // Force it to check input data types. + } else if (classOf[GenericUDTF].isAssignableFrom(clazz)) { + udfExpr = Some(HiveGenericUDTF(name, new HiveFunctionWrapper(clazz.getName), input)) + // Force it to check data types. 
+ udfExpr.get.asInstanceOf[HiveGenericUDTF].elementSchema + } + } catch { + case NonFatal(e) => + val noHandlerMsg = s"No handler for UDF/UDAF/UDTF '${clazz.getCanonicalName}': $e" + val errorMsg = + if (classOf[GenericUDTF].isAssignableFrom(clazz)) { + s"$noHandlerMsg\nPlease make sure your function overrides " + + "`public StructObjectInspector initialize(ObjectInspector[] args)`." + } else { + noHandlerMsg + } + val analysisException = new AnalysisException(errorMsg) + analysisException.setStackTrace(e.getStackTrace) + throw analysisException + } + udfExpr.getOrElse { + throw new InvalidUDFClassException( + s"No handler for UDF/UDAF/UDTF '${clazz.getCanonicalName}'") + } + } + /** * Constructs a [[Expression]] based on the provided class that represents a function. * @@ -69,49 +116,14 @@ private[sql] class HiveSessionCatalog( // Current thread context classloader may not be the one loaded the class. Need to switch // context classloader to initialize instance properly. Utils.withContextClassLoader(clazz.getClassLoader) { - Try(super.makeFunctionExpression(name, clazz, input)).getOrElse { - var udfExpr: Option[Expression] = None - try { - // When we instantiate hive UDF wrapper class, we may throw exception if the input - // expressions don't satisfy the hive UDF, such as type mismatch, input number - // mismatch, etc. Here we catch the exception and throw AnalysisException instead. - if (classOf[UDF].isAssignableFrom(clazz)) { - udfExpr = Some(HiveSimpleUDF(name, new HiveFunctionWrapper(clazz.getName), input)) - udfExpr.get.dataType // Force it to check input data types. - } else if (classOf[GenericUDF].isAssignableFrom(clazz)) { - udfExpr = Some(HiveGenericUDF(name, new HiveFunctionWrapper(clazz.getName), input)) - udfExpr.get.dataType // Force it to check input data types. - } else if (classOf[AbstractGenericUDAFResolver].isAssignableFrom(clazz)) { - udfExpr = Some(HiveUDAFFunction(name, new HiveFunctionWrapper(clazz.getName), input)) - udfExpr.get.dataType // Force it to check input data types. - } else if (classOf[UDAF].isAssignableFrom(clazz)) { - udfExpr = Some(HiveUDAFFunction( - name, - new HiveFunctionWrapper(clazz.getName), - input, - isUDAFBridgeRequired = true)) - udfExpr.get.dataType // Force it to check input data types. - } else if (classOf[GenericUDTF].isAssignableFrom(clazz)) { - udfExpr = Some(HiveGenericUDTF(name, new HiveFunctionWrapper(clazz.getName), input)) - udfExpr.get.asInstanceOf[HiveGenericUDTF].elementSchema // Force it to check data types. - } - } catch { - case NonFatal(e) => - val noHandlerMsg = s"No handler for UDF/UDAF/UDTF '${clazz.getCanonicalName}': $e" - val errorMsg = - if (classOf[GenericUDTF].isAssignableFrom(clazz)) { - s"$noHandlerMsg\nPlease make sure your function overrides " + - "`public StructObjectInspector initialize(ObjectInspector[] args)`." - } else { - noHandlerMsg - } - val analysisException = new AnalysisException(errorMsg) - analysisException.setStackTrace(e.getStackTrace) - throw analysisException - } - udfExpr.getOrElse { - throw new AnalysisException(s"No handler for UDF/UDAF/UDTF '${clazz.getCanonicalName}'") - } + try { + super.makeFunctionExpression(name, clazz, input) + } catch { + // If `super.makeFunctionExpression` throw `InvalidUDFClassException`, we construct + // Hive UDF/UDAF/UDTF with function definition. Otherwise, we just throw it earlier. 
+ case _: InvalidUDFClassException => + makeHiveFunctionExpression(name, clazz, input) + case NonFatal(e) => throw e } } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionStateBuilder.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionStateBuilder.scala index 78ec2b8e2047e..654f9f62ebdd3 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionStateBuilder.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionStateBuilder.scala @@ -17,14 +17,15 @@ package org.apache.spark.sql.hive +import java.net.URI + import org.apache.spark.annotation.Unstable import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.analysis.{Analyzer, ResolveSessionCatalog} import org.apache.spark.sql.catalyst.catalog.ExternalCatalogWithListener -import org.apache.spark.sql.catalyst.optimizer.Optimizer import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.rules.Rule -import org.apache.spark.sql.execution.{SparkOptimizer, SparkPlanner} +import org.apache.spark.sql.execution.SparkPlanner import org.apache.spark.sql.execution.aggregate.ResolveEncodersInScalaAgg import org.apache.spark.sql.execution.analysis.DetectAmbiguousSelfJoin import org.apache.spark.sql.execution.command.CommandCheck @@ -38,8 +39,11 @@ import org.apache.spark.sql.internal.{BaseSessionStateBuilder, SessionResourceLo * Builder that produces a Hive-aware `SessionState`. */ @Unstable -class HiveSessionStateBuilder(session: SparkSession, parentState: Option[SessionState] = None) - extends BaseSessionStateBuilder(session, parentState) { +class HiveSessionStateBuilder( + session: SparkSession, + parentState: Option[SessionState], + options: Map[String, String]) + extends BaseSessionStateBuilder(session, parentState, options) { private def externalCatalog: ExternalCatalogWithListener = session.sharedState.externalCatalog @@ -60,7 +64,6 @@ class HiveSessionStateBuilder(session: SparkSession, parentState: Option[Session () => session.sharedState.globalTempViewManager, new HiveMetastoreCatalog(session), functionRegistry, - conf, SessionState.newHadoopConf(session.sparkContext.hadoopConfiguration, conf), sqlParser, resourceLoader) @@ -71,7 +74,7 @@ class HiveSessionStateBuilder(session: SparkSession, parentState: Option[Session /** * A logical query plan `Analyzer` with rules specific to Hive. 
*/ - override protected def analyzer: Analyzer = new Analyzer(catalogManager, conf) { + override protected def analyzer: Analyzer = new Analyzer(catalogManager) { override val extendedResolutionRules: Seq[Rule[LogicalPlan]] = new ResolveHiveSerdeTable(session) +: new FindDataSourceTable(session) +: @@ -79,16 +82,17 @@ class HiveSessionStateBuilder(session: SparkSession, parentState: Option[Session new FallBackFileSourceV2(session) +: ResolveEncodersInScalaAgg +: new ResolveSessionCatalog( - catalogManager, conf, catalog.isTempView, catalog.isTempFunction) +: + catalogManager, catalog.isTempView, catalog.isTempFunction) +: customResolutionRules override val postHocResolutionRules: Seq[Rule[LogicalPlan]] = - new DetectAmbiguousSelfJoin(conf) +: + DetectAmbiguousSelfJoin +: new DetermineTableStats(session) +: - RelationConversions(conf, catalog) +: + RelationConversions(catalog) +: PreprocessTableCreation(session) +: - PreprocessTableInsertion(conf) +: - DataSourceAnalysis(conf) +: + PreprocessTableInsertion +: + DataSourceAnalysis +: + PaddingAndLengthCheckForCharVarchar +: HiveAnalysis +: customPostHocResolutionRules @@ -96,7 +100,7 @@ class HiveSessionStateBuilder(session: SparkSession, parentState: Option[Session PreWriteCheck +: PreReadCheck +: TableCapabilityCheck +: - CommandCheck(conf) +: + CommandCheck +: customCheckRules } @@ -107,7 +111,7 @@ class HiveSessionStateBuilder(session: SparkSession, parentState: Option[Session * Planner that takes into account Hive-specific strategies. */ override protected def planner: SparkPlanner = { - new SparkPlanner(session, conf, experimentalMethods) with HiveStrategies { + new SparkPlanner(session, experimentalMethods) with HiveStrategies { override val sparkSession: SparkSession = session override def extraPlanningStrategies: Seq[Strategy] = @@ -116,7 +120,7 @@ class HiveSessionStateBuilder(session: SparkSession, parentState: Option[Session } } - override protected def newBuilder: NewBuilder = new HiveSessionStateBuilder(_, _) + override protected def newBuilder: NewBuilder = new HiveSessionStateBuilder(_, _, Map.empty) } class HiveSessionResourceLoader( @@ -125,7 +129,10 @@ class HiveSessionResourceLoader( extends SessionResourceLoader(session) { private lazy val client = clientBuilder() override def addJar(path: String): Unit = { - client.addJar(path) - super.addJar(path) + val uri = URI.create(path) + resolveJars(uri).foreach { p => + client.addJar(p) + super.addJar(p) + } } } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala index 04a6a8f8aa9a5..3a53a2a8dadd8 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveShim.scala @@ -17,18 +17,16 @@ package org.apache.spark.sql.hive -import java.io.{InputStream, OutputStream} -import java.lang.reflect.Method import java.rmi.server.UID import scala.collection.JavaConverters._ import scala.language.implicitConversions -import scala.reflect.ClassTag import com.google.common.base.Objects import org.apache.avro.Schema import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path +import org.apache.hadoop.hive.ql.exec.SerializationUtilities import org.apache.hadoop.hive.ql.exec.UDF import org.apache.hadoop.hive.ql.plan.{FileSinkDesc, TableDesc} import org.apache.hadoop.hive.ql.udf.generic.GenericUDFMacro @@ -148,60 +146,12 @@ private[hive] object HiveShim { case _ => false } - private lazy val serUtilClass = - 
Utils.classForName("org.apache.hadoop.hive.ql.exec.SerializationUtilities") - private lazy val utilClass = Utils.classForName("org.apache.hadoop.hive.ql.exec.Utilities") - private val deserializeMethodName = "deserializeObjectByKryo" - private val serializeMethodName = "serializeObjectByKryo" - - private def findMethod(klass: Class[_], name: String, args: Class[_]*): Method = { - val method = klass.getDeclaredMethod(name, args: _*) - method.setAccessible(true) - method - } - def deserializePlan[UDFType](is: java.io.InputStream, clazz: Class[_]): UDFType = { - if (HiveUtils.isHive23) { - val borrowKryo = serUtilClass.getMethod("borrowKryo") - val kryo = borrowKryo.invoke(serUtilClass) - val deserializeObjectByKryo = findMethod(serUtilClass, deserializeMethodName, - kryo.getClass.getSuperclass, classOf[InputStream], classOf[Class[_]]) - try { - deserializeObjectByKryo.invoke(null, kryo, is, clazz).asInstanceOf[UDFType] - } finally { - serUtilClass.getMethod("releaseKryo", kryo.getClass.getSuperclass).invoke(null, kryo) - } - } else { - val runtimeSerializationKryo = utilClass.getField("runtimeSerializationKryo") - val threadLocalValue = runtimeSerializationKryo.get(utilClass) - val getMethod = threadLocalValue.getClass.getMethod("get") - val kryo = getMethod.invoke(threadLocalValue) - val deserializeObjectByKryo = findMethod(utilClass, deserializeMethodName, - kryo.getClass, classOf[InputStream], classOf[Class[_]]) - deserializeObjectByKryo.invoke(null, kryo, is, clazz).asInstanceOf[UDFType] - } + SerializationUtilities.deserializePlan(is, clazz).asInstanceOf[UDFType] } def serializePlan(function: AnyRef, out: java.io.OutputStream): Unit = { - if (HiveUtils.isHive23) { - val borrowKryo = serUtilClass.getMethod("borrowKryo") - val kryo = borrowKryo.invoke(serUtilClass) - val serializeObjectByKryo = findMethod(serUtilClass, serializeMethodName, - kryo.getClass.getSuperclass, classOf[Object], classOf[OutputStream]) - try { - serializeObjectByKryo.invoke(null, kryo, function, out) - } finally { - serUtilClass.getMethod("releaseKryo", kryo.getClass.getSuperclass).invoke(null, kryo) - } - } else { - val runtimeSerializationKryo = utilClass.getField("runtimeSerializationKryo") - val threadLocalValue = runtimeSerializationKryo.get(utilClass) - val getMethod = threadLocalValue.getClass.getMethod("get") - val kryo = getMethod.invoke(threadLocalValue) - val serializeObjectByKryo = findMethod(utilClass, serializeMethodName, - kryo.getClass, classOf[Object], classOf[OutputStream]) - serializeObjectByKryo.invoke(null, kryo, function, out) - } + SerializationUtilities.serializePlan(function, out) } def writeExternal(out: java.io.ObjectOutput): Unit = { diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala index 97e1dee5913a4..e10233d2573c9 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala @@ -26,12 +26,12 @@ import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.planning._ -import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoDir, InsertIntoStatement, LogicalPlan, ScriptTransformation, Statistics} +import org.apache.spark.sql.catalyst.plans.logical.{CacheTable, InsertIntoDir, InsertIntoStatement, LogicalPlan, ScriptTransformation, Statistics, UncacheTable} import 
org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.connector.catalog.CatalogV2Util.assertNoNullTypeInSchema import org.apache.spark.sql.execution._ import org.apache.spark.sql.execution.command.{CreateTableCommand, DDLUtils} -import org.apache.spark.sql.execution.datasources.CreateTable +import org.apache.spark.sql.execution.datasources.{CreateTable, DataSourceStrategy} import org.apache.spark.sql.hive.execution._ import org.apache.spark.sql.hive.execution.HiveScriptTransformationExec import org.apache.spark.sql.internal.{HiveSerDe, SQLConf} @@ -50,7 +50,7 @@ class ResolveHiveSerdeTable(session: SparkSession) extends Rule[LogicalPlan] { throw new AnalysisException("Creating bucketed Hive serde table is not supported yet.") } - val defaultStorage = HiveSerDe.getDefaultStorage(session.sessionState.conf) + val defaultStorage = HiveSerDe.getDefaultStorage(conf) val options = new HiveOptions(table.storage.properties) val fileStorage = if (options.fileFormat.isDefined) { @@ -117,7 +117,6 @@ class DetermineTableStats(session: SparkSession) extends Rule[LogicalPlan] { private def hiveTableWithStats(relation: HiveTableRelation): HiveTableRelation = { val table = relation.tableMeta val partitionCols = relation.partitionCols - val conf = session.sessionState.conf // For partitioned tables, the partition directory may be outside of the table directory. // Which is expensive to get table size. Please see how we implemented it in the AnalyzeTable. val sizeInBytes = if (conf.fallBackToHdfsForStatsEnabled && partitionCols.isEmpty) { @@ -146,7 +145,7 @@ class DetermineTableStats(session: SparkSession) extends Rule[LogicalPlan] { // handles InsertIntoStatement specially as the table in InsertIntoStatement is not added in its // children, hence not matched directly by previous HiveTableRelation case. - case i @ InsertIntoStatement(relation: HiveTableRelation, _, _, _, _) + case i @ InsertIntoStatement(relation: HiveTableRelation, _, _, _, _, _) if DDLUtils.isHiveTable(relation.tableMeta) && relation.tableMeta.stats.isEmpty => i.copy(table = hiveTableWithStats(relation)) } @@ -160,7 +159,8 @@ class DetermineTableStats(session: SparkSession) extends Rule[LogicalPlan] { */ object HiveAnalysis extends Rule[LogicalPlan] { override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators { - case InsertIntoStatement(r: HiveTableRelation, partSpec, query, overwrite, ifPartitionNotExists) + case InsertIntoStatement( + r: HiveTableRelation, partSpec, _, query, overwrite, ifPartitionNotExists) if DDLUtils.isHiveTable(r.tableMeta) => InsertIntoHiveTable(r.tableMeta, partSpec, query, overwrite, ifPartitionNotExists, query.output.map(_.name)) @@ -191,7 +191,6 @@ object HiveAnalysis extends Rule[LogicalPlan] { * `PreprocessTableCreation`, `PreprocessTableInsertion`, `DataSourceAnalysis` and `HiveAnalysis`. 
*/ case class RelationConversions( - conf: SQLConf, sessionCatalog: HiveSessionCatalog) extends Rule[LogicalPlan] { private def isConvertible(relation: HiveTableRelation): Boolean = { isConvertible(relation.tableMeta) @@ -209,11 +208,11 @@ case class RelationConversions( plan resolveOperators { // Write path case InsertIntoStatement( - r: HiveTableRelation, partition, query, overwrite, ifPartitionNotExists) + r: HiveTableRelation, partition, cols, query, overwrite, ifPartitionNotExists) if query.resolved && DDLUtils.isHiveTable(r.tableMeta) && (!r.isPartitioned || SQLConf.get.getConf(HiveUtils.CONVERT_INSERTING_PARTITIONED_TABLE)) && isConvertible(r) => - InsertIntoStatement(metastoreCatalog.convert(r), partition, + InsertIntoStatement(metastoreCatalog.convert(r), partition, cols, query, overwrite, ifPartitionNotExists) // Read path @@ -223,14 +222,25 @@ case class RelationConversions( // CTAS case CreateTable(tableDesc, mode, Some(query)) - if DDLUtils.isHiveTable(tableDesc) && tableDesc.partitionColumnNames.isEmpty && - isConvertible(tableDesc) && SQLConf.get.getConf(HiveUtils.CONVERT_METASTORE_CTAS) => + if query.resolved && DDLUtils.isHiveTable(tableDesc) && + tableDesc.partitionColumnNames.isEmpty && isConvertible(tableDesc) && + SQLConf.get.getConf(HiveUtils.CONVERT_METASTORE_CTAS) => // validation is required to be done here before relation conversion. DDLUtils.checkDataColNames(tableDesc.copy(schema = query.schema)) // This is for CREATE TABLE .. STORED AS PARQUET/ORC AS SELECT null assertNoNullTypeInSchema(query.schema) OptimizedCreateHiveTableAsSelectCommand( tableDesc, query, query.output.map(_.name), mode) + + // Cache table + case c @ CacheTable(relation: HiveTableRelation, _, _, _) + if DDLUtils.isHiveTable(relation.tableMeta) && isConvertible(relation) => + c.copy(table = metastoreCatalog.convert(relation)) + + // Uncache table + case u @ UncacheTable(relation: HiveTableRelation, _, _) + if DDLUtils.isHiveTable(relation.tableMeta) && isConvertible(relation) => + u.copy(table = metastoreCatalog.convert(relation)) } } } @@ -256,20 +266,21 @@ private[hive] trait HiveStrategies { */ object HiveTableScans extends Strategy { def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match { - case ScanOperation(projectList, predicates, relation: HiveTableRelation) => + case ScanOperation(projectList, filters, relation: HiveTableRelation) => // Filter out all predicates that only deal with partition keys, these are given to the // hive table scan operator to be used for partition pruning. 
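        // Illustrative example (hypothetical table `t` partitioned by `ds`, with data column `key`):
        //   SELECT key FROM t WHERE ds = '2020-01-01' AND key > 10
        // only `ds = '2020-01-01'` is handed to HiveTableScanExec below for partition pruning,
        // while `key > 10` stays in the post-scan filter evaluated by Spark.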
val partitionKeyIds = AttributeSet(relation.partitionCols) - val (pruningPredicates, otherPredicates) = predicates.partition { predicate => - !predicate.references.isEmpty && - predicate.references.subsetOf(partitionKeyIds) - } + val normalizedFilters = DataSourceStrategy.normalizeExprs( + filters.filter(_.deterministic), relation.output) + + val partitionKeyFilters = DataSourceStrategy.getPushedDownFilters(relation.partitionCols, + normalizedFilters) pruneFilterProject( projectList, - otherPredicates, + filters.filter(f => f.references.isEmpty || !f.references.subsetOf(partitionKeyIds)), identity[Seq[Expression]], - HiveTableScanExec(_, relation, pruningPredicates)(sparkSession)) :: Nil + HiveTableScanExec(_, relation, partitionKeyFilters.toSeq)(sparkSession)) :: Nil case _ => Nil } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala index 62ff2db2ecb3c..46a8e9660a207 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala @@ -24,7 +24,6 @@ import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ import scala.collection.mutable.HashMap -import scala.language.implicitConversions import org.apache.commons.lang3.{JavaVersion, SystemUtils} import org.apache.hadoop.conf.Configuration @@ -40,6 +39,7 @@ import org.apache.spark.internal.Logging import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.catalog.CatalogTable import org.apache.spark.sql.execution.command.DDLUtils +import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.hive.client._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf._ @@ -55,11 +55,8 @@ private[spark] object HiveUtils extends Logging { sc } - private val hiveVersion = HiveVersionInfo.getVersion - val isHive23: Boolean = hiveVersion.startsWith("2.3") - /** The version of hive used internally by Spark SQL. */ - val builtinHiveVersion: String = if (isHive23) hiveVersion else "1.2.1" + val builtinHiveVersion: String = HiveVersionInfo.getVersion val HIVE_METASTORE_VERSION = buildStaticConf("spark.sql.hive.metastore.version") .doc("Version of the Hive metastore. Available options are " + @@ -80,7 +77,7 @@ private[spark] object HiveUtils extends Logging { val HIVE_METASTORE_JARS = buildStaticConf("spark.sql.hive.metastore.jars") .doc(s""" | Location of the jars that should be used to instantiate the HiveMetastoreClient. - | This property can be one of three options: " + | This property can be one of four options: " | 1. "builtin" | Use Hive ${builtinHiveVersion}, which is bundled with the Spark assembly when | -Phive is enabled. When this option is chosen, @@ -88,12 +85,33 @@ private[spark] object HiveUtils extends Logging { | ${builtinHiveVersion} or not defined. | 2. "maven" | Use Hive jars of specified version downloaded from Maven repositories. - | 3. A classpath in the standard format for both Hive and Hadoop. + | 3. "path" + | Use Hive jars configured by `spark.sql.hive.metastore.jars.path` + | in comma separated format. Support both local or remote paths. + | 4. A classpath in the standard format for both Hive and Hadoop. """.stripMargin) .version("1.4.0") .stringConf .createWithDefault("builtin") + val HIVE_METASTORE_JARS_PATH = buildStaticConf("spark.sql.hive.metastore.jars.path") + .doc(s""" + | Comma-separated paths of the jars that used to instantiate the HiveMetastoreClient. 
+ | This configuration is useful only when `{$HIVE_METASTORE_JARS.key}` is set as `path`. + | The paths can be any of the following format: + | 1. file://path/to/jar/foo.jar + | 2. hdfs://nameservice/path/to/jar/foo.jar + | 3. /path/to/jar/ (path without URI scheme follow conf `fs.defaultFS`'s URI schema) + | 4. [http/https/ftp]://path/to/jar/foo.jar + | Note that 1, 2, and 3 support wildcard. For example: + | 1. file://path/to/jar/*,file://path2/to/jar/*/*.jar + | 2. hdfs://nameservice/path/to/jar/*,hdfs://nameservice2/path/to/jar/*/*.jar + """.stripMargin) + .version("3.1.0") + .stringConf + .toSequence + .createWithDefault(Nil) + val CONVERT_METASTORE_PARQUET = buildConf("spark.sql.hive.convertMetastoreParquet") .doc("When set to true, the built-in Parquet reader and writer are used to process " + "parquet tables created by using the HiveQL syntax, instead of Hive serde.") @@ -178,6 +196,7 @@ private[spark] object HiveUtils extends Logging { * The location of the jars that should be used to instantiate the HiveMetastoreClient. This * property can be one of three options: * - a classpath in the standard format for both hive and hadoop. + * - path - attempt to discover the jars with paths configured by `HIVE_METASTORE_JARS_PATH`. * - builtin - attempt to discover the jars that were used to load Spark SQL and use those. This * option is only valid when using the execution version of Hive. * - maven - download the correct version of hive on demand from maven. @@ -186,6 +205,13 @@ private[spark] object HiveUtils extends Logging { conf.getConf(HIVE_METASTORE_JARS) } + /** + * Hive jars paths, only work when `HIVE_METASTORE_JARS` is `path`. + */ + private def hiveMetastoreJarsPath(conf: SQLConf): Seq[String] = { + conf.getConf(HIVE_METASTORE_JARS_PATH) + } + /** * A comma separated list of class prefixes that should be loaded using the classloader that * is shared between Spark SQL and a specific version of Hive. An example of classes that should @@ -336,6 +362,20 @@ private[spark] object HiveUtils extends Logging { val hiveMetastoreBarrierPrefixes = HiveUtils.hiveMetastoreBarrierPrefixes(sqlConf) val metaVersion = IsolatedClientLoader.hiveVersion(hiveMetastoreVersion) + def addLocalHiveJars(file: File): Seq[URL] = { + if (file.getName == "*") { + val files = file.getParentFile.listFiles() + if (files == null) { + logWarning(s"Hive jar path '${file.getPath}' does not exist.") + Nil + } else { + files.filter(_.getName.toLowerCase(Locale.ROOT).endsWith(".jar")).map(_.toURL).toSeq + } + } else { + file.toURL :: Nil + } + } + val isolatedLoader = if (hiveMetastoreJars == "builtin") { if (builtinHiveVersion != hiveMetastoreVersion) { throw new IllegalArgumentException( @@ -396,24 +436,43 @@ private[spark] object HiveUtils extends Logging { config = configurations, barrierPrefixes = hiveMetastoreBarrierPrefixes, sharedPrefixes = hiveMetastoreSharedPrefixes) + } else if (hiveMetastoreJars == "path") { + // Convert to files and expand any directories. 
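      // Hypothetical usage sketch (the version and paths below are illustrative only):
      //   spark.sql.hive.metastore.version    2.3.7
      //   spark.sql.hive.metastore.jars       path
      //   spark.sql.hive.metastore.jars.path  file:///opt/hive/lib/*,hdfs://nameservice/hive/jars/*.jar
      // The configured globs are expanded here: Windows-style local paths go through
      // addLocalHiveJars, everything else through DataSource.checkAndGlobPathIfNecessary.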
+ val jars = + HiveUtils.hiveMetastoreJarsPath(sqlConf) + .flatMap { + case path if path.contains("\\") && Utils.isWindows => + addLocalHiveJars(new File(path)) + case path => + DataSource.checkAndGlobPathIfNecessary( + pathStrings = Seq(path), + hadoopConf = hadoopConf, + checkEmptyGlobPath = true, + checkFilesExist = false, + enableGlobbing = true + ).map(_.toUri.toURL) + } + + logInfo( + s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion " + + s"using path: ${jars.mkString(";")}") + new IsolatedClientLoader( + version = metaVersion, + sparkConf = conf, + hadoopConf = hadoopConf, + execJars = jars.toSeq, + config = configurations, + isolationOn = true, + barrierPrefixes = hiveMetastoreBarrierPrefixes, + sharedPrefixes = hiveMetastoreSharedPrefixes) } else { // Convert to files and expand any directories. val jars = hiveMetastoreJars .split(File.pathSeparator) - .flatMap { - case path if new File(path).getName == "*" => - val files = new File(path).getParentFile.listFiles() - if (files == null) { - logWarning(s"Hive jar path '$path' does not exist.") - Nil - } else { - files.filter(_.getName.toLowerCase(Locale.ROOT).endsWith(".jar")).toSeq - } - case path => - new File(path) :: Nil - } - .map(_.toURI.toURL) + .flatMap { path => + addLocalHiveJars(new File(path)) + } logInfo( s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion " + diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala index 3e0d44160c8a1..eb9ce877fc8d2 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala @@ -39,7 +39,7 @@ import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging import org.apache.spark.rdd.{EmptyRDD, HadoopRDD, NewHadoopRDD, RDD, UnionRDD} import org.apache.spark.sql.SparkSession -import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper} import org.apache.spark.sql.catalyst.analysis.CastSupport import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.util.DateTimeUtils @@ -68,7 +68,7 @@ class HadoopTableReader( @transient private val tableDesc: TableDesc, @transient private val sparkSession: SparkSession, hadoopConf: Configuration) - extends TableReader with CastSupport with Logging { + extends TableReader with CastSupport with SQLConfHelper with Logging { // Hadoop honors "mapreduce.job.maps" as hint, // but will ignore when mapreduce.jobtracker.address is "local". diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala index 3ea80eaf6f714..48f3837740933 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala @@ -233,7 +233,8 @@ private[hive] trait HiveClient { /** Returns partitions filtered by predicates for the given table. */ def getPartitionsByFilter( catalogTable: CatalogTable, - predicates: Seq[Expression]): Seq[CatalogTablePartition] + predicates: Seq[Expression], + timeZoneId: String): Seq[CatalogTablePartition] /** Loads a static partition into an existing table. 
*/ def loadPartition( diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala index 805bcb2bc3a60..40bcdefbc351e 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala @@ -19,6 +19,7 @@ package org.apache.spark.sql.hive.client import java.io.{File, PrintStream} import java.lang.{Iterable => JIterable} +import java.lang.reflect.InvocationTargetException import java.nio.charset.StandardCharsets.UTF_8 import java.util.{Locale, Map => JMap} import java.util.concurrent.TimeUnit._ @@ -48,7 +49,7 @@ import org.apache.spark.internal.Logging import org.apache.spark.metrics.source.HiveCatalogMetrics import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.TableIdentifier -import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, NoSuchPartitionException} +import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, NoSuchPartitionException, NoSuchPartitionsException, PartitionAlreadyExistsException, PartitionsAlreadyExistException} import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec import org.apache.spark.sql.catalyst.expressions.Expression @@ -56,8 +57,7 @@ import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParseException} import org.apache.spark.sql.connector.catalog.SupportsNamespaces._ import org.apache.spark.sql.execution.QueryExecutionException import org.apache.spark.sql.hive.HiveExternalCatalog -import org.apache.spark.sql.hive.HiveExternalCatalog.{DATASOURCE_SCHEMA, DATASOURCE_SCHEMA_NUMPARTS, DATASOURCE_SCHEMA_PART_PREFIX} -import org.apache.spark.sql.hive.HiveUtils +import org.apache.spark.sql.hive.HiveExternalCatalog.DATASOURCE_SCHEMA import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ import org.apache.spark.util.{CircularBuffer, Utils} @@ -169,9 +169,7 @@ private[hive] class HiveClientImpl( // since HIVE-11878, and ADDJarCommand will add jars to clientLoader.classLoader. // For this reason we cannot load the jars added by ADDJarCommand because of class loader // got changed. We reset it to clientLoader.ClassLoader here. - if (HiveUtils.isHive23) { - state.getConf.setClassLoader(clientLoader.classLoader) - } + state.getConf.setClassLoader(clientLoader.classLoader) SessionState.start(state) state.out = new PrintStream(outputBuffer, true, UTF_8.name()) state.err = new PrintStream(outputBuffer, true, UTF_8.name()) @@ -179,9 +177,7 @@ private[hive] class HiveClientImpl( } /** Returns the configuration for the current session. */ - def conf: HiveConf = if (!HiveUtils.isHive23) { - state.getConf - } else { + def conf: HiveConf = { val hiveConf = state.getConf // Hive changed the default of datanucleus.schema.autoCreateAll from true to false // and hive.metastore.schema.verification from false to true since Hive 2.0. @@ -293,11 +289,9 @@ private[hive] class HiveClientImpl( val ret = try { f } catch { - case e: NoClassDefFoundError - if HiveUtils.isHive23 && e.getMessage.contains("org/apache/hadoop/hive/serde2/SerDe") => + case e: NoClassDefFoundError if e.getMessage.contains("apache/hadoop/hive/serde2/SerDe") => throw new ClassNotFoundException("The SerDe interface removed since Hive 2.3(HIVE-15167)." 
+ - " Please migrate your custom SerDes to Hive 2.3 or build your own Spark with" + - " hive-1.2 profile. See HIVE-15167 for more details.", e) + " Please migrate your custom SerDes to Hive 2.3. See HIVE-15167 for more details.", e) } finally { state.getConf.setClassLoader(originalConfLoader) Thread.currentThread().setContextClassLoader(original) @@ -374,14 +368,14 @@ private[hive] class HiveClientImpl( override def getDatabase(dbName: String): CatalogDatabase = withHiveState { Option(client.getDatabase(dbName)).map { d => - val paras = Option(d.getParameters).map(_.asScala.toMap).getOrElse(Map()) ++ + val params = Option(d.getParameters).map(_.asScala.toMap).getOrElse(Map()) ++ Map(PROP_OWNER -> shim.getDatabaseOwnerName(d)) CatalogDatabase( name = d.getName, description = Option(d.getDescription).getOrElse(""), locationUri = CatalogUtils.stringToURI(d.getLocationUri), - properties = paras) + properties = params) }.getOrElse(throw new NoSuchDatabaseException(dbName)) } @@ -586,9 +580,7 @@ private[hive] class HiveClientImpl( val it = oldTable.getParameters.entrySet.iterator while (it.hasNext) { val entry = it.next() - val isSchemaProp = entry.getKey.startsWith(DATASOURCE_SCHEMA_PART_PREFIX) || - entry.getKey == DATASOURCE_SCHEMA || entry.getKey == DATASOURCE_SCHEMA_NUMPARTS - if (isSchemaProp) { + if (CatalogTable.isLargeTableProp(DATASOURCE_SCHEMA, entry.getKey)) { it.remove() } } @@ -605,7 +597,17 @@ private[hive] class HiveClientImpl( table: String, parts: Seq[CatalogTablePartition], ignoreIfExists: Boolean): Unit = withHiveState { - shim.createPartitions(client, db, table, parts, ignoreIfExists) + def replaceExistException(e: Throwable): Unit = e match { + case _: HiveException if e.getCause.isInstanceOf[AlreadyExistsException] => + throw new PartitionsAlreadyExistException(db, table, parts.map(_.spec)) + case _ => throw e + } + try { + shim.createPartitions(client, db, table, parts, ignoreIfExists) + } catch { + case e: InvocationTargetException => replaceExistException(e.getCause) + case e: Throwable => replaceExistException(e) + } } override def dropPartitions( @@ -626,9 +628,7 @@ private[hive] class HiveClientImpl( // (b='1', c='1') and (b='1', c='2'), a partial spec of (b='1') will match both. val parts = client.getPartitions(hiveTable, s.asJava).asScala if (parts.isEmpty && !ignoreIfNotExists) { - throw new AnalysisException( - s"No partition is dropped. 
One partition spec '$s' does not exist in table '$table' " + - s"database '$db'") + throw new NoSuchPartitionsException(db, table, Seq(s)) } parts.map(_.getValues) }.distinct @@ -665,6 +665,9 @@ private[hive] class HiveClientImpl( val catalogTable = getTable(db, table) val hiveTable = toHiveTable(catalogTable, Some(userName)) specs.zip(newSpecs).foreach { case (oldSpec, newSpec) => + if (client.getPartition(hiveTable, newSpec.asJava, false) != null) { + throw new PartitionAlreadyExistsException(db, table, newSpec) + } val hivePart = getPartitionOption(catalogTable, oldSpec) .map { p => toHivePartition(p.copy(spec = newSpec), hiveTable) } .getOrElse { throw new NoSuchPartitionException(db, table, oldSpec) } @@ -740,9 +743,11 @@ private[hive] class HiveClientImpl( override def getPartitionsByFilter( table: CatalogTable, - predicates: Seq[Expression]): Seq[CatalogTablePartition] = withHiveState { + predicates: Seq[Expression], + timeZoneId: String): Seq[CatalogTablePartition] = withHiveState { val hiveTable = toHiveTable(table, Some(userName)) - val parts = shim.getPartitionsByFilter(client, hiveTable, predicates).map(fromHivePartition) + val parts = shim.getPartitionsByFilter(client, hiveTable, predicates, timeZoneId) + .map(fromHivePartition) HiveCatalogMetrics.incrementFetchedPartitions(parts.length) parts } @@ -983,12 +988,7 @@ private[hive] class HiveClientImpl( private[hive] object HiveClientImpl extends Logging { /** Converts the native StructField to Hive's FieldSchema. */ def toHiveColumn(c: StructField): FieldSchema = { - val typeString = if (c.metadata.contains(HIVE_TYPE_STRING)) { - c.metadata.getString(HIVE_TYPE_STRING) - } else { - // replace NullType to HiveVoidType since Hive parse void not null. - HiveVoidType.replaceVoidType(c.dataType).catalogString - } + val typeString = HiveVoidType.replaceVoidType(c.dataType).catalogString new FieldSchema(c.name, typeString, c.getComment().orNull) } @@ -1006,18 +1006,10 @@ private[hive] object HiveClientImpl extends Logging { /** Builds the native StructField from Hive's FieldSchema. 
*/ def fromHiveColumn(hc: FieldSchema): StructField = { val columnType = getSparkSQLDataType(hc) - val replacedVoidType = HiveVoidType.replaceVoidType(columnType) - val metadata = if (hc.getType != replacedVoidType.catalogString) { - new MetadataBuilder().putString(HIVE_TYPE_STRING, hc.getType).build() - } else { - Metadata.empty - } - val field = StructField( name = hc.getName, dataType = columnType, - nullable = true, - metadata = metadata) + nullable = true) Option(hc.getComment).map(field.withComment).getOrElse(field) } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala index 99c9199e466f9..ed088648bc20a 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala @@ -41,13 +41,13 @@ import org.apache.hadoop.hive.serde.serdeConstants import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException -import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.FunctionIdentifier import org.apache.spark.sql.catalyst.analysis.NoSuchPermanentFunctionException import org.apache.spark.sql.catalyst.catalog.{CatalogFunction, CatalogTablePartition, CatalogUtils, FunctionResource, FunctionResourceType} import org.apache.spark.sql.catalyst.expressions._ +import org.apache.spark.sql.catalyst.util.{DateFormatter, DateTimeUtils, TypeUtils} import org.apache.spark.sql.internal.SQLConf -import org.apache.spark.sql.types.{AtomicType, IntegralType, StringType} +import org.apache.spark.sql.types.{AtomicType, DateType, IntegralType, StringType} import org.apache.spark.unsafe.types.UTF8String import org.apache.spark.util.Utils @@ -79,7 +79,11 @@ private[client] sealed abstract class Shim { def getAllPartitions(hive: Hive, table: Table): Seq[Partition] - def getPartitionsByFilter(hive: Hive, table: Table, predicates: Seq[Expression]): Seq[Partition] + def getPartitionsByFilter( + hive: Hive, + table: Table, + predicates: Seq[Expression], + timeZoneId: String): Seq[Partition] def getCommandProcessor(token: String, conf: HiveConf): CommandProcessor @@ -349,7 +353,8 @@ private[client] class Shim_v0_12 extends Shim with Logging { override def getPartitionsByFilter( hive: Hive, table: Table, - predicates: Seq[Expression]): Seq[Partition] = { + predicates: Seq[Expression], + timeZoneId: String): Seq[Partition] = { // getPartitionsByFilter() doesn't support binary comparison ops in Hive 0.12. // See HIVE-4888. logDebug("Hive 0.12 doesn't support predicate pushdown to metastore. " + @@ -632,7 +637,9 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { * * Unsupported predicates are skipped. */ - def convertFilters(table: Table, filters: Seq[Expression]): String = { + def convertFilters(table: Table, filters: Seq[Expression], timeZoneId: String): String = { + lazy val dateFormatter = DateFormatter(DateTimeUtils.getZoneId(timeZoneId)) + /** * An extractor that matches all binary comparison operators except null-safe equality. * @@ -650,6 +657,8 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { case Literal(null, _) => None // `null`s can be cast as other types; we want to avoid NPEs. 
case Literal(value, _: IntegralType) => Some(value.toString) case Literal(value, _: StringType) => Some(quoteStringLiteral(value.toString)) + case Literal(value, _: DateType) => + Some(dateFormatter.format(value.asInstanceOf[Int])) case _ => None } } @@ -700,6 +709,21 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { } } + object ExtractableDateValues { + private lazy val valueToLiteralString: PartialFunction[Any, String] = { + case value: Int => dateFormatter.format(value) + } + + def unapply(values: Set[Any]): Option[Seq[String]] = { + val extractables = values.toSeq.map(valueToLiteralString.lift) + if (extractables.nonEmpty && extractables.forall(_.isDefined)) { + Some(extractables.map(_.get)) + } else { + None + } + } + } + object SupportedAttribute { // hive varchar is treated as catalyst string, but hive varchar can't be pushed down. private val varcharKeys = table.getPartitionKeys.asScala @@ -711,7 +735,8 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { val resolver = SQLConf.get.resolver if (varcharKeys.exists(c => resolver(c, attr.name))) { None - } else if (attr.dataType.isInstanceOf[IntegralType] || attr.dataType == StringType) { + } else if (attr.dataType.isInstanceOf[IntegralType] || attr.dataType == StringType || + attr.dataType == DateType) { Some(attr.name) } else { None @@ -724,12 +749,13 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { } val useAdvanced = SQLConf.get.advancedPartitionPredicatePushdownEnabled + val inSetThreshold = SQLConf.get.metastorePartitionPruningInSetThreshold object ExtractAttribute { def unapply(expr: Expression): Option[Attribute] = { expr match { case attr: Attribute => Some(attr) - case Cast(child @ AtomicType(), dt: AtomicType, _) + case Cast(child @ IntegralType(), dt: IntegralType, _) if Cast.canUpCast(child.dataType.asInstanceOf[AtomicType], dt) => unapply(child) case _ => None } @@ -741,6 +767,16 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { if useAdvanced => Some(convertInToOr(name, values)) + case InSet(child, values) if useAdvanced && values.size > inSetThreshold => + val dataType = child.dataType + val sortedValues = values.toSeq.sorted(TypeUtils.getInterpretedOrdering(dataType)) + convert(And(GreaterThanOrEqual(child, Literal(sortedValues.head, dataType)), + LessThanOrEqual(child, Literal(sortedValues.last, dataType)))) + + case InSet(child @ ExtractAttribute(SupportedAttribute(name)), ExtractableDateValues(values)) + if useAdvanced && child.dataType == DateType => + Some(convertInToOr(name, values)) + case InSet(ExtractAttribute(SupportedAttribute(name)), ExtractableValues(values)) if useAdvanced => Some(convertInToOr(name, values)) @@ -753,6 +789,15 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { ExtractableLiteral(value), ExtractAttribute(SupportedAttribute(name))) => Some(s"$value ${op.symbol} $name") + case Contains(ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value)) => + Some(s"$name like " + (("\".*" + value.drop(1)).dropRight(1) + ".*\"")) + + case StartsWith(ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value)) => + Some(s"$name like " + (value.dropRight(1) + ".*\"")) + + case EndsWith(ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value)) => + Some(s"$name like " + ("\".*" + value.drop(1))) + case And(expr1, expr2) if useAdvanced => val converted = convert(expr1) ++ convert(expr2) if (converted.isEmpty) { @@ -767,6 +812,14 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { right <- convert(expr2) } yield s"($left or $right)" + 
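      // Illustrative renderings of the cases above (attribute names are hypothetical):
      //   StartsWith(part, "ab")   ->  part like "ab.*"
      //   Contains(part, "ab")     ->  part like ".*ab.*"
      //   EndsWith(part, "ab")     ->  part like ".*ab"
      //   dt = DATE'2020-01-01'    ->  dt = 2020-01-01   (formatted with the given timeZoneId)
      //   An InSet over more values than inSetThreshold is relaxed to a min/max range check
      //   before conversion, instead of being expanded into a long OR chain.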
case Not(EqualTo( + ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value))) if useAdvanced => + Some(s"$name != $value") + + case Not(EqualTo( + ExtractableLiteral(value), ExtractAttribute(SupportedAttribute(name)))) if useAdvanced => + Some(s"$value != $name") + case _ => None } @@ -787,11 +840,12 @@ private[client] class Shim_v0_13 extends Shim_v0_12 { override def getPartitionsByFilter( hive: Hive, table: Table, - predicates: Seq[Expression]): Seq[Partition] = { + predicates: Seq[Expression], + timeZoneId: String): Seq[Partition] = { // Hive getPartitionsByFilter() takes a string that represents partition // predicates like "str_key=\"value\" and int_key=1 ..." - val filter = convertFilters(table, predicates) + val filter = convertFilters(table, predicates, timeZoneId) val partitions = if (filter.isEmpty) { @@ -1327,8 +1381,6 @@ private[client] class Shim_v3_0 extends Shim_v2_3 { inheritTableSpecs: Boolean, isSkewedStoreAsSubdir: Boolean, isSrcLocal: Boolean): Unit = { - val session = SparkSession.getActiveSession - assert(session.nonEmpty) val table = hive.getTable(tableName) val loadFileType = if (replace) { clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase("REPLACE_ALL")) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala index 42a0ec0253b85..02bf86533c89e 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala @@ -52,12 +52,9 @@ private[hive] object IsolatedClientLoader extends Logging { config: Map[String, String] = Map.empty, ivyPath: Option[String] = None, sharedPrefixes: Seq[String] = Seq.empty, - barrierPrefixes: Seq[String] = Seq.empty, - sharesHadoopClasses: Boolean = true): IsolatedClientLoader = synchronized { + barrierPrefixes: Seq[String] = Seq.empty): IsolatedClientLoader = synchronized { val resolvedVersion = hiveVersion(hiveMetastoreVersion) - // We will first try to share Hadoop classes. If we cannot resolve the Hadoop artifact - // with the given version, we will use Hadoop 2.7 and then will not share Hadoop classes. - var _sharesHadoopClasses = sharesHadoopClasses + // We will use Hadoop 2.7 if we cannot resolve the Hadoop artifact. val files = if (resolvedVersions.contains((resolvedVersion, hadoopVersion))) { resolvedVersions((resolvedVersion, hadoopVersion)) } else { @@ -72,10 +69,8 @@ private[hive] object IsolatedClientLoader extends Logging { val fallbackVersion = "2.7.4" logWarning(s"Failed to resolve Hadoop artifacts for the version $hadoopVersion. We " + s"will change the hadoop version from $hadoopVersion to $fallbackVersion and try " + - "again. Hadoop classes will not be shared between Spark and Hive metastore client. " + - "It is recommended to set jars used by Hive metastore client through " + + "again. 
It is recommended to set jars used by Hive metastore client through " + "spark.sql.hive.metastore.jars in the production environment.") - _sharesHadoopClasses = false (downloadVersion( resolvedVersion, fallbackVersion, ivyPath, remoteRepos), fallbackVersion) } @@ -89,7 +84,6 @@ private[hive] object IsolatedClientLoader extends Logging { execJars = files, hadoopConf = hadoopConf, config = config, - sharesHadoopClasses = _sharesHadoopClasses, sharedPrefixes = sharedPrefixes, barrierPrefixes = barrierPrefixes) } @@ -124,15 +118,16 @@ private[hive] object IsolatedClientLoader extends Logging { Seq("com.google.guava:guava:14.0.1", s"org.apache.hadoop:hadoop-client:$hadoopVersion") - val classpath = quietly { + val classpaths = quietly { SparkSubmitUtils.resolveMavenCoordinates( hiveArtifacts.mkString(","), SparkSubmitUtils.buildIvySettings( Some(remoteRepos), ivyPath), + transitive = true, exclusions = version.exclusions) } - val allFiles = classpath.split(",").map(new File(_)).toSet + val allFiles = classpaths.map(new File(_)).toSet // TODO: Remove copy logic. val tempDir = Utils.createTempDir(namePrefix = s"hive-${version}") @@ -164,7 +159,6 @@ private[hive] object IsolatedClientLoader extends Logging { * @param config A set of options that will be added to the HiveConf of the constructed client. * @param isolationOn When true, custom versions of barrier classes will be constructed. Must be * true unless loading the version of hive that is on Spark's classloader. - * @param sharesHadoopClasses When true, we will share Hadoop classes between Spark and * @param baseClassLoader The spark classloader that is used to load shared classes. */ private[hive] class IsolatedClientLoader( @@ -174,7 +168,6 @@ private[hive] class IsolatedClientLoader( val execJars: Seq[URL] = Seq.empty, val config: Map[String, String] = Map.empty, val isolationOn: Boolean = true, - val sharesHadoopClasses: Boolean = true, val baseClassLoader: ClassLoader = Thread.currentThread().getContextClassLoader, val sharedPrefixes: Seq[String] = Seq.empty, val barrierPrefixes: Seq[String] = Seq.empty) @@ -191,7 +184,7 @@ private[hive] class IsolatedClientLoader( name.startsWith("org.apache.log4j") || // log4j1.x name.startsWith("org.apache.logging.log4j") || // log4j2 name.startsWith("org.apache.spark.") || - (sharesHadoopClasses && isHadoopClass) || + isHadoopClass || name.startsWith("scala.") || (name.startsWith("com.google") && !name.startsWith("com.google.cloud")) || name.startsWith("java.") || diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala index 9f79997e2979a..ccaa4502d9d2a 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/CreateHiveTableAsSelectCommand.scala @@ -22,6 +22,7 @@ import scala.util.control.NonFatal import org.apache.spark.sql.{AnalysisException, Row, SaveMode, SparkSession} import org.apache.spark.sql.catalyst.catalog.{CatalogTable, SessionCatalog} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan +import org.apache.spark.sql.catalyst.util.CharVarcharUtils import org.apache.spark.sql.execution.SparkPlan import org.apache.spark.sql.execution.command.{DataWritingCommand, DDLUtils} import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, InsertIntoHadoopFsRelationCommand, LogicalRelation} @@ -58,9 +59,10 @@ 
trait CreateHiveTableAsSelectBase extends DataWritingCommand { // TODO ideally, we should get the output data ready first and then // add the relation into catalog, just in case of failure occurs while data // processing. + val tableSchema = CharVarcharUtils.getRawSchema(outputColumns.toStructType) assert(tableDesc.schema.isEmpty) catalog.createTable( - tableDesc.copy(schema = outputColumns.toStructType), ignoreIfExists = false) + tableDesc.copy(schema = tableSchema), ignoreIfExists = false) try { // Read back the metadata of the table which was created just now. diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala index 4096916a100c3..4b03cff5e8c8e 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationExec.scala @@ -45,6 +45,8 @@ import org.apache.spark.util.{CircularBuffer, Utils} * @param input the set of expression that should be passed to the script. * @param script the command that should be executed. * @param output the attributes that are produced by the script. + * @param child logical plan whose output is transformed. + * @param ioschema the class set that defines how to handle input/output data. */ case class HiveScriptTransformationExec( input: Seq[Expression], @@ -92,7 +94,7 @@ case class HiveScriptTransformationExec( scriptOutputWritable.readFields(scriptOutputStream) } catch { case _: EOFException => - // This means that the stdout of `proc` (ie. TRANSFORM process) has exhausted. + // This means that the stdout of `proc` (i.e. TRANSFORM process) has exhausted. // Ideally the proc should *not* be alive at this point but // there can be a lag between EOF being written out and the process // being terminated. So explicitly waiting for the process to be done. 
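The EOF handling described in the comment above follows a general pattern for consuming a child process's output: end-of-stream on stdout only means the writer side has closed, not that the process has exited, so the reader still waits for termination before inspecting the exit status. A minimal, self-contained sketch of that pattern, with illustrative names (this is not the code from this patch):

    import java.io.{DataInputStream, EOFException}

    def drainAndWait(proc: Process): Int = {
      val in = new DataInputStream(proc.getInputStream)
      try {
        while (true) {
          // stand-in for scriptOutputWritable.readFields(scriptOutputStream)
          in.readFully(new Array[Byte](1))
        }
      } catch {
        case _: EOFException => // the transform process's stdout is exhausted
      }
      // EOF can be observed before the process actually terminates, so wait explicitly.
      proc.waitFor()
    }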
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveDirCommand.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveDirCommand.scala index b66c302a7d7ea..7ef637ed553ad 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveDirCommand.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveDirCommand.scala @@ -29,6 +29,7 @@ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.hive.client.HiveClientImpl import org.apache.spark.sql.util.SchemaUtils @@ -63,12 +64,16 @@ case class InsertIntoHiveDirCommand( s"when inserting into ${storage.locationUri.get}", sparkSession.sessionState.conf.caseSensitiveAnalysis) - val hiveTable = HiveClientImpl.toHiveTable(CatalogTable( + val table = CatalogTable( identifier = TableIdentifier(storage.locationUri.get.toString, Some("default")), + provider = Some(DDLUtils.HIVE_PROVIDER), tableType = org.apache.spark.sql.catalyst.catalog.CatalogTableType.VIEW, storage = storage, schema = outputColumns.toStructType - )) + ) + DDLUtils.checkDataColNames(table) + + val hiveTable = HiveClientImpl.toHiveTable(table) hiveTable.getMetadata.put(serdeConstants.SERIALIZATION_LIB, storage.serde.getOrElse(classOf[LazySimpleSerDe].getName)) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala index 3c3f31ac2994a..bfb24cfedb55a 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/InsertIntoHiveTable.scala @@ -133,6 +133,7 @@ case class InsertIntoHiveTable( val numDynamicPartitions = partition.values.count(_.isEmpty) val numStaticPartitions = partition.values.count(_.nonEmpty) val partitionSpec = partition.map { + case (key, Some(null)) => key -> ExternalCatalogUtils.DEFAULT_PARTITION_NAME case (key, Some(value)) => key -> value case (key, None) => key -> "" } @@ -172,7 +173,7 @@ case class InsertIntoHiveTable( table.bucketSpec match { case Some(bucketSpec) => // Writes to bucketed hive tables are allowed only if user does not care about maintaining - // table's bucketing ie. both "hive.enforce.bucketing" and "hive.enforce.sorting" are + // table's bucketing i.e. 
both "hive.enforce.bucketing" and "hive.enforce.sorting" are // set to false val enforceBucketingConfig = "hive.enforce.bucketing" val enforceSortingConfig = "hive.enforce.sorting" @@ -229,6 +230,7 @@ case class InsertIntoHiveTable( val caseInsensitiveDpMap = CaseInsensitiveMap(dpMap) val updatedPartitionSpec = partition.map { + case (key, Some(null)) => key -> ExternalCatalogUtils.DEFAULT_PARTITION_NAME case (key, Some(value)) => key -> value case (key, None) if caseInsensitiveDpMap.contains(key) => key -> caseInsensitiveDpMap(key) diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala index f6aff10cbc147..3fa8449c3cb01 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitions.scala @@ -27,7 +27,6 @@ import org.apache.spark.sql.catalyst.planning.PhysicalOperation import org.apache.spark.sql.catalyst.plans.logical.{Filter, LogicalPlan, Project} import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.execution.datasources.DataSourceStrategy -import org.apache.spark.sql.internal.SQLConf /** * Prune hive table partitions using partition filters on [[HiveTableRelation]]. The pruned @@ -35,7 +34,7 @@ import org.apache.spark.sql.internal.SQLConf * the hive table relation will be updated based on pruned partitions. * * This rule is executed in optimization phase, so the statistics can be updated before physical - * planning, which is useful for some spark strategy, eg. + * planning, which is useful for some spark strategy, e.g. * [[org.apache.spark.sql.execution.SparkStrategies.JoinSelection]]. * * TODO: merge this with PruneFileSourcePartitions after we completely make hive as a data source. @@ -43,8 +42,6 @@ import org.apache.spark.sql.internal.SQLConf private[sql] class PruneHiveTablePartitions(session: SparkSession) extends Rule[LogicalPlan] with CastSupport with PredicateHelper { - override val conf: SQLConf = session.sessionState.conf - /** * Extract the partition filters from the filters on the table. 
*/ diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala index 4be3cd45454c6..c712a4a2b7c23 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.hive.execution -import java.io.{File, IOException} +import java.io.IOException import java.net.URI import java.text.SimpleDateFormat import java.util.{Date, Locale, Random} diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala index 8ad5cb70d248b..c7002853bed54 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/hiveUDFs.scala @@ -17,7 +17,6 @@ package org.apache.spark.sql.hive -import java.lang.{Boolean => JBoolean} import java.nio.ByteBuffer import scala.collection.JavaConverters._ @@ -39,8 +38,12 @@ import org.apache.spark.sql.catalyst.expressions.aggregate._ import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback import org.apache.spark.sql.hive.HiveShim._ import org.apache.spark.sql.types._ -import org.apache.spark.util.Utils +/** + * Here we cannot extends `ImplicitTypeCasts` to compatible with UDF input data type, the reason is: + * we use children data type to reflect UDF method first and will get exception if it fails so that + * we can never go into `ImplicitTypeCasts`. + */ private[hive] case class HiveSimpleUDF( name: String, funcWrapper: HiveFunctionWrapper, children: Seq[Expression]) extends Expression @@ -344,20 +347,8 @@ private[hive] case class HiveUDAFFunction( funcWrapper.createFunction[AbstractGenericUDAFResolver]() } - val clazz = Utils.classForName(classOf[SimpleGenericUDAFParameterInfo].getName) - if (HiveUtils.isHive23) { - val ctor = clazz.getDeclaredConstructor( - classOf[Array[ObjectInspector]], JBoolean.TYPE, JBoolean.TYPE, JBoolean.TYPE) - val args = Array[AnyRef](inputInspectors, JBoolean.FALSE, JBoolean.FALSE, JBoolean.FALSE) - val parameterInfo = ctor.newInstance(args: _*).asInstanceOf[SimpleGenericUDAFParameterInfo] - resolver.getEvaluator(parameterInfo) - } else { - val ctor = clazz.getDeclaredConstructor( - classOf[Array[ObjectInspector]], JBoolean.TYPE, JBoolean.TYPE) - val args = Array[AnyRef](inputInspectors, JBoolean.FALSE, JBoolean.FALSE) - val parameterInfo = ctor.newInstance(args: _*).asInstanceOf[SimpleGenericUDAFParameterInfo] - resolver.getEvaluator(parameterInfo) - } + val parameterInfo = new SimpleGenericUDAFParameterInfo(inputInspectors, false, false, false) + resolver.getEvaluator(parameterInfo) } private case class HiveEvaluator( diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala index 356b92b4652b3..2868bb4ba85d3 100644 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala +++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala @@ -47,7 +47,7 @@ import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.execution.datasources._ -import org.apache.spark.sql.execution.datasources.orc.OrcOptions +import 
org.apache.spark.sql.execution.datasources.orc.{OrcFilters, OrcOptions} import org.apache.spark.sql.hive.{HiveInspectors, HiveShim} import org.apache.spark.sql.sources.{Filter, _} import org.apache.spark.sql.types._ @@ -75,7 +75,7 @@ class OrcFileFormat extends FileFormat with DataSourceRegister with Serializable val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles OrcFileOperator.readSchema( files.map(_.getPath.toString), - Some(sparkSession.sessionState.newHadoopConf()), + Some(sparkSession.sessionState.newHadoopConfWithOptions(options)), ignoreCorruptFiles ) } @@ -139,7 +139,7 @@ class OrcFileFormat extends FileFormat with DataSourceRegister with Serializable if (sparkSession.sessionState.conf.orcFilterPushDown) { // Sets pushed predicates - OrcFilters.createFilter(requiredSchema, filters.toArray).foreach { f => + OrcFilters.createFilter(requiredSchema, filters).foreach { f => hadoopConf.set(OrcFileFormat.SARG_PUSHDOWN, toKryo(f)) hadoopConf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, true) } @@ -296,7 +296,7 @@ private[orc] class OrcOutputWriter( override def close(): Unit = { if (recordWriterInstantiated) { - // Hive 1.2.1 ORC initializes its private `writer` field at the first write. + // Hive ORC initializes its private `writer` field at the first write. OrcFileFormat.addSparkVersionMetadata(recordWriter) recordWriter.close(Reporter.NULL) } diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala deleted file mode 100644 index f9c514567c639..0000000000000 --- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFilters.scala +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.hive.orc - -import java.lang.reflect.Method - -import org.apache.hadoop.hive.ql.io.sarg.SearchArgument -import org.apache.hadoop.hive.ql.io.sarg.SearchArgument.Builder -import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory.newBuilder - -import org.apache.spark.SparkException -import org.apache.spark.internal.Logging -import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.quoteIfNeeded -import org.apache.spark.sql.execution.datasources.orc.{OrcFilters => DatasourceOrcFilters} -import org.apache.spark.sql.execution.datasources.orc.OrcFilters.buildTree -import org.apache.spark.sql.hive.HiveUtils -import org.apache.spark.sql.sources._ -import org.apache.spark.sql.types._ - -/** - * Helper object for building ORC `SearchArgument`s, which are used for ORC predicate push-down. - * - * Due to limitation of ORC `SearchArgument` builder, we had to end up with a pretty weird double- - * checking pattern when converting `And`/`Or`/`Not` filters. 
- * - * An ORC `SearchArgument` must be built in one pass using a single builder. For example, you can't - * build `a = 1` and `b = 2` first, and then combine them into `a = 1 AND b = 2`. This is quite - * different from the cases in Spark SQL or Parquet, where complex filters can be easily built using - * existing simpler ones. - * - * The annoying part is that, `SearchArgument` builder methods like `startAnd()`, `startOr()`, and - * `startNot()` mutate internal state of the builder instance. This forces us to translate all - * convertible filters with a single builder instance. However, before actually converting a filter, - * we've no idea whether it can be recognized by ORC or not. Thus, when an inconvertible filter is - * found, we may already end up with a builder whose internal state is inconsistent. - * - * For example, to convert an `And` filter with builder `b`, we call `b.startAnd()` first, and then - * try to convert its children. Say we convert `left` child successfully, but find that `right` - * child is inconvertible. Alas, `b.startAnd()` call can't be rolled back, and `b` is inconsistent - * now. - * - * The workaround employed here is that, for `And`/`Or`/`Not`, we first try to convert their - * children with brand new builders, and only do the actual conversion with the right builder - * instance when the children are proven to be convertible. - * - * P.S.: Hive seems to use `SearchArgument` together with `ExprNodeGenericFuncDesc` only. Usage of - * builder methods mentioned above can only be found in test code, where all tested filters are - * known to be convertible. - */ -private[orc] object OrcFilters extends Logging { - - private def findMethod(klass: Class[_], name: String, args: Class[_]*): Method = { - val method = klass.getMethod(name, args: _*) - method.setAccessible(true) - method - } - - def createFilter(schema: StructType, filters: Array[Filter]): Option[SearchArgument] = { - if (HiveUtils.isHive23) { - DatasourceOrcFilters.createFilter(schema, filters).asInstanceOf[Option[SearchArgument]] - } else { - val dataTypeMap = schema.map(f => quoteIfNeeded(f.name) -> f.dataType).toMap - // TODO (SPARK-25557): ORC doesn't support nested predicate pushdown, so they are removed. - val newFilters = filters.filter(!_.containsNestedColumn) - // Combines all convertible filters using `And` to produce a single conjunction - val conjunctionOptional = buildTree(convertibleFilters(schema, dataTypeMap, newFilters)) - conjunctionOptional.map { conjunction => - // Then tries to build a single ORC `SearchArgument` for the conjunction predicate. - // The input predicate is fully convertible. There should not be any empty result in the - // following recursive method call `buildSearchArgument`. - buildSearchArgument(dataTypeMap, conjunction, newBuilder).build() - } - } - } - - def convertibleFilters( - schema: StructType, - dataTypeMap: Map[String, DataType], - filters: Seq[Filter]): Seq[Filter] = { - import org.apache.spark.sql.sources._ - - def convertibleFiltersHelper( - filter: Filter, - canPartialPushDown: Boolean): Option[Filter] = filter match { - // At here, it is not safe to just convert one side and remove the other side - // if we do not understand what the parent filters are. - // - // Here is an example used to explain the reason. - // Let's say we have NOT(a = 2 AND b in ('1')) and we do not understand how to - // convert b in ('1'). If we only convert a = 2, we will end up with a filter - // NOT(a = 2), which will generate wrong results. 
- // - // Pushing one side of AND down is only safe to do at the top level or in the child - // AND before hitting NOT or OR conditions, and in this case, the unsupported predicate - // can be safely removed. - case And(left, right) => - val leftResultOptional = convertibleFiltersHelper(left, canPartialPushDown) - val rightResultOptional = convertibleFiltersHelper(right, canPartialPushDown) - (leftResultOptional, rightResultOptional) match { - case (Some(leftResult), Some(rightResult)) => Some(And(leftResult, rightResult)) - case (Some(leftResult), None) if canPartialPushDown => Some(leftResult) - case (None, Some(rightResult)) if canPartialPushDown => Some(rightResult) - case _ => None - } - - // The Or predicate is convertible when both of its children can be pushed down. - // That is to say, if one/both of the children can be partially pushed down, the Or - // predicate can be partially pushed down as well. - // - // Here is an example used to explain the reason. - // Let's say we have - // (a1 AND a2) OR (b1 AND b2), - // a1 and b1 is convertible, while a2 and b2 is not. - // The predicate can be converted as - // (a1 OR b1) AND (a1 OR b2) AND (a2 OR b1) AND (a2 OR b2) - // As per the logical in And predicate, we can push down (a1 OR b1). - case Or(left, right) => - for { - lhs <- convertibleFiltersHelper(left, canPartialPushDown) - rhs <- convertibleFiltersHelper(right, canPartialPushDown) - } yield Or(lhs, rhs) - case Not(pred) => - val childResultOptional = convertibleFiltersHelper(pred, canPartialPushDown = false) - childResultOptional.map(Not) - case other => - for (_ <- buildLeafSearchArgument(dataTypeMap, other, newBuilder())) yield other - } - filters.flatMap { filter => - convertibleFiltersHelper(filter, true) - } - } - - /** - * Build a SearchArgument and return the builder so far. - * - * @param dataTypeMap a map from the attribute name to its data type. - * @param expression the input predicates, which should be fully convertible to SearchArgument. - * @param builder the input SearchArgument.Builder. - * @return the builder so far. - */ - private def buildSearchArgument( - dataTypeMap: Map[String, DataType], - expression: Filter, - builder: Builder): Builder = { - expression match { - case And(left, right) => - val lhs = buildSearchArgument(dataTypeMap, left, builder.startAnd()) - val rhs = buildSearchArgument(dataTypeMap, right, lhs) - rhs.end() - - case Or(left, right) => - val lhs = buildSearchArgument(dataTypeMap, left, builder.startOr()) - val rhs = buildSearchArgument(dataTypeMap, right, lhs) - rhs.end() - - case Not(child) => - buildSearchArgument(dataTypeMap, child, builder.startNot()).end() - - case other => - buildLeafSearchArgument(dataTypeMap, other, builder).getOrElse { - throw new SparkException( - "The input filter of OrcFilters.buildSearchArgument should be fully convertible.") - } - } - } - - /** - * Build a SearchArgument for a leaf predicate and return the builder so far. - * - * @param dataTypeMap a map from the attribute name to its data type. - * @param expression the input filter predicates. - * @param builder the input SearchArgument.Builder. - * @return the builder so far. - */ - private def buildLeafSearchArgument( - dataTypeMap: Map[String, DataType], - expression: Filter, - builder: Builder): Option[Builder] = { - def isSearchableType(dataType: DataType): Boolean = dataType match { - // Only the values in the Spark types below can be recognized by - // the `SearchArgumentImpl.BuilderImpl.boxLiteral()` method. 
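The partial push-down rules spelled out in the removed comments above carry over to the remaining `org.apache.spark.sql.execution.datasources.orc.OrcFilters`. Below is a minimal, self-contained sketch of that pruning logic over Spark's public `Filter` algebra; the `prune` helper and the `unsupportedIn` check are illustrative names for this sketch, not Spark APIs.

```scala
import org.apache.spark.sql.sources._

// Keep only the convertible parts of a predicate. `isConvertible` stands in for the real
// "can ORC evaluate this leaf?" check; the And/Or/Not handling mirrors the rules above.
def prune(filter: Filter, canPartialPushDown: Boolean)
         (isConvertible: Filter => Boolean): Option[Filter] = filter match {
  case And(l, r) =>
    (prune(l, canPartialPushDown)(isConvertible), prune(r, canPartialPushDown)(isConvertible)) match {
      case (Some(ll), Some(rr)) => Some(And(ll, rr))
      case (Some(ll), None) if canPartialPushDown => Some(ll)
      case (None, Some(rr)) if canPartialPushDown => Some(rr)
      case _ => None
    }
  case Or(l, r) =>
    // Both sides must be (at least partially) convertible, otherwise drop the whole Or.
    for {
      ll <- prune(l, canPartialPushDown)(isConvertible)
      rr <- prune(r, canPartialPushDown)(isConvertible)
    } yield Or(ll, rr)
  case Not(child) =>
    // Below a Not it is no longer safe to drop one side of an And.
    prune(child, canPartialPushDown = false)(isConvertible).map(Not)
  case leaf =>
    if (isConvertible(leaf)) Some(leaf) else None
}

// The example from the comment: NOT(a = 2 AND b IN ('1')) with an unsupported IN.
val unsupportedIn: Filter => Boolean = { case In("b", _) => false; case _ => true }
assert(prune(Not(And(EqualTo("a", 2), In("b", Array[Any]("1")))), canPartialPushDown = true)(unsupportedIn).isEmpty)
```

Running the sketch drops the whole predicate instead of pushing `NOT(a = 2)`, which is exactly the wrong-results case the comment warns about.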
- case ByteType | ShortType | FloatType | DoubleType => true - case IntegerType | LongType | StringType | BooleanType => true - case TimestampType | _: DecimalType => true - case _ => false - } - - import org.apache.spark.sql.sources._ - - // NOTE: For all case branches dealing with leaf predicates below, the additional `startAnd()` - // call is mandatory. ORC `SearchArgument` builder requires that all leaf predicates must be - // wrapped by a "parent" predicate (`And`, `Or`, or `Not`). - expression match { - // NOTE: For all case branches dealing with leaf predicates below, the additional `startAnd()` - // call is mandatory. ORC `SearchArgument` builder requires that all leaf predicates must be - // wrapped by a "parent" predicate (`And`, `Or`, or `Not`). - - case EqualTo(attribute, value) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startAnd() - val method = findMethod(bd.getClass, "equals", classOf[String], classOf[Object]) - Some(method.invoke(bd, attribute, value.asInstanceOf[AnyRef]).asInstanceOf[Builder].end()) - - case EqualNullSafe(attribute, value) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startAnd() - val method = findMethod(bd.getClass, "nullSafeEquals", classOf[String], classOf[Object]) - Some(method.invoke(bd, attribute, value.asInstanceOf[AnyRef]).asInstanceOf[Builder].end()) - - case LessThan(attribute, value) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startAnd() - val method = findMethod(bd.getClass, "lessThan", classOf[String], classOf[Object]) - Some(method.invoke(bd, attribute, value.asInstanceOf[AnyRef]).asInstanceOf[Builder].end()) - - case LessThanOrEqual(attribute, value) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startAnd() - val method = findMethod(bd.getClass, "lessThanEquals", classOf[String], classOf[Object]) - Some(method.invoke(bd, attribute, value.asInstanceOf[AnyRef]).asInstanceOf[Builder].end()) - - case GreaterThan(attribute, value) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startNot() - val method = findMethod(bd.getClass, "lessThanEquals", classOf[String], classOf[Object]) - Some(method.invoke(bd, attribute, value.asInstanceOf[AnyRef]).asInstanceOf[Builder].end()) - - case GreaterThanOrEqual(attribute, value) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startNot() - val method = findMethod(bd.getClass, "lessThan", classOf[String], classOf[Object]) - Some(method.invoke(bd, attribute, value.asInstanceOf[AnyRef]).asInstanceOf[Builder].end()) - - case IsNull(attribute) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startAnd() - val method = findMethod(bd.getClass, "isNull", classOf[String]) - Some(method.invoke(bd, attribute).asInstanceOf[Builder].end()) - - case IsNotNull(attribute) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startNot() - val method = findMethod(bd.getClass, "isNull", classOf[String]) - Some(method.invoke(bd, attribute).asInstanceOf[Builder].end()) - - case In(attribute, values) if isSearchableType(dataTypeMap(attribute)) => - val bd = builder.startAnd() - val method = findMethod(bd.getClass, "in", classOf[String], classOf[Array[Object]]) - Some(method.invoke(bd, attribute, values.map(_.asInstanceOf[AnyRef])) - .asInstanceOf[Builder].end()) - - case _ => None - } - } -} diff --git a/sql/hive/src/test/resources/data/scripts/test_transform.py b/sql/hive/src/test/resources/data/scripts/test_transform.py index ac6d11d8b919c..dedb370f6c90e 100755 --- 
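The "double-checking" workaround described in the removed file's Scaladoc is easy to lose in the reflection-heavy code above, so here is a stripped-down sketch of the idea using a toy mutable builder. None of these names are Spark or ORC APIs: children are probed with throwaway builders first, and the real builder is only mutated once the whole subtree is known to be convertible.

```scala
import scala.collection.mutable.ArrayBuffer

// Toy stand-in for ORC's SearchArgument.Builder: every call mutates internal state and
// cannot be rolled back, which is the property that forces the trial conversion.
class ToyBuilder {
  private val parts = ArrayBuffer.empty[String]
  def startAnd(): ToyBuilder = { parts += "AND("; this }
  def leaf(expr: String): ToyBuilder = { parts += expr; this }
  def end(): ToyBuilder = { parts += ")"; this }
  def build(): String = parts.mkString(" ")
}

sealed trait Pred
case class Leaf(expr: String, convertible: Boolean) extends Pred
case class AndPred(left: Pred, right: Pred) extends Pred

def tryConvert(p: Pred, b: ToyBuilder): Option[ToyBuilder] = p match {
  case Leaf(expr, ok) => if (ok) Some(b.leaf(expr)) else None
  case AndPred(l, r) =>
    // Probe both children with fresh builders; only touch `b` if both probes succeed.
    if (tryConvert(l, new ToyBuilder).isDefined && tryConvert(r, new ToyBuilder).isDefined) {
      val started = b.startAnd()
      tryConvert(l, started)
      tryConvert(r, started)
      Some(started.end())
    } else {
      None
    }
}

// Right child is inconvertible: nothing is pushed, and `real` was never half-mutated.
val real = new ToyBuilder
assert(tryConvert(AndPred(Leaf("a = 1", convertible = true), Leaf("udf(b)", convertible = false)), real).isEmpty)
assert(real.build().isEmpty)
```

In the removed code the probe and the final conversion are the same recursion, just run against a fresh `newBuilder` for the probe, which is why `buildSearchArgument` can assume its input is fully convertible.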
a/sql/hive/src/test/resources/data/scripts/test_transform.py +++ b/sql/hive/src/test/resources/data/scripts/test_transform.py @@ -1,3 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# import sys delim = sys.argv[1] diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q index 28bbc2d8f1a3e..df5334c785f6a 100644 --- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q +++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/auto_sortmerge_join_13.q @@ -19,7 +19,7 @@ set hive.optimize.bucketmapjoin = true; set hive.optimize.bucketmapjoin.sortedmerge = true; set hive.auto.convert.join=true; --- A SMB join followed by a mutli-insert +-- A SMB join followed by a multi-insert explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 @@ -41,7 +41,7 @@ select * from dest2 order by k1, k2; set hive.auto.convert.join.noconditionaltask=true; set hive.auto.convert.join.noconditionaltask.size=200; --- A SMB join followed by a mutli-insert +-- A SMB join followed by a multi-insert explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 @@ -61,7 +61,7 @@ select * from dest1 order by k1, k2; select * from dest2 order by k1, k2; set hive.auto.convert.sortmerge.join.to.mapjoin=true; --- A SMB join followed by a mutli-insert +-- A SMB join followed by a multi-insert explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q index 91e97de62c82f..843ba4a3dbacd 100644 --- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q +++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/bucketsortoptimize_insert_3.q @@ -18,7 +18,7 @@ FROM src INSERT OVERWRITE TABLE test_table1 PARTITION (ds = '1') SELECT *; -- Insert data into the bucketed table by selecting from another bucketed table --- The bucketing positions dont match - although the actual bucketing do. +-- The bucketing positions don't match - although the actual bucketing do. 
-- This should be a map-only operation EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') @@ -37,7 +37,7 @@ CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS; -- Insert data into the bucketed table by selecting from another bucketed table --- The bucketing positions dont match - this should be a map-reduce operation +-- The bucketing positions don't match - this should be a map-reduce operation EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key, x.value from diff --git a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q index f70e7d5c86237..4c56cad2411fc 100644 --- a/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q +++ b/sql/hive/src/test/resources/ql/src/test/queries/clientpositive/smb_mapjoin_20.q @@ -32,7 +32,7 @@ CREATE TABLE test_table3 (key STRING, value1 int, value2 string) PARTITIONED BY CLUSTERED BY (value1) SORTED BY (value1) INTO 2 BUCKETS; -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, although the bucketing positions dont match +-- This should be a map-only operation, although the bucketing positions don't match EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1'; diff --git a/sql/hive/src/test/resources/regression-test-SPARK-8489/test-2.13.jar b/sql/hive/src/test/resources/regression-test-SPARK-8489/test-2.13.jar new file mode 100644 index 0000000000000..0d10f7ff03b35 Binary files /dev/null and b/sql/hive/src/test/resources/regression-test-SPARK-8489/test-2.13.jar differ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/HiveCharVarcharTestSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/HiveCharVarcharTestSuite.scala new file mode 100644 index 0000000000000..bb7918c881c7e --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/HiveCharVarcharTestSuite.scala @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql + +import org.apache.spark.sql.execution.command.CharVarcharDDLTestBase +import org.apache.spark.sql.hive.test.TestHiveSingleton + +class HiveCharVarcharTestSuite extends CharVarcharTestSuite with TestHiveSingleton { + + // The default Hive serde doesn't support nested null values. 
+ override def format: String = "hive OPTIONS(fileFormat='parquet')" + + private var originalPartitionMode = "" + + override protected def beforeAll(): Unit = { + super.beforeAll() + originalPartitionMode = spark.conf.get("hive.exec.dynamic.partition.mode", "") + spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict") + } + + override protected def afterAll(): Unit = { + if (originalPartitionMode == "") { + spark.conf.unset("hive.exec.dynamic.partition.mode") + } else { + spark.conf.set("hive.exec.dynamic.partition.mode", originalPartitionMode) + } + super.afterAll() + } + + test("SPARK-33892: SHOW CREATE TABLE AS SERDE w/ char/varchar") { + withTable("t") { + sql(s"CREATE TABLE t(v VARCHAR(3), c CHAR(5)) USING $format") + val rest = sql("SHOW CREATE TABLE t AS SERDE").head().getString(0) + assert(rest.contains("VARCHAR(3)")) + assert(rest.contains("CHAR(5)")) + } + } +} + +class HiveCharVarcharDDLTestSuite extends CharVarcharDDLTestBase with TestHiveSingleton { + + // The default Hive serde doesn't support nested null values. + override def format: String = "hive OPTIONS(fileFormat='parquet')" + + private var originalPartitionMode = "" + + override protected def beforeAll(): Unit = { + super.beforeAll() + originalPartitionMode = spark.conf.get("hive.exec.dynamic.partition.mode", "") + spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict") + } + + override protected def afterAll(): Unit = { + if (originalPartitionMode == "") { + spark.conf.unset("hive.exec.dynamic.partition.mode") + } else { + spark.conf.set("hive.exec.dynamic.partition.mode", originalPartitionMode) + } + super.afterAll() + } +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertIntoHiveTableBenchmark.scala b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertIntoHiveTableBenchmark.scala index 81eb5e2591f13..e71b11e7a3f41 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertIntoHiveTableBenchmark.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertIntoHiveTableBenchmark.scala @@ -19,7 +19,6 @@ package org.apache.spark.sql.execution.benchmark import org.apache.spark.benchmark.Benchmark import org.apache.spark.sql.SparkSession -import org.apache.spark.sql.hive.HiveUtils import org.apache.spark.sql.hive.test.TestHive /** @@ -28,14 +27,11 @@ import org.apache.spark.sql.hive.test.TestHive * {{{ * 1. without sbt: bin/spark-submit --class * --jars ,, - * --packages org.spark-project.hive:hive-exec:1.2.1.spark2 * - * 2. build/sbt "hive/test:runMain " -Phive-1.2 or - * build/sbt "hive/test:runMain " -Phive-2.3 + * 2. build/sbt "hive/test:runMain " * 3. generate result: * SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "hive/test:runMain " * Results will be written to "benchmarks/InsertIntoHiveTableBenchmark-hive2.3-results.txt". - * 4. 
-Phive-1.2 does not work for JDK 11 * }}} */ object InsertIntoHiveTableBenchmark extends SqlBasedBenchmark { @@ -136,5 +132,5 @@ object InsertIntoHiveTableBenchmark extends SqlBasedBenchmark { } } - override def suffix: String = if (HiveUtils.isHive23) "-hive2.3" else "-hive1.2" + override def suffix: String = "-hive2.3" } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala index fc793534641df..7044e6ff78d4a 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/CachedTableSuite.scala @@ -113,7 +113,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto e = intercept[AnalysisException] { sql("UNCACHE TABLE nonexistentTable") }.getMessage - assert(e.contains(s"$expectedErrorMsg default.nonexistentTable")) + assert(e.contains("Table or view not found: nonexistentTable")) sql("UNCACHE TABLE IF EXISTS nonexistentTable") } @@ -364,14 +364,14 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto // Cache the table 'cachedTable' in temp db with qualified table name, // and then check whether the table is cached with expected name sql(s"CACHE TABLE $db.cachedTable OPTIONS('storageLevel' 'MEMORY_ONLY')") - assertCached(sql(s"SELECT * FROM $db.cachedTable"), s"`$db`.`cachedTable`", MEMORY_ONLY) + assertCached(sql(s"SELECT * FROM $db.cachedTable"), s"$db.cachedTable", MEMORY_ONLY) assert(spark.catalog.isCached(s"$db.cachedTable"), s"Table '$db.cachedTable' should be cached.") // Refresh the table 'cachedTable' in temp db with qualified table name, and then check // whether the table is still cached with the same name and storage level. sql(s"REFRESH TABLE $db.cachedTable") - assertCached(sql(s"select * from $db.cachedTable"), s"`$db`.`cachedTable`", MEMORY_ONLY) + assertCached(sql(s"select * from $db.cachedTable"), s"$db.cachedTable", MEMORY_ONLY) assert(spark.catalog.isCached(s"$db.cachedTable"), s"Table '$db.cachedTable' should be cached after refreshing with its qualified name.") @@ -382,7 +382,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto // 'cachedTable', instead of '$db.cachedTable' activateDatabase(db) { sql("REFRESH TABLE cachedTable") - assertCached(sql("SELECT * FROM cachedTable"), s"`$db`.`cachedTable`", MEMORY_ONLY) + assertCached(sql("SELECT * FROM cachedTable"), s"$db.cachedTable", MEMORY_ONLY) assert(spark.catalog.isCached("cachedTable"), s"Table '$db.cachedTable' should be cached after refreshing with its " + "unqualified name.") @@ -403,13 +403,13 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto // Cache the table 'cachedTable' in default db without qualified table name , and then // check whether the table is cached with expected name. sql("CACHE TABLE cachedTable OPTIONS('storageLevel' 'DISK_ONLY')") - assertCached(sql("SELECT * FROM cachedTable"), "`default`.`cachedTable`", DISK_ONLY) + assertCached(sql("SELECT * FROM cachedTable"), "cachedTable", DISK_ONLY) assert(spark.catalog.isCached("cachedTable"), "Table 'cachedTable' should be cached.") // Refresh the table 'cachedTable' in default db with unqualified table name, and then // check whether the table is still cached with the same name. 
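The `assertCached`/`isCached` changes in this suite, and the new SPARK-33963/SPARK-33965 tests further on, all exercise a small public surface. A quick sketch of that surface, assuming an active Hive-enabled `SparkSession` named `spark` and an illustrative table name:

```scala
// Cache a table with an explicit storage level, check the catalog's view of it, then uncache.
// `cache_demo` is a placeholder table name, not one used by the suite.
spark.sql("CREATE TABLE cache_demo (id INT) USING parquet")
spark.sql("CACHE TABLE cache_demo OPTIONS('storageLevel' 'DISK_ONLY')")
assert(spark.catalog.isCached("cache_demo"))

spark.sql("UNCACHE TABLE cache_demo")
assert(!spark.catalog.isCached("cache_demo"))
spark.sql("DROP TABLE cache_demo")
```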
sql("REFRESH TABLE cachedTable") - assertCached(sql("SELECT * FROM cachedTable"), "`default`.`cachedTable`", DISK_ONLY) + assertCached(sql("SELECT * FROM cachedTable"), "cachedTable", DISK_ONLY) assert(spark.catalog.isCached("cachedTable"), "Table 'cachedTable' should be cached after refreshing with its unqualified name.") @@ -421,7 +421,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto activateDatabase(db) { sql("REFRESH TABLE default.cachedTable") assertCached( - sql("SELECT * FROM default.cachedTable"), "`default`.`cachedTable`", DISK_ONLY) + sql("SELECT * FROM default.cachedTable"), "cachedTable", DISK_ONLY) assert(spark.catalog.isCached("default.cachedTable"), "Table 'cachedTable' should be cached after refreshing with its qualified name.") } @@ -429,4 +429,27 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto } } } + + test("SPARK-33963: do not use table stats while looking in table cache") { + val t = "table_on_test" + withTable(t) { + sql(s"CREATE TABLE $t (col int)") + assert(!spark.catalog.isCached(t)) + sql(s"CACHE TABLE $t") + assert(spark.catalog.isCached(t)) + } + } + + test("SPARK-33965: cache table in spark_catalog") { + withNamespace("spark_catalog.ns") { + sql("CREATE NAMESPACE spark_catalog.ns") + val t = "spark_catalog.ns.tbl" + withTable(t) { + sql(s"CREATE TABLE $t (col int)") + assert(!spark.catalog.isCached(t)) + sql(s"CACHE TABLE $t") + assert(spark.catalog.isCached(t)) + } + } + } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ClasspathDependenciesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ClasspathDependenciesSuite.scala index a696d6aaff27b..c136c4c9790fd 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/ClasspathDependenciesSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/ClasspathDependenciesSuite.scala @@ -57,20 +57,12 @@ class ClasspathDependenciesSuite extends SparkFunSuite { } } - test("shaded Protobuf") { - if (HiveUtils.isHive23) { - assertLoads("com.google.protobuf.ServiceException") - } else { - assertLoads("org.apache.hive.com.google.protobuf.ServiceException") - } + test("protobuf") { + assertLoads("com.google.protobuf.ServiceException") } - test("shaded Kryo") { - if (HiveUtils.isHive23) { - assertLoads("com.esotericsoftware.kryo.Kryo") - } else { - assertLoads("org.apache.hive.com.esotericsoftware.kryo.Kryo") - } + test("kryo") { + assertLoads("com.esotericsoftware.kryo.Kryo") } test("hive-common") { @@ -89,12 +81,7 @@ class ClasspathDependenciesSuite extends SparkFunSuite { } test("parquet-hadoop-bundle") { - if (HiveUtils.isHive23) { - assertLoads("org.apache.parquet.hadoop.ParquetOutputFormat") - assertLoads("org.apache.parquet.hadoop.ParquetInputFormat") - } else { - assertLoads("parquet.hadoop.ParquetOutputFormat") - assertLoads("parquet.hadoop.ParquetInputFormat") - } + assertLoads("org.apache.parquet.hadoop.ParquetOutputFormat") + assertLoads("org.apache.parquet.hadoop.ParquetInputFormat") } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala index 270595b0011e9..e413e0ee73cb9 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogSuite.scala @@ -17,8 +17,6 @@ package org.apache.spark.sql.hive -import java.net.URI - import org.apache.hadoop.conf.Configuration import 
org.apache.spark.SparkConf diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala index aa96fa035c4f0..37287fc394647 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveExternalCatalogVersionsSuite.scala @@ -27,7 +27,7 @@ import scala.util.control.NonFatal import org.apache.commons.lang3.{JavaVersion, SystemUtils} import org.apache.hadoop.conf.Configuration -import org.apache.spark.{SecurityManager, SparkConf, TestUtils} +import org.apache.spark.{SparkConf, TestUtils} import org.apache.spark.internal.config.MASTER_REST_SERVER_ENABLED import org.apache.spark.internal.config.UI.UI_ENABLED import org.apache.spark.sql.{QueryTest, Row, SparkSession} @@ -42,26 +42,37 @@ import org.apache.spark.util.Utils * Test HiveExternalCatalog backward compatibility. * * Note that, this test suite will automatically download spark binary packages of different - * versions to a local directory `/tmp/spark-test`. If there is already a spark folder with - * expected version under this local directory, e.g. `/tmp/spark-test/spark-2.0.3`, we will skip the - * downloading for this spark version. + * versions to a local directory. If the `spark.test.cache-dir` system property is defined, this + * directory will be used. If there is already a spark folder with expected version under this + * local directory, e.g. `/{cache-dir}/spark-2.0.3`, downloading for this spark version will be + * skipped. If the system property is not present, a temporary directory will be used and cleaned + * up after the test. */ @SlowHiveTest @ExtendedHiveTest class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { - private val isTestAtLeastJava9 = SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9) + import HiveExternalCatalogVersionsSuite._ private val wareHousePath = Utils.createTempDir(namePrefix = "warehouse") private val tmpDataDir = Utils.createTempDir(namePrefix = "test-data") - // For local test, you can set `sparkTestingDir` to a static value like `/tmp/test-spark`, to + // For local test, you can set `spark.test.cache-dir` to a static value like `/tmp/test-spark`, to // avoid downloading Spark of different versions in each run. 
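A compact sketch of the caching pattern the comment above refers to: read the `spark.test.cache-dir` system property if it is set, otherwise fall back to a throwaway temporary directory, and only delete the directory on cleanup when it was the throwaway one. Directory names are placeholders; the suite itself goes through `Utils.deleteRecursively`, as the surrounding hunks show.

```scala
import java.io.File
import java.nio.file.Files

val cacheDirProperty = "spark.test.cache-dir"
// A user-provided directory is reused across runs; otherwise create a temp dir to delete later.
val userProvidedDir = Option(System.getProperty(cacheDirProperty)).map(new File(_))
val sparkTestingDir = userProvidedDir.getOrElse(Files.createTempDirectory("test-spark").toFile)

def cleanup(): Unit = {
  // Keep a user-provided cache so subsequent runs skip the download; delete only our temp dir.
  // Shallow delete for the sketch; the real suite deletes recursively.
  if (userProvidedDir.isEmpty) {
    sparkTestingDir.listFiles().foreach(f => f.delete())
    sparkTestingDir.delete()
  }
}
```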
- private val sparkTestingDir = new File("/tmp/test-spark") + private val sparkTestingDir = Option(System.getProperty(SPARK_TEST_CACHE_DIR_SYSTEM_PROPERTY)) + .map(new File(_)).getOrElse(Utils.createTempDir(namePrefix = "test-spark")) private val unusedJar = TestUtils.createJarWithClasses(Seq.empty) + val hiveVersion = if (SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9)) { + "2.3.7" + } else { + "1.2.1" + } override def afterAll(): Unit = { try { Utils.deleteRecursively(wareHousePath) Utils.deleteRecursively(tmpDataDir) - Utils.deleteRecursively(sparkTestingDir) + // Only delete sparkTestingDir if it wasn't defined to a static location by the system prop + if (Option(System.getProperty(SPARK_TEST_CACHE_DIR_SYSTEM_PROPERTY)).isEmpty) { + Utils.deleteRecursively(sparkTestingDir) + } } finally { super.afterAll() } @@ -82,7 +93,11 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { mirrors.distinct :+ "https://archive.apache.org/dist" :+ PROCESS_TABLES.releaseMirror logInfo(s"Trying to download Spark $version from $sites") for (site <- sites) { - val filename = s"spark-$version-bin-hadoop2.7.tgz" + val filename = if (version.startsWith("3")) { + s"spark-$version-bin-hadoop3.2.tgz" + } else { + s"spark-$version-bin-hadoop2.7.tgz" + } val url = s"$site/spark/spark-$version/$filename" logInfo(s"Downloading Spark $version from $url") try { @@ -119,7 +134,6 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { // if the caller passes the name of an existing file, we want doFetchFile to write over it with // the contents from the specified url. conf.set("spark.files.overwrite", "true") - val securityManager = new SecurityManager(conf) val hadoopConf = new Configuration val outDir = new File(targetDir) @@ -128,7 +142,7 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { } // propagate exceptions up to the caller of getFileFromUrl - Utils.doFetchFile(urlString, outDir, filename, conf, securityManager, hadoopConf) + Utils.doFetchFile(urlString, outDir, filename, conf, hadoopConf) } private def getStringFromUrl(urlString: String): String = { @@ -142,7 +156,9 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { new String(Files.readAllBytes(contentPath), StandardCharsets.UTF_8) } - private def prepare(): Unit = { + override def beforeAll(): Unit = { + super.beforeAll() + val tempPyFile = File.createTempFile("test", ".py") // scalastyle:off line.size.limit Files.write(tempPyFile.toPath, @@ -192,7 +208,7 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { "--master", "local[2]", "--conf", s"${UI_ENABLED.key}=false", "--conf", s"${MASTER_REST_SERVER_ENABLED.key}=false", - "--conf", s"${HiveUtils.HIVE_METASTORE_VERSION.key}=1.2.1", + "--conf", s"${HiveUtils.HIVE_METASTORE_VERSION.key}=$hiveVersion", "--conf", s"${HiveUtils.HIVE_METASTORE_JARS.key}=maven", "--conf", s"${WAREHOUSE_PATH.key}=${wareHousePath.getCanonicalPath}", "--conf", s"spark.sql.test.version.index=$index", @@ -204,23 +220,14 @@ class HiveExternalCatalogVersionsSuite extends SparkSubmitTestUtils { tempPyFile.delete() } - override def beforeAll(): Unit = { - super.beforeAll() - if (!isTestAtLeastJava9) { - prepare() - } - } - test("backward compatibility") { - // TODO SPARK-28704 Test backward compatibility on JDK9+ once we have a version supports JDK9+ - assume(!isTestAtLeastJava9) val args = Seq( "--class", PROCESS_TABLES.getClass.getName.stripSuffix("$"), "--name", "HiveExternalCatalog backward compatibility test", "--master", 
"local[2]", "--conf", s"${UI_ENABLED.key}=false", "--conf", s"${MASTER_REST_SERVER_ENABLED.key}=false", - "--conf", s"${HiveUtils.HIVE_METASTORE_VERSION.key}=1.2.1", + "--conf", s"${HiveUtils.HIVE_METASTORE_VERSION.key}=$hiveVersion", "--conf", s"${HiveUtils.HIVE_METASTORE_JARS.key}=maven", "--conf", s"${WAREHOUSE_PATH.key}=${wareHousePath.getCanonicalPath}", "--driver-java-options", s"-Dderby.system.home=${wareHousePath.getCanonicalPath}", @@ -234,7 +241,7 @@ object PROCESS_TABLES extends QueryTest with SQLTestUtils { // Tests the latest version of every release line. val testingVersions: Seq[String] = { import scala.io.Source - try { + val versions: Seq[String] = try { Source.fromURL(s"${releaseMirror}/spark").mkString .split("\n") .filter(_.contains("""
  • Seq("2.3.4", "2.4.5") // A temporary fallback to use a specific version + case NonFatal(_) => Seq("3.0.1", "2.4.7") // A temporary fallback to use a specific version } + versions + .filter(v => v.startsWith("3") || !TestUtils.isPythonVersionAtLeast38()) + .filter(v => v.startsWith("3") || !SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9)) } protected var spark: SparkSession = _ @@ -306,3 +316,8 @@ object PROCESS_TABLES extends QueryTest with SQLTestUtils { } } } + +object HiveExternalCatalogVersionsSuite { + private val SPARK_TEST_CACHE_DIR_SYSTEM_PROPERTY = "spark.test.cache-dir" +} + diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala index 95e99c653d6f6..1a6f6843d3911 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveMetastoreCatalogSuite.scala @@ -113,24 +113,19 @@ class HiveMetastoreCatalogSuite extends TestHiveSingleton with SQLTestUtils { .add("c9", "date") .add("c10", "timestamp") .add("c11", "string") - .add("c12", "string", true, - new MetadataBuilder().putString(HIVE_TYPE_STRING, "char(10)").build()) - .add("c13", "string", true, - new MetadataBuilder().putString(HIVE_TYPE_STRING, "varchar(10)").build()) + .add("c12", CharType(10), true) + .add("c13", VarcharType(10), true) .add("c14", "binary") .add("c15", "decimal") .add("c16", "decimal(10)") .add("c17", "decimal(10,2)") .add("c18", "array") .add("c19", "array") - .add("c20", "array", true, - new MetadataBuilder().putString(HIVE_TYPE_STRING, "array").build()) + .add("c20", ArrayType(CharType(10)), true) .add("c21", "map") - .add("c22", "map", true, - new MetadataBuilder().putString(HIVE_TYPE_STRING, "map").build()) + .add("c22", MapType(IntegerType, CharType(10)), true) .add("c23", "struct") - .add("c24", "struct", true, - new MetadataBuilder().putString(HIVE_TYPE_STRING, "struct").build()) + .add("c24", new StructType().add("c", VarcharType(10)).add("d", "int"), true) assert(schema == expectedSchema) } } @@ -206,13 +201,8 @@ class DataSourceWithHiveMetastoreCatalogSuite assert(columns.map(_.dataType) === Seq(DecimalType(10, 3), StringType)) checkAnswer(table("t"), testDF) - if (HiveUtils.isHive23) { - assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === - Seq("1.100\t1", "2.100\t2")) - } else { - assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === - Seq("1.1\t1", "2.1\t2")) - } + assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === + Seq("1.100\t1", "2.100\t2")) } } @@ -244,13 +234,8 @@ class DataSourceWithHiveMetastoreCatalogSuite assert(columns.map(_.dataType) === Seq(DecimalType(10, 3), StringType)) checkAnswer(table("t"), testDF) - if (HiveUtils.isHive23) { - assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === - Seq("1.100\t1", "2.100\t2")) - } else { - assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === - Seq("1.1\t1", "2.1\t2")) - } + assert(sparkSession.metadataHive.runSqlHive("SELECT * FROM t") === + Seq("1.100\t1", "2.100\t2")) } } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSourceSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSourceSuite.scala index b557fe73f1154..b3ea54a7bc931 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSourceSuite.scala +++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSourceSuite.scala @@ -25,7 +25,6 @@ import org.apache.spark.sql.catalyst.catalog.HiveTableRelation import org.apache.spark.sql.execution.datasources.LogicalRelation import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ -import org.apache.spark.util.Utils /** * A suite of tests for the Parquet support through the data sources API. @@ -230,6 +229,12 @@ class HiveParquetSourceSuite extends ParquetPartitioningTest { withTempPath { path => withTable("parq_tbl1", "parq_tbl2", "parq_tbl3", "tbl1", "tbl2", "tbl3", "tbl4", "tbl5", "tbl6") { + + def checkErrorMsg(path: String): String = { + s"Path: ${path} is a directory, which is not supported by the record reader " + + s"when `mapreduce.input.fileinputformat.input.dir.recursive` is false." + } + val parquetTblStatement1 = s""" |CREATE EXTERNAL TABLE parq_tbl1( @@ -287,7 +292,7 @@ class HiveParquetSourceSuite extends ParquetPartitioningTest { val msg = intercept[IOException] { sql("SELECT * FROM tbl1").show() }.getMessage - assert(msg.contains("Not a file:")) + assert(msg.contains(checkErrorMsg(s"$path/l1"))) } val l1DirStatement = @@ -305,7 +310,7 @@ class HiveParquetSourceSuite extends ParquetPartitioningTest { val msg = intercept[IOException] { sql("SELECT * FROM tbl2").show() }.getMessage - assert(msg.contains("Not a file:")) + assert(msg.contains(checkErrorMsg(s"$path/l1/l2"))) } val l2DirStatement = @@ -323,7 +328,7 @@ class HiveParquetSourceSuite extends ParquetPartitioningTest { val msg = intercept[IOException] { sql("SELECT * FROM tbl3").show() }.getMessage - assert(msg.contains("Not a file:")) + assert(msg.contains(checkErrorMsg(s"$path/l1/l2/l3"))) } val wildcardTopDirStatement = @@ -341,7 +346,7 @@ class HiveParquetSourceSuite extends ParquetPartitioningTest { val msg = intercept[IOException] { sql("SELECT * FROM tbl4").show() }.getMessage - assert(msg.contains("Not a file:")) + assert(msg.contains(checkErrorMsg(s"$path/l1/l2"))) } val wildcardL1DirStatement = @@ -359,7 +364,7 @@ class HiveParquetSourceSuite extends ParquetPartitioningTest { val msg = intercept[IOException] { sql("SELECT * FROM tbl5").show() }.getMessage - assert(msg.contains("Not a file:")) + assert(msg.contains(checkErrorMsg(s"$path/l1/l2/l3"))) } val wildcardL2DirStatement = diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala index 470c6a342b4dd..df96b0675cc2d 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveParquetSuite.scala @@ -17,7 +17,7 @@ package org.apache.spark.sql.hive -import org.apache.spark.sql.{QueryTest, Row} +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} import org.apache.spark.sql.execution.datasources.parquet.ParquetTest import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.internal.SQLConf @@ -106,4 +106,21 @@ class HiveParquetSuite extends QueryTest with ParquetTest with TestHiveSingleton } } } + + test("SPARK-33323: Add query resolved check before convert hive relation") { + withTable("t") { + val msg = intercept[AnalysisException] { + sql( + s""" + |CREATE TABLE t STORED AS PARQUET AS + |SELECT * FROM ( + | SELECT c3 FROM ( + | SELECT c1, c2 from values(1,2) t(c1, c2) + | ) + |) + """.stripMargin) + }.getMessage + assert(msg.contains("cannot resolve '`c3`' given input columns")) + } + } } diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSQLInsertTestSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSQLInsertTestSuite.scala new file mode 100644 index 0000000000000..0b1d511f08511 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSQLInsertTestSuite.scala @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive + +import org.apache.spark.sql.SQLInsertTestSuite +import org.apache.spark.sql.hive.test.TestHiveSingleton + +class HiveSQLInsertTestSuite extends SQLInsertTestSuite with TestHiveSingleton { + + private val originalPartitionMode = spark.conf.getOption("hive.exec.dynamic.partition.mode") + + override protected def beforeAll(): Unit = { + super.beforeAll() + spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict") + } + + override protected def afterAll(): Unit = { + originalPartitionMode + .map(v => spark.conf.set("hive.exec.dynamic.partition.mode", v)) + .getOrElse(spark.conf.unset("hive.exec.dynamic.partition.mode")) + super.afterAll() + } + + override def format: String = "hive OPTIONS(fileFormat='parquet')" +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala index 590ef949ffbd7..ce82756428849 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSchemaInferenceSuite.scala @@ -129,7 +129,7 @@ class HiveSchemaInferenceSuite // properties out). 
assert(!externalCatalog.getTable(DATABASE, TEST_TABLE_NAME).schemaPreservesCase) val rawTable = client.getTable(DATABASE, TEST_TABLE_NAME) - assert(rawTable.properties.filterKeys(_.startsWith(DATASOURCE_SCHEMA_PREFIX)) == Map.empty) + assert(rawTable.properties.filterKeys(_.startsWith(DATASOURCE_SCHEMA_PREFIX)).isEmpty) schema } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSharedStateSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSharedStateSuite.scala index 78535b094b83d..4570e72db0641 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSharedStateSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSharedStateSuite.scala @@ -20,35 +20,46 @@ package org.apache.spark.sql.hive import org.apache.hadoop.hive.conf.HiveConf.ConfVars import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite} -import org.apache.spark.sql.internal.SharedState +import org.apache.spark.sql.SparkSession import org.apache.spark.sql.internal.StaticSQLConf._ import org.apache.spark.util.Utils class HiveSharedStateSuite extends SparkFunSuite { + override def beforeEach(): Unit = { + SparkSession.clearActiveSession() + SparkSession.clearDefaultSession() + super.beforeEach() + } + test("initial configs should be passed to SharedState but not SparkContext") { val conf = new SparkConf().setMaster("local").setAppName("SharedState Test") val sc = SparkContext.getOrCreate(conf) + val wareHouseDir = Utils.createTempDir().toString val invalidPath = "invalid/path" val metastorePath = Utils.createTempDir() val tmpDb = "tmp_db" // The initial configs used to generate SharedState, none of these should affect the global - // shared SparkContext's configurations. Especially, all these configs are passed to the cloned - // confs inside SharedState except metastore warehouse dir. + // shared SparkContext's configurations, except spark.sql.warehouse.dir. + // Especially, all these configs are passed to the cloned confs inside SharedState for sharing + // cross sessions. 
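The reworked comment above is the crux of the updated test: the first session created in an application may establish the shared warehouse location, while later builders can only touch ordinary session confs. A rough sketch of that behaviour as the suite asserts it; paths and conf values are placeholders, and it assumes a fresh application with no existing session.

```scala
import org.apache.spark.sql.SparkSession

// First session: no SharedState exists yet, so its warehouse option becomes application-wide.
val first = SparkSession.builder()
  .master("local")
  .appName("shared-state-sketch")
  .config("spark.sql.warehouse.dir", "/tmp/warehouse-a")
  .getOrCreate()

// Second builder: plain session confs are applied, but the shared warehouse cannot be moved.
val second = SparkSession.builder()
  .config("spark.foo", "bar")
  .config("spark.sql.warehouse.dir", "/tmp/warehouse-b")
  .getOrCreate()

println(first.sparkContext.conf.get("spark.sql.warehouse.dir")) // expected: /tmp/warehouse-a
println(second.conf.get("spark.foo"))                           // expected: bar
```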
val initialConfigs = Map("spark.foo" -> "bar", - WAREHOUSE_PATH.key -> invalidPath, - ConfVars.METASTOREWAREHOUSE.varname -> invalidPath, + WAREHOUSE_PATH.key -> wareHouseDir, + ConfVars.METASTOREWAREHOUSE.varname -> wareHouseDir, CATALOG_IMPLEMENTATION.key -> "hive", ConfVars.METASTORECONNECTURLKEY.varname -> s"jdbc:derby:;databaseName=$metastorePath/metastore_db;create=true", GLOBAL_TEMP_DATABASE.key -> tmpDb) - val state = new SharedState(sc, initialConfigs) - assert(sc.conf.get(WAREHOUSE_PATH.key) !== invalidPath, - "warehouse conf in session options can't affect application wide spark conf") - assert(sc.hadoopConfiguration.get(ConfVars.METASTOREWAREHOUSE.varname) !== invalidPath, - "warehouse conf in session options can't affect application wide hadoop conf") + val builder = SparkSession.builder() + initialConfigs.foreach { case (k, v) => builder.config(k, v) } + val ss = builder.getOrCreate() + val state = ss.sharedState + assert(sc.conf.get(WAREHOUSE_PATH.key) === wareHouseDir, + "initial warehouse conf in session options can affect application wide spark conf") + assert(sc.hadoopConfiguration.get(ConfVars.METASTOREWAREHOUSE.varname) === wareHouseDir, + "initial warehouse conf in session options can affect application wide hadoop conf") assert(!state.sparkContext.conf.contains("spark.foo"), "static spark conf should not be affected by session") @@ -57,9 +68,20 @@ class HiveSharedStateSuite extends SparkFunSuite { val client = state.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client assert(client.getConf("spark.foo", "") === "bar", "session level conf should be passed to catalog") - assert(client.getConf(ConfVars.METASTOREWAREHOUSE.varname, invalidPath) !== invalidPath, - "session level conf should be passed to catalog except warehouse dir") + assert(client.getConf(ConfVars.METASTOREWAREHOUSE.varname, "") === wareHouseDir, + "session level conf should be passed to catalog") assert(state.globalTempViewManager.database === tmpDb) + + val ss2 = + builder.config("spark.foo", "bar2222").config(WAREHOUSE_PATH.key, invalidPath).getOrCreate() + + assert(ss2.sparkContext.conf.get(WAREHOUSE_PATH.key) !== invalidPath, + "warehouse conf in session options can't affect application wide spark conf") + assert(ss2.sparkContext.hadoopConfiguration.get(ConfVars.METASTOREWAREHOUSE.varname) !== + invalidPath, "warehouse conf in session options can't affect application wide hadoop conf") + assert(ss.conf.get("spark.foo") === "bar2222", "session level conf should be passed to catalog") + assert(ss.conf.get(WAREHOUSE_PATH) !== invalidPath, + "session level conf should be passed to catalog") } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShimSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShimSuite.scala index 14d07cdf8db08..89131a79e59de 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShimSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShimSuite.scala @@ -16,9 +16,6 @@ */ package org.apache.spark.sql.hive -import scala.collection.JavaConverters._ -import scala.language.implicitConversions - import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hive.serde2.ColumnProjectionUtils @@ -35,18 +32,10 @@ class HiveShimSuite extends SparkFunSuite { // test when READ_COLUMN_NAMES_CONF_STR is empty HiveShim.appendReadColumns(conf, ids, names) - if (HiveUtils.isHive23) { - assert(names === ColumnProjectionUtils.getReadColumnNames(conf)) - } else { - assert(names.asJava === 
ColumnProjectionUtils.getReadColumnNames(conf)) - } + assert(names === ColumnProjectionUtils.getReadColumnNames(conf)) // test when READ_COLUMN_NAMES_CONF_STR is non-empty HiveShim.appendReadColumns(conf, moreIds, moreNames) - if (HiveUtils.isHive23) { - assert((names ++ moreNames) === ColumnProjectionUtils.getReadColumnNames(conf)) - } else { - assert((names ++ moreNames).asJava === ColumnProjectionUtils.getReadColumnNames(conf)) - } + assert((names ++ moreNames) === ColumnProjectionUtils.getReadColumnNames(conf)) } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShowCreateTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShowCreateTableSuite.scala index 446923ad23201..2fb67c793dc6a 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShowCreateTableSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShowCreateTableSuite.scala @@ -25,6 +25,22 @@ import org.apache.spark.sql.internal.{HiveSerDe, SQLConf} class HiveShowCreateTableSuite extends ShowCreateTableSuite with TestHiveSingleton { + private var origCreateHiveTableConfig = false + + protected override def beforeAll(): Unit = { + super.beforeAll() + origCreateHiveTableConfig = + spark.conf.get(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT) + spark.conf.set(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT.key, true) + } + + protected override def afterAll(): Unit = { + spark.conf.set( + SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT.key, + origCreateHiveTableConfig) + super.afterAll() + } + test("view") { Seq(true, false).foreach { serde => withView("v1") { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala index 6feaaea3dfb89..77d54ed45a5de 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala @@ -153,7 +153,7 @@ class HiveSparkSubmitSuite // For more detail, see sql/hive/src/test/resources/regression-test-SPARK-8489/*scala. 
// TODO: revisit for Scala 2.13 support val version = Properties.versionNumberString match { - case v if v.startsWith("2.12") => v.substring(0, 4) + case v if v.startsWith("2.12") || v.startsWith("2.13") => v.substring(0, 4) case x => throw new Exception(s"Unsupported Scala Version: $x") } val jarDir = getTestResourcePath("regression-test-SPARK-8489") @@ -770,8 +770,6 @@ object SPARK_14244 extends QueryTest { val hiveContext = new TestHiveContext(sparkContext) spark = hiveContext.sparkSession - import hiveContext.implicits._ - try { val window = Window.orderBy("id") val df = spark.range(2).select(cume_dist().over(window).as("cdist")).orderBy("cdist") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala index ca1af73b038a7..d0af8dc7ae49f 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUserDefinedTypeSuite.scala @@ -18,7 +18,6 @@ package org.apache.spark.sql.hive import scala.collection.JavaConverters._ -import scala.util.Random import org.apache.hadoop.hive.ql.udf.generic.GenericUDF import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, StandardListObjectInspector} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala index 4ad97eaa2b1c8..d8e1e01292820 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveUtilsSuite.scala @@ -23,9 +23,8 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars import org.apache.spark.SparkConf import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.sql.QueryTest -import org.apache.spark.sql.execution.HiveResult import org.apache.spark.sql.hive.test.TestHiveSingleton -import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SQLTestUtils} +import org.apache.spark.sql.test.SQLTestUtils import org.apache.spark.util.ChildFirstURLClassLoader class HiveUtilsSuite extends QueryTest with SQLTestUtils with TestHiveSingleton { diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala index 421dcb499bd6a..b715f484fa02a 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala @@ -35,7 +35,7 @@ import org.apache.spark.util.Utils case class TestData(key: Int, value: String) -case class ThreeCloumntable(key: Int, value: String, key1: String) +case class ThreeColumnTable(key: Int, value: String, key1: String) class InsertSuite extends QueryTest with TestHiveSingleton with BeforeAndAfter with SQLTestUtils with PrivateMethodTester { @@ -277,7 +277,8 @@ class InsertSuite extends QueryTest with TestHiveSingleton with BeforeAndAfter test("Test partition mode = strict") { withSQLConf(("hive.exec.dynamic.partition.mode", "strict")) { withTable("partitioned") { - sql("CREATE TABLE partitioned (id bigint, data string) PARTITIONED BY (part string)") + sql("CREATE TABLE partitioned (id bigint, data string) USING hive " + + "PARTITIONED BY (part string)") val data = (1 to 10).map(i => (i, s"data-$i", if ((i % 2) == 0) "even" else "odd")) .toDF("id", "data", "part") @@ -763,7 +764,7 @@ class InsertSuite extends QueryTest with TestHiveSingleton with 
BeforeAndAfter val path = dir.toURI.getPath val e = intercept[AnalysisException] { - sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path}' TABLE notexists") + sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path}' TABLE nonexistent") }.getMessage assert(e.contains("Table or view not found")) } @@ -847,4 +848,26 @@ class InsertSuite extends QueryTest with TestHiveSingleton with BeforeAndAfter } } } + + test("SPARK-32508 " + + "Disallow empty part col values in partition spec before static partition writing") { + withTable("t1") { + spark.sql( + """ + |CREATE TABLE t1 (c1 int) + |PARTITIONED BY (d string) + """.stripMargin) + + val e = intercept[AnalysisException] { + spark.sql( + """ + |INSERT OVERWRITE TABLE t1 PARTITION(d='') + |SELECT 1 + """.stripMargin) + }.getMessage + + assert(!e.contains("get partition: Value for key d is null or empty")) + assert(e.contains("Partition spec is invalid")) + } + } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala index 41a26344f7c21..ecbb104070b70 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/MetastoreDataSourcesSuite.scala @@ -711,7 +711,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv identifier = TableIdentifier("wide_schema"), tableType = CatalogTableType.EXTERNAL, storage = CatalogStorageFormat.empty.copy( - properties = Map("path" -> tempDir.getCanonicalPath) + locationUri = Some(tempDir.toURI) ), schema = schema, provider = Some("json") @@ -1076,7 +1076,8 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv identifier = TableIdentifier("skip_hive_metadata", Some("default")), tableType = CatalogTableType.EXTERNAL, storage = CatalogStorageFormat.empty.copy( - properties = Map("path" -> tempPath.getCanonicalPath, "skipHiveMetadata" -> "true") + locationUri = Some(tempPath.toURI), + properties = Map("skipHiveMetadata" -> "true") ), schema = schema, provider = Some("parquet") @@ -1337,7 +1338,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv val e = intercept[AnalysisException] { sharedState.externalCatalog.getTable("default", "t") }.getMessage - assert(e.contains(s"Could not read schema from the hive metastore because it is corrupted")) + assert(e.contains("Cannot read table property 'spark.sql.sources.schema' as it's corrupted")) withDebugMode { val tableMeta = sharedState.externalCatalog.getTable("default", "t") @@ -1354,7 +1355,7 @@ class MetastoreDataSourcesSuite extends QueryTest with SQLTestUtils with TestHiv val newSession = sparkSession.newSession() newSession.sql("CREATE TABLE abc(i int) USING json") val tableMeta = newSession.sessionState.catalog.getTableMetadata(TableIdentifier("abc")) - assert(tableMeta.properties(DATASOURCE_SCHEMA_NUMPARTS).toInt == 1) + assert(tableMeta.properties.contains(DATASOURCE_SCHEMA)) assert(tableMeta.properties(DATASOURCE_PROVIDER) == "json") } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala index 80afc9d8f44bc..e1b0637963b75 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionProviderCompatibilitySuite.scala @@ -53,7 
+53,8 @@ class PartitionProviderCompatibilitySuite s"ALTER TABLE $tableName PARTITION (partCol=1) SET LOCATION '/foo'", s"ALTER TABLE $tableName DROP PARTITION (partCol=1)", s"DESCRIBE $tableName PARTITION (partCol=1)", - s"SHOW PARTITIONS $tableName") + s"SHOW PARTITIONS $tableName", + s"SHOW TABLE EXTENDED LIKE '$tableName' PARTITION (partCol=1)") withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "true") { for (cmd <- unsupportedCommands) { @@ -124,10 +125,15 @@ class PartitionProviderCompatibilitySuite } // disabled withSQLConf(SQLConf.HIVE_MANAGE_FILESOURCE_PARTITIONS.key -> "false") { - val e = intercept[AnalysisException] { - spark.sql(s"show partitions test") + Seq( + "SHOW PARTITIONS test", + "SHOW TABLE EXTENDED LIKE 'test' PARTITION (partCol=1)" + ).foreach { showPartitions => + val e = intercept[AnalysisException] { + spark.sql(showPartitions) + } + assert(e.getMessage.contains("filesource partition management is disabled")) } - assert(e.getMessage.contains("filesource partition management is disabled")) spark.sql("refresh table test") assert(spark.sql("select * from test").count() == 5) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionedTablePerfStatsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionedTablePerfStatsSuite.scala index 3af163af0968c..49e26614e13c4 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionedTablePerfStatsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/PartitionedTablePerfStatsSuite.scala @@ -300,7 +300,7 @@ class PartitionedTablePerfStatsSuite HiveCatalogMetrics.reset() assert(spark.sql("show partitions test").count() == 100) - assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() < 10) + assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() <= 10) } } } @@ -323,7 +323,7 @@ class PartitionedTablePerfStatsSuite HiveCatalogMetrics.reset() assert(spark.sql("show partitions test").count() == 100) - assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() < 10) + assert(HiveCatalogMetrics.METRIC_HIVE_CLIENT_CALLS.getCount() <= 10) } } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala index 1e396553c9c52..cec6ec1ee1275 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/QueryPartitionSuite.scala @@ -17,12 +17,8 @@ package org.apache.spark.sql.hive -import java.io.File import java.sql.Timestamp -import com.google.common.io.Files -import org.apache.hadoop.fs.FileSystem - import org.apache.spark.internal.config._ import org.apache.spark.sql._ import org.apache.spark.sql.hive.test.TestHiveSingleton @@ -42,7 +38,7 @@ class QueryPartitionSuite extends QueryTest with SQLTestUtils with TestHiveSingl testData.createOrReplaceTempView("testData") // create the table for test - sql(s"CREATE TABLE table_with_partition(key int,value string) " + + sql(s"CREATE TABLE table_with_partition(key int,value string) USING hive " + s"PARTITIONED by (ds string) location '${tmpDir.toURI}' ") sql("INSERT OVERWRITE TABLE table_with_partition partition (ds='1') " + "SELECT key,value FROM testData") @@ -85,7 +81,8 @@ class QueryPartitionSuite extends QueryTest with SQLTestUtils with TestHiveSingl test("SPARK-21739: Cast expression should initialize timezoneId") { withTable("table_with_timestamp_partition") { - sql("CREATE TABLE table_with_timestamp_partition(value 
int) PARTITIONED BY (ts TIMESTAMP)") + sql("CREATE TABLE table_with_timestamp_partition(value int) USING hive " + + "PARTITIONED BY (ts TIMESTAMP)") sql("INSERT OVERWRITE TABLE table_with_timestamp_partition " + "PARTITION (ts = '2010-01-01 00:00:00.000') VALUES (1)") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala index be6d023302293..5357f4b63d794 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala @@ -31,7 +31,7 @@ import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.analysis.NoSuchPartitionException import org.apache.spark.sql.catalyst.catalog.{CatalogColumnStat, CatalogStatistics, HiveTableRelation} -import org.apache.spark.sql.catalyst.plans.logical.{ColumnStat, HistogramBin, HistogramSerializer} +import org.apache.spark.sql.catalyst.plans.logical.HistogramBin import org.apache.spark.sql.catalyst.util.{DateTimeUtils, StringUtils} import org.apache.spark.sql.execution.command.{AnalyzeColumnCommand, CommandUtils, DDLUtils} import org.apache.spark.sql.execution.datasources.LogicalRelation @@ -101,14 +101,9 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto .asInstanceOf[HiveTableRelation] val properties = relation.tableMeta.ignoredProperties - if (HiveUtils.isHive23) { - // Since HIVE-6727, Hive fixes table-level stats for external tables are incorrect. - assert(properties("totalSize").toLong == 6) - assert(properties.get("rawDataSize").isEmpty) - } else { - assert(properties("totalSize").toLong <= 0, "external table totalSize must be <= 0") - assert(properties("rawDataSize").toLong <= 0, "external table rawDataSize must be <= 0") - } + // Since HIVE-6727, Hive fixes table-level stats for external tables are incorrect. 
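The USING hive additions that recur through these QueryPartitionSuite and StatisticsSuite hunks pin the test tables to the Hive table provider explicitly instead of relying on what a bare CREATE TABLE defaults to, which depends on configuration, so the suites keep exercising Hive-style partitioned tables. A minimal sketch of the resulting statement, assuming only a Hive-enabled SparkSession named spark:

  // Explicit provider: a Hive table whose partition column is declared
  // only in the PARTITIONED BY clause, as in the hunks above.
  spark.sql(
    """CREATE TABLE table_with_partition (key INT, value STRING)
      |USING hive
      |PARTITIONED BY (ds STRING)""".stripMargin)
  // Without USING hive, the provider a bare CREATE TABLE picks is
  // configuration-dependent, so spelling it out keeps these Hive-specific
  // suites on the Hive code path.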
+ assert(properties("totalSize").toLong == 6) + assert(properties.get("rawDataSize").isEmpty) val sizeInBytes = relation.stats.sizeInBytes assert(sizeInBytes === BigInt(file1.length() + file2.length())) @@ -170,7 +165,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto // Partitioned table val partTable = "part_table" withTable(partTable) { - sql(s"CREATE TABLE $partTable (key STRING, value STRING) PARTITIONED BY (ds STRING)") + sql(s"CREATE TABLE $partTable (key STRING, value STRING) USING hive " + + "PARTITIONED BY (ds STRING)") sql(s"INSERT INTO TABLE $partTable PARTITION (ds='2010-01-01') SELECT * FROM src") sql(s"INSERT INTO TABLE $partTable PARTITION (ds='2010-01-02') SELECT * FROM src") sql(s"INSERT INTO TABLE $partTable PARTITION (ds='2010-01-03') SELECT * FROM src") @@ -196,7 +192,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto SQLConf.PARALLEL_FILE_LISTING_IN_STATS_COMPUTATION.key -> "True") { val checkSizeTable = "checkSizeTable" withTable(checkSizeTable) { - sql(s"CREATE TABLE $checkSizeTable (key STRING, value STRING) PARTITIONED BY (ds STRING)") + sql(s"CREATE TABLE $checkSizeTable (key STRING, value STRING) USING hive " + + "PARTITIONED BY (ds STRING)") sql(s"INSERT INTO TABLE $checkSizeTable PARTITION (ds='2010-01-01') SELECT * FROM src") sql(s"INSERT INTO TABLE $checkSizeTable PARTITION (ds='2010-01-02') SELECT * FROM src") sql(s"INSERT INTO TABLE $checkSizeTable PARTITION (ds='2010-01-03') SELECT * FROM src") @@ -279,7 +276,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto test("SPARK-22745 - read Hive's statistics for partition") { val tableName = "hive_stats_part_table" withTable(tableName) { - sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING)") + sql(s"CREATE TABLE $tableName (key STRING, value STRING) USING hive " + + "PARTITIONED BY (ds STRING)") sql(s"INSERT INTO TABLE $tableName PARTITION (ds='2017-01-01') SELECT * FROM src") var partition = spark.sessionState.catalog .getPartition(TableIdentifier(tableName), Map("ds" -> "2017-01-01")) @@ -301,7 +299,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto val tableName = "analyzeTable_part" withTable(tableName) { withTempPath { path => - sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING)") + sql(s"CREATE TABLE $tableName (key STRING, value STRING) USING hive " + + "PARTITIONED BY (ds STRING)") val partitionDates = List("2010-01-01", "2010-01-02", "2010-01-03") partitionDates.foreach { ds => @@ -326,6 +325,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto sql( s""" |CREATE TABLE $sourceTableName (key STRING, value STRING) + |USING hive |PARTITIONED BY (ds STRING) |LOCATION '${path.toURI}' """.stripMargin) @@ -343,6 +343,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto sql( s""" |CREATE TABLE $tableName (key STRING, value STRING) + |USING hive |PARTITIONED BY (ds STRING) |LOCATION '${path.toURI}' """.stripMargin) @@ -376,7 +377,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto } withTable(tableName) { - sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING)") + sql(s"CREATE TABLE $tableName (key STRING, value STRING) USING hive " + + "PARTITIONED BY (ds STRING)") createPartition("2010-01-01", "SELECT '1', 'A' from src") createPartition("2010-01-02", "SELECT '1', 'A' from src UNION 
ALL SELECT '1', 'A' from src") @@ -429,7 +431,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto } withTable(tableName) { - sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING, hr INT)") + sql(s"CREATE TABLE $tableName (key STRING, value STRING) USING hive " + + "PARTITIONED BY (ds STRING, hr INT)") createPartition("2010-01-01", 10, "SELECT '1', 'A' from src") createPartition("2010-01-01", 11, "SELECT '1', 'A' from src") @@ -477,7 +480,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto } withTable(tableName) { - sql(s"CREATE TABLE $tableName (key STRING, value STRING) PARTITIONED BY (ds STRING, hr INT)") + sql(s"CREATE TABLE $tableName (key STRING, value STRING) USING hive " + + "PARTITIONED BY (ds STRING, hr INT)") createPartition("2010-01-01", 10, "SELECT '1', 'A' from src") createPartition("2010-01-01", 11, "SELECT '1', 'A' from src") @@ -731,7 +735,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto } } - test("analyze column command paramaters validation") { + test("analyze column command parameters validation") { val e1 = intercept[IllegalArgumentException] { AnalyzeColumnCommand(TableIdentifier("test"), Option(Seq("c1")), true).run(spark) } @@ -872,25 +876,10 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto assert(totalSize.isDefined && totalSize.get > 0, "totalSize is lost") val numRows = extractStatsPropValues(describeResult, "numRows") - if (HiveUtils.isHive23) { - // Since HIVE-15653(Hive 2.3.0), Hive fixs some ALTER TABLE commands drop table stats. - assert(numRows.isDefined && numRows.get == 500) - val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize") - assert(rawDataSize.isDefined && rawDataSize.get == 5312) - checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = Some(500)) - } else { - // ALTER TABLE SET/UNSET TBLPROPERTIES invalidates some Hive specific statistics, but not - // Spark specific statistics. This is triggered by the Hive alterTable API. 
- assert(numRows.isDefined && numRows.get == -1, "numRows is lost") - val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize") - assert(rawDataSize.isDefined && rawDataSize.get == -1, "rawDataSize is lost") - - if (analyzedBySpark) { - checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = Some(500)) - } else { - checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = None) - } - } + assert(numRows.isDefined && numRows.get == 500) + val rawDataSize = extractStatsPropValues(describeResult, "rawDataSize") + assert(rawDataSize.isDefined && rawDataSize.get == 5312) + checkTableStats(tabName, hasSizeInBytes = true, expectedRowCounts = Some(500)) } } } @@ -981,7 +970,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto Seq(false, true).foreach { autoUpdate => withSQLConf(SQLConf.AUTO_SIZE_UPDATE_ENABLED.key -> autoUpdate.toString) { withTable(table) { - sql(s"CREATE TABLE $table (i INT, j STRING) PARTITIONED BY (ds STRING, hr STRING)") + sql(s"CREATE TABLE $table (i INT, j STRING) USING hive " + + "PARTITIONED BY (ds STRING, hr STRING)") // table has two partitions initially for (ds <- Seq("2008-04-08"); hr <- Seq("11", "12")) { sql(s"INSERT OVERWRITE TABLE $table PARTITION (ds='$ds',hr='$hr') SELECT 1, 'a'") @@ -993,12 +983,16 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto assert(fetched1.get.colStats.size == 2) withTempPaths(numPaths = 2) { case Seq(dir1, dir2) => - val file1 = new File(dir1 + "/data") + val partDir1 = new File(new File(dir1, "ds=2008-04-09"), "hr=11") + val file1 = new File(partDir1, "data") + file1.getParentFile.mkdirs() Utils.tryWithResource(new PrintWriter(file1)) { writer => writer.write("1,a") } - val file2 = new File(dir2 + "/data") + val partDir2 = new File(new File(dir2, "ds=2008-04-09"), "hr=12") + val file2 = new File(partDir2, "data") + file2.getParentFile.mkdirs() Utils.tryWithResource(new PrintWriter(file2)) { writer => writer.write("1,a") } @@ -1007,8 +1001,8 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto sql( s""" |ALTER TABLE $table ADD - |PARTITION (ds='2008-04-09', hr='11') LOCATION '${dir1.toURI.toString}' - |PARTITION (ds='2008-04-09', hr='12') LOCATION '${dir2.toURI.toString}' + |PARTITION (ds='2008-04-09', hr='11') LOCATION '${partDir1.toURI.toString}' + |PARTITION (ds='2008-04-09', hr='12') LOCATION '${partDir1.toURI.toString}' """.stripMargin) if (autoUpdate) { val fetched2 = checkTableStats(table, hasSizeInBytes = true, expectedRowCounts = None) @@ -1054,6 +1048,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto sql( s""" |CREATE TABLE $managedTable (key INT, value STRING) + |USING hive |PARTITIONED BY (ds STRING, hr STRING) """.stripMargin) @@ -1128,7 +1123,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto def checkColStatsProps(expected: Map[String, String]): Unit = { sql(s"ANALYZE TABLE $tableName COMPUTE STATISTICS FOR COLUMNS " + stats.keys.mkString(", ")) val table = hiveClient.getTable("default", tableName) - val props = table.properties.filterKeys(_.startsWith("spark.sql.statistics.colStats")) + val props = table.properties.filterKeys(_.startsWith("spark.sql.statistics.colStats")).toMap assert(props == expected) } @@ -1533,26 +1528,27 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto Seq(tbl, ext_tbl).foreach { tblName => sql(s"INSERT INTO $tblName VALUES (1, 'a', '2019-12-13')") + val 
expectedSize = 601 // analyze table sql(s"ANALYZE TABLE $tblName COMPUTE STATISTICS NOSCAN") var tableStats = getTableStats(tblName) - assert(tableStats.sizeInBytes == 601) + assert(tableStats.sizeInBytes == expectedSize) assert(tableStats.rowCount.isEmpty) sql(s"ANALYZE TABLE $tblName COMPUTE STATISTICS") tableStats = getTableStats(tblName) - assert(tableStats.sizeInBytes == 601) + assert(tableStats.sizeInBytes == expectedSize) assert(tableStats.rowCount.get == 1) // analyze a single partition sql(s"ANALYZE TABLE $tblName PARTITION (ds='2019-12-13') COMPUTE STATISTICS NOSCAN") var partStats = getPartitionStats(tblName, Map("ds" -> "2019-12-13")) - assert(partStats.sizeInBytes == 601) + assert(partStats.sizeInBytes == expectedSize) assert(partStats.rowCount.isEmpty) sql(s"ANALYZE TABLE $tblName PARTITION (ds='2019-12-13') COMPUTE STATISTICS") partStats = getPartitionStats(tblName, Map("ds" -> "2019-12-13")) - assert(partStats.sizeInBytes == 601) + assert(partStats.sizeInBytes == expectedSize) assert(partStats.rowCount.get == 1) } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala index 2a4efd0cce6e0..12ed0e5305299 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/FiltersSuite.scala @@ -17,6 +17,7 @@ package org.apache.spark.sql.hive.client +import java.sql.Date import java.util.Collections import org.apache.hadoop.hive.metastore.api.FieldSchema @@ -29,6 +30,7 @@ import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.PlanTest import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ +import org.apache.spark.unsafe.types.UTF8String /** * A set of tests for the filter conversion logic used when pushing partition pruning into the @@ -63,6 +65,28 @@ class FiltersSuite extends SparkFunSuite with Logging with PlanTest { (Literal(1) === a("intcol", IntegerType)) :: (Literal("a") === a("strcol", IntegerType)) :: Nil, "1 = intcol and \"a\" = strcol") + filterTest("date filter", + (a("datecol", DateType) === Literal(Date.valueOf("2019-01-01"))) :: Nil, + "datecol = 2019-01-01") + + filterTest("date filter with IN predicate", + (a("datecol", DateType) in + (Literal(Date.valueOf("2019-01-01")), Literal(Date.valueOf("2019-01-07")))) :: Nil, + "(datecol = 2019-01-01 or datecol = 2019-01-07)") + + filterTest("date and string filter", + (Literal(Date.valueOf("2019-01-01")) === a("datecol", DateType)) :: + (Literal("a") === a("strcol", IntegerType)) :: Nil, + "2019-01-01 = datecol and \"a\" = strcol") + + filterTest("date filter with null", + (a("datecol", DateType) === Literal(null)) :: Nil, + "") + + filterTest("string filter with InSet predicate", + InSet(a("strcol", StringType), Set("1", "2").map(s => UTF8String.fromString(s))) :: Nil, + "(strcol = \"1\" or strcol = \"2\")") + filterTest("skip varchar", (Literal("") === a("varchar", StringType)) :: Nil, "") @@ -76,6 +100,14 @@ class FiltersSuite extends SparkFunSuite with Logging with PlanTest { (a("intcol", IntegerType) in (Literal(1), Literal(null))) :: Nil, "(intcol = 1)") + filterTest("NOT: int and string filters", + (a("intcol", IntegerType) =!= Literal(1)) :: (Literal("a") =!= a("strcol", IntegerType)) :: Nil, + """intcol != 1 and "a" != strcol""") + + filterTest("NOT: date filter", + (a("datecol", DateType) =!= Literal(Date.valueOf("2019-01-01"))) :: Nil, + "datecol != 
2019-01-01") + // Applying the predicate `x IN (NULL)` should return an empty set, but since this optimization // will be applied by Catalyst, this filter converter does not need to account for this. filterTest("SPARK-24879 IN predicates with only NULLs will not cause a NPE", @@ -89,7 +121,7 @@ class FiltersSuite extends SparkFunSuite with Logging with PlanTest { private def filterTest(name: String, filters: Seq[Expression], result: String) = { test(name) { withSQLConf(SQLConf.ADVANCED_PARTITION_PREDICATE_PUSHDOWN.key -> "true") { - val converted = shim.convertFilters(testTable, filters) + val converted = shim.convertFilters(testTable, filters, conf.sessionLocalTimeZone) if (converted != result) { fail(s"Expected ${filters.mkString(",")} to convert to '$result' but got '$converted'") } @@ -104,7 +136,7 @@ class FiltersSuite extends SparkFunSuite with Logging with PlanTest { val filters = (Literal(1) === a("intcol", IntegerType) || Literal(2) === a("intcol", IntegerType)) :: Nil - val converted = shim.convertFilters(testTable, filters) + val converted = shim.convertFilters(testTable, filters, conf.sessionLocalTimeZone) if (enabled) { assert(converted == "(1 = intcol or 2 = intcol)") } else { @@ -114,5 +146,38 @@ class FiltersSuite extends SparkFunSuite with Logging with PlanTest { } } + test("SPARK-33416: Avoid Hive metastore stack overflow when InSet predicate have many values") { + def checkConverted(inSet: InSet, result: String): Unit = { + assert(shim.convertFilters(testTable, inSet :: Nil, conf.sessionLocalTimeZone) == result) + } + + withSQLConf(SQLConf.HIVE_METASTORE_PARTITION_PRUNING_INSET_THRESHOLD.key -> "15") { + checkConverted( + InSet(a("intcol", IntegerType), + Range(1, 20).map(s => Literal(s).eval(EmptyRow)).toSet), + "(intcol >= 1 and intcol <= 19)") + + checkConverted( + InSet(a("stringcol", StringType), + Range(1, 20).map(s => Literal(s.toString).eval(EmptyRow)).toSet), + "(stringcol >= \"1\" and stringcol <= \"9\")") + + checkConverted( + InSet(a("intcol", IntegerType).cast(LongType), + Range(1, 20).map(s => Literal(s.toLong).eval(EmptyRow)).toSet), + "(intcol >= 1 and intcol <= 19)") + + checkConverted( + InSet(a("doublecol", DoubleType), + Range(1, 20).map(s => Literal(s.toDouble).eval(EmptyRow)).toSet), + "") + + checkConverted( + InSet(a("datecol", DateType), + Range(1, 20).map(d => Literal(d, DateType).eval(EmptyRow)).toSet), + "(datecol >= 1970-01-02 and datecol <= 1970-01-20)") + } + } + private def a(name: String, dataType: DataType) = AttributeReference(name, dataType)() } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HadoopVersionInfoSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HadoopVersionInfoSuite.scala index 65492abf38cc0..8d55356da28e6 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HadoopVersionInfoSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HadoopVersionInfoSuite.scala @@ -49,8 +49,7 @@ class HadoopVersionInfoSuite extends SparkFunSuite { sparkConf = new SparkConf(), hadoopConf = hadoopConf, config = HiveClientBuilder.buildConf(Map.empty), - ivyPath = Some(ivyPath.getCanonicalPath), - sharesHadoopClasses = true) + ivyPath = Some(ivyPath.getCanonicalPath)) val jars = client.classLoader.getParent.asInstanceOf[URLClassLoader].getURLs .map(u => new File(u.toURI)) // Drop all Hadoop jars to use the existing Hadoop jars on the classpath diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientBuilder.scala 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientBuilder.scala index 2ad3afcb214b3..f40b4f00d9fd0 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientBuilder.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientBuilder.scala @@ -46,15 +46,13 @@ private[client] object HiveClientBuilder { def buildClient( version: String, hadoopConf: Configuration, - extraConf: Map[String, String] = Map.empty, - sharesHadoopClasses: Boolean = true): HiveClient = { + extraConf: Map[String, String] = Map.empty): HiveClient = { IsolatedClientLoader.forVersion( hiveMetastoreVersion = version, hadoopVersion = VersionInfo.getVersion, sparkConf = new SparkConf(), hadoopConf = hadoopConf, config = buildConf(extraConf), - ivyPath = ivyPath, - sharesHadoopClasses = sharesHadoopClasses).createClient() + ivyPath = ivyPath).createClient() } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientUserNameSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientUserNameSuite.scala index 77956f4fe69da..b94d517e89e30 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientUserNameSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientUserNameSuite.scala @@ -21,7 +21,6 @@ import java.security.PrivilegedExceptionAction import org.apache.hadoop.conf.Configuration import org.apache.hadoop.security.UserGroupInformation -import org.scalatest.{BeforeAndAfterAll, PrivateMethodTester} import org.apache.spark.util.Utils diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala index 2d615f6fdc261..dc56e6bc4da81 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HivePartitionFilteringSuite.scala @@ -17,6 +17,8 @@ package org.apache.spark.sql.hive.client +import java.sql.Date + import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hive.conf.HiveConf import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -28,7 +30,8 @@ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.types.{BooleanType, IntegerType, LongType, StructType} +import org.apache.spark.sql.internal.SQLConf +import org.apache.spark.sql.types.{BooleanType, DateType, IntegerType, LongType, StringType, StructType} import org.apache.spark.util.Utils class HivePartitionFilteringSuite(version: String) @@ -36,47 +39,52 @@ class HivePartitionFilteringSuite(version: String) private val tryDirectSqlKey = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL.varname - private val testPartitionCount = 3 * 5 * 4 + private val dsValue = 20170101 to 20170103 + private val hValue = 0 to 4 + private val chunkValue = Seq("aa", "ab", "ba", "bb") + private val dateValue = Seq("2019-01-01", "2019-01-02", "2019-01-03") + private val dateStrValue = Seq("2020-01-01", "2020-01-02", "2020-01-03") + private val testPartitionCount = + dsValue.size * hValue.size * chunkValue.size * dateValue.size * dateStrValue.size + + private val storageFormat = CatalogStorageFormat( + locationUri = None, + inputFormat = Some(classOf[TextInputFormat].getName), + outputFormat = 
Some(classOf[HiveIgnoreKeyTextOutputFormat[_, _]].getName), + serde = Some(classOf[LazySimpleSerDe].getName()), + compressed = false, + properties = Map.empty + ) private def init(tryDirectSql: Boolean): HiveClient = { - val storageFormat = CatalogStorageFormat( - locationUri = None, - inputFormat = None, - outputFormat = None, - serde = None, - compressed = false, - properties = Map.empty) - val hadoopConf = new Configuration() hadoopConf.setBoolean(tryDirectSqlKey, tryDirectSql) hadoopConf.set("hive.metastore.warehouse.dir", Utils.createTempDir().toURI().toString()) val client = buildClient(hadoopConf) val tableSchema = new StructType().add("value", "int").add("ds", "int").add("h", "int").add("chunk", "string") + .add("d", "date").add("datestr", "string") val table = CatalogTable( identifier = TableIdentifier("test", Some("default")), tableType = CatalogTableType.MANAGED, schema = tableSchema, - partitionColumnNames = Seq("ds", "h", "chunk"), - storage = CatalogStorageFormat( - locationUri = None, - inputFormat = Some(classOf[TextInputFormat].getName), - outputFormat = Some(classOf[HiveIgnoreKeyTextOutputFormat[_, _]].getName), - serde = Some(classOf[LazySimpleSerDe].getName()), - compressed = false, - properties = Map.empty - )) + partitionColumnNames = Seq("ds", "h", "chunk", "d", "datestr"), + storage = storageFormat) client.createTable(table, ignoreIfExists = false) val partitions = for { - ds <- 20170101 to 20170103 - h <- 0 to 4 - chunk <- Seq("aa", "ab", "ba", "bb") + ds <- dsValue + h <- hValue + chunk <- chunkValue + date <- dateValue + dateStr <- dateStrValue } yield CatalogTablePartition(Map( "ds" -> ds.toString, "h" -> h.toString, - "chunk" -> chunk + "chunk" -> chunk, + "d" -> date, + "datestr" -> dateStr ), storageFormat) assert(partitions.size == testPartitionCount) @@ -102,7 +110,7 @@ class HivePartitionFilteringSuite(version: String) test(s"getPartitionsByFilter returns all partitions when $tryDirectSqlKey=false") { val client = init(false) val filteredPartitions = client.getPartitionsByFilter(client.getTable("default", "test"), - Seq(attr("ds") === 20170101)) + Seq(attr("ds") === 20170101), SQLConf.get.sessionLocalTimeZone) assert(filteredPartitions.size == testPartitionCount) } @@ -111,17 +119,21 @@ class HivePartitionFilteringSuite(version: String) // Should return all partitions where <=> is not supported testMetastorePartitionFiltering( attr("ds") <=> 20170101, - 20170101 to 20170103, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + dsValue, + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: ds=20170101") { testMetastorePartitionFiltering( attr("ds") === 20170101, 20170101 to 20170101, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: ds=(20170101 + 1) and h=0") { @@ -129,41 +141,51 @@ class HivePartitionFilteringSuite(version: String) // comparisons to non-literal values testMetastorePartitionFiltering( attr("ds") === (Literal(20170101) + 1) && attr("h") === 0, - 20170101 to 20170103, + dsValue, 0 to 0, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: chunk='aa'") { testMetastorePartitionFiltering( attr("chunk") === "aa", - 20170101 to 20170103, - 0 to 4, - "aa" :: Nil) + dsValue, + hValue, + "aa" :: Nil, + dateValue, + dateStrValue) } test("getPartitionsByFilter: cast(chunk as int)=1 (not a valid partition predicate)") { testMetastorePartitionFiltering( attr("chunk").cast(IntegerType) 
=== 1, - 20170101 to 20170103, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + dsValue, + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: cast(chunk as boolean)=true (not a valid partition predicate)") { testMetastorePartitionFiltering( attr("chunk").cast(BooleanType) === true, - 20170101 to 20170103, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + dsValue, + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: 20170101=ds") { testMetastorePartitionFiltering( Literal(20170101) === attr("ds"), 20170101 to 20170101, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: ds=20170101 and h=2") { @@ -171,7 +193,9 @@ class HivePartitionFilteringSuite(version: String) attr("ds") === 20170101 && attr("h") === 2, 20170101 to 20170101, 2 to 2, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: cast(ds as long)=20170101L and h=2") { @@ -179,39 +203,49 @@ class HivePartitionFilteringSuite(version: String) attr("ds").cast(LongType) === 20170101L && attr("h") === 2, 20170101 to 20170101, 2 to 2, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: ds=20170101 or ds=20170102") { testMetastorePartitionFiltering( attr("ds") === 20170101 || attr("ds") === 20170102, 20170101 to 20170102, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: ds in (20170102, 20170103) (using IN expression)") { testMetastorePartitionFiltering( attr("ds").in(20170102, 20170103), 20170102 to 20170103, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: cast(ds as long) in (20170102L, 20170103L) (using IN expression)") { testMetastorePartitionFiltering( attr("ds").cast(LongType).in(20170102L, 20170103L), 20170102 to 20170103, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil) + hValue, + chunkValue, + dateValue, + dateStrValue) } test("getPartitionsByFilter: ds in (20170102, 20170103) (using INSET expression)") { testMetastorePartitionFiltering( attr("ds").in(20170102, 20170103), 20170102 to 20170103, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil, { + hValue, + chunkValue, + dateValue, + dateStrValue, { case expr @ In(v, list) if expr.inSetConvertible => InSet(v, list.map(_.eval(EmptyRow)).toSet) }) @@ -222,8 +256,10 @@ class HivePartitionFilteringSuite(version: String) testMetastorePartitionFiltering( attr("ds").cast(LongType).in(20170102L, 20170103L), 20170102 to 20170103, - 0 to 4, - "aa" :: "ab" :: "ba" :: "bb" :: Nil, { + hValue, + chunkValue, + dateValue, + dateStrValue, { case expr @ In(v, list) if expr.inSetConvertible => InSet(v, list.map(_.eval(EmptyRow)).toSet) }) @@ -232,58 +268,172 @@ class HivePartitionFilteringSuite(version: String) test("getPartitionsByFilter: chunk in ('ab', 'ba') (using IN expression)") { testMetastorePartitionFiltering( attr("chunk").in("ab", "ba"), - 20170101 to 20170103, - 0 to 4, - "ab" :: "ba" :: Nil) + dsValue, + hValue, + "ab" :: "ba" :: Nil, + dateValue, + dateStrValue) } test("getPartitionsByFilter: chunk in ('ab', 'ba') (using INSET expression)") { testMetastorePartitionFiltering( attr("chunk").in("ab", "ba"), - 20170101 to 20170103, - 0 to 4, - "ab" :: "ba" :: Nil, { + dsValue, + hValue, + "ab" :: "ba" :: Nil, + dateValue, + dateStrValue, { case expr @ In(v, list) 
if expr.inSetConvertible => InSet(v, list.map(_.eval(EmptyRow)).toSet) }) } test("getPartitionsByFilter: (ds=20170101 and h>=2) or (ds=20170102 and h<2)") { - val day1 = (20170101 to 20170101, 2 to 4, Seq("aa", "ab", "ba", "bb")) - val day2 = (20170102 to 20170102, 0 to 1, Seq("aa", "ab", "ba", "bb")) + val day1 = (20170101 to 20170101, 2 to 4, chunkValue, dateValue, dateStrValue) + val day2 = (20170102 to 20170102, 0 to 1, chunkValue, dateValue, dateStrValue) testMetastorePartitionFiltering((attr("ds") === 20170101 && attr("h") >= 2) || (attr("ds") === 20170102 && attr("h") < 2), day1 :: day2 :: Nil) } test("getPartitionsByFilter: (ds=20170101 and h>=2) or (ds=20170102 and h<(1+1))") { - val day1 = (20170101 to 20170101, 2 to 4, Seq("aa", "ab", "ba", "bb")) + val day1 = (20170101 to 20170101, 2 to 4, chunkValue, dateValue, dateStrValue) // Day 2 should include all hours because we can't build a filter for h<(7+1) - val day2 = (20170102 to 20170102, 0 to 4, Seq("aa", "ab", "ba", "bb")) + val day2 = (20170102 to 20170102, 0 to 4, chunkValue, dateValue, dateStrValue) testMetastorePartitionFiltering((attr("ds") === 20170101 && attr("h") >= 2) || (attr("ds") === 20170102 && attr("h") < (Literal(1) + 1)), day1 :: day2 :: Nil) } test("getPartitionsByFilter: " + "chunk in ('ab', 'ba') and ((ds=20170101 and h>=2) or (ds=20170102 and h<2))") { - val day1 = (20170101 to 20170101, 2 to 4, Seq("ab", "ba")) - val day2 = (20170102 to 20170102, 0 to 1, Seq("ab", "ba")) + val day1 = (20170101 to 20170101, 2 to 4, Seq("ab", "ba"), dateValue, dateStrValue) + val day2 = (20170102 to 20170102, 0 to 1, Seq("ab", "ba"), dateValue, dateStrValue) testMetastorePartitionFiltering(attr("chunk").in("ab", "ba") && ((attr("ds") === 20170101 && attr("h") >= 2) || (attr("ds") === 20170102 && attr("h") < 2)), day1 :: day2 :: Nil) } - test("create client with sharesHadoopClasses = false") { - buildClient(new Configuration(), sharesHadoopClasses = false) + test("getPartitionsByFilter: chunk contains bb") { + testMetastorePartitionFiltering( + attr("chunk").contains("bb"), + dsValue, + hValue, + Seq("bb"), + dateValue, + dateStrValue) + } + + test("getPartitionsByFilter: chunk startsWith b") { + testMetastorePartitionFiltering( + attr("chunk").startsWith("b"), + dsValue, + hValue, + Seq("ba", "bb"), + dateValue, + dateStrValue) + } + + test("getPartitionsByFilter: chunk endsWith b") { + testMetastorePartitionFiltering( + attr("chunk").endsWith("b"), + dsValue, + hValue, + Seq("ab", "bb"), + dateValue, + dateStrValue) + } + + test("getPartitionsByFilter: chunk in ('ab', 'ba') and ((cast(ds as string)>'20170102')") { + testMetastorePartitionFiltering( + attr("chunk").in("ab", "ba") && (attr("ds").cast(StringType) > "20170102"), + dsValue, + hValue, + Seq("ab", "ba"), + dateValue, + dateStrValue) + } + + test("getPartitionsByFilter: ds<>20170101") { + testMetastorePartitionFiltering( + attr("ds") =!= 20170101, + 20170102 to 20170103, + hValue, + chunkValue, + dateValue, + dateStrValue) + } + + test("getPartitionsByFilter: h<>0 and chunk<>ab and d<>2019-01-01") { + testMetastorePartitionFiltering( + attr("h") =!= 0 && attr("chunk") =!= "ab" && attr("d") =!= Date.valueOf("2019-01-01"), + dsValue, + 1 to 4, + Seq("aa", "ba", "bb"), + Seq("2019-01-02", "2019-01-03"), + dateStrValue) + } + + test("getPartitionsByFilter: d=2019-01-01") { + testMetastorePartitionFiltering( + attr("d") === Date.valueOf("2019-01-01"), + dsValue, + hValue, + chunkValue, + Seq("2019-01-01"), + dateStrValue) + } + + test("getPartitionsByFilter: 
d>2019-01-02") { + testMetastorePartitionFiltering( + attr("d") > Date.valueOf("2019-01-02"), + dsValue, + hValue, + chunkValue, + Seq("2019-01-03"), + dateStrValue) + } + + test("getPartitionsByFilter: In(d, 2019-01-01, 2019-01-02)") { + testMetastorePartitionFiltering( + In(attr("d"), + Seq("2019-01-01", "2019-01-02").map(d => Literal(Date.valueOf(d)))), + dsValue, + hValue, + chunkValue, + Seq("2019-01-01", "2019-01-02"), + dateStrValue) + } + + test("getPartitionsByFilter: InSet(d, 2019-01-01, 2019-01-02)") { + testMetastorePartitionFiltering( + InSet(attr("d"), + Set("2019-01-01", "2019-01-02").map(d => Literal(Date.valueOf(d)).eval(EmptyRow))), + dsValue, + hValue, + chunkValue, + Seq("2019-01-01", "2019-01-02"), + dateStrValue) + } + + test("getPartitionsByFilter: cast(datestr as date)= 2020-01-01") { + testMetastorePartitionFiltering( + attr("datestr").cast(DateType) === Date.valueOf("2020-01-01"), + dsValue, + hValue, + chunkValue, + dateValue, + dateStrValue) } private def testMetastorePartitionFiltering( filterExpr: Expression, expectedDs: Seq[Int], expectedH: Seq[Int], - expectedChunks: Seq[String]): Unit = { + expectedChunks: Seq[String], + expectedD: Seq[String], + expectedDatestr: Seq[String]): Unit = { testMetastorePartitionFiltering( filterExpr, - (expectedDs, expectedH, expectedChunks) :: Nil, + (expectedDs, expectedH, expectedChunks, expectedD, expectedDatestr) :: Nil, identity) } @@ -292,43 +442,51 @@ class HivePartitionFilteringSuite(version: String) expectedDs: Seq[Int], expectedH: Seq[Int], expectedChunks: Seq[String], + expectedD: Seq[String], + expectedDatestr: Seq[String], transform: Expression => Expression): Unit = { testMetastorePartitionFiltering( filterExpr, - (expectedDs, expectedH, expectedChunks) :: Nil, + (expectedDs, expectedH, expectedChunks, expectedD, expectedDatestr) :: Nil, transform) } private def testMetastorePartitionFiltering( filterExpr: Expression, - expectedPartitionCubes: Seq[(Seq[Int], Seq[Int], Seq[String])]): Unit = { + expectedPartitionCubes: + Seq[(Seq[Int], Seq[Int], Seq[String], Seq[String], Seq[String])]): Unit = { testMetastorePartitionFiltering(filterExpr, expectedPartitionCubes, identity) } private def testMetastorePartitionFiltering( filterExpr: Expression, - expectedPartitionCubes: Seq[(Seq[Int], Seq[Int], Seq[String])], + expectedPartitionCubes: Seq[(Seq[Int], Seq[Int], Seq[String], Seq[String], Seq[String])], transform: Expression => Expression): Unit = { val filteredPartitions = client.getPartitionsByFilter(client.getTable("default", "test"), Seq( transform(filterExpr) - )) + ), SQLConf.get.sessionLocalTimeZone) val expectedPartitionCount = expectedPartitionCubes.map { - case (expectedDs, expectedH, expectedChunks) => - expectedDs.size * expectedH.size * expectedChunks.size + case (expectedDs, expectedH, expectedChunks, expectedD, expectedDatestr) => + expectedDs.size * expectedH.size * expectedChunks.size * + expectedD.size * expectedDatestr.size }.sum val expectedPartitions = expectedPartitionCubes.map { - case (expectedDs, expectedH, expectedChunks) => + case (expectedDs, expectedH, expectedChunks, expectedD, expectedDatestr) => for { ds <- expectedDs h <- expectedH chunk <- expectedChunks + d <- expectedD + datestr <- expectedDatestr } yield Set( "ds" -> ds.toString, "h" -> h.toString, - "chunk" -> chunk + "chunk" -> chunk, + "d" -> d, + "datestr" -> datestr ) }.reduce(_ ++ _) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveVersionSuite.scala 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveVersionSuite.scala index dd58c302e0197..02e9b7fb151fd 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveVersionSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveVersionSuite.scala @@ -28,9 +28,7 @@ private[client] abstract class HiveVersionSuite(version: String) extends SparkFu override protected val enableAutoThreadAudit = false protected var client: HiveClient = null - protected def buildClient( - hadoopConf: Configuration, - sharesHadoopClasses: Boolean = true): HiveClient = { + protected def buildClient(hadoopConf: Configuration): HiveClient = { // Hive changed the default of datanucleus.schema.autoCreateAll from true to false and // hive.metastore.schema.verification from false to true since 2.0 // For details, see the JIRA HIVE-6113 and HIVE-12463 @@ -46,8 +44,7 @@ private[client] abstract class HiveVersionSuite(version: String) extends SparkFu HiveClientBuilder.buildClient( version, hadoopConf, - HiveUtils.formatTimeVarsForHiveClient(hadoopConf), - sharesHadoopClasses = sharesHadoopClasses) + HiveUtils.formatTimeVarsForHiveClient(hadoopConf)) } override def suiteName: String = s"${super.suiteName}($version)" diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala index c5c92ddad9014..b5500eaf47158 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/VersionsSuite.scala @@ -33,7 +33,7 @@ import org.apache.spark.SparkFunSuite import org.apache.spark.internal.Logging import org.apache.spark.sql.{AnalysisException, Row} import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier} -import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, NoSuchPermanentFunctionException} +import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, NoSuchPermanentFunctionException, PartitionsAlreadyExistException} import org.apache.spark.sql.catalyst.catalog._ import org.apache.spark.sql.catalyst.expressions.{AttributeReference, EqualTo, Literal} import org.apache.spark.sql.catalyst.util.quietly @@ -488,7 +488,8 @@ class VersionsSuite extends SparkFunSuite with Logging { test(s"$version: getPartitionsByFilter") { // Only one partition [1, 1] for key2 == 1 val result = client.getPartitionsByFilter(client.getTable("default", "src_part"), - Seq(EqualTo(AttributeReference("key2", IntegerType)(), Literal(1)))) + Seq(EqualTo(AttributeReference("key2", IntegerType)(), Literal(1))), + versionSpark.conf.sessionLocalTimeZone) // Hive 0.12 doesn't support getPartitionsByFilter, it ignores the filter condition. 
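Across these client suites, getPartitionsByFilter and convertFilters gain a trailing time-zone argument: DateType literals in a pushed-down partition predicate have to be rendered as plain strings in the metastore filter, and that rendering uses the session-local time zone. A small sketch of where that value comes from, assuming an active Spark session:

  import org.apache.spark.sql.internal.SQLConf

  // The value threaded through these call sites is spark.sql.session.timeZone,
  // read from the active session's SQLConf.
  val tz: String = SQLConf.get.sessionLocalTimeZone
  // With it, a predicate such as attr("d") === Date.valueOf("2019-01-01") is
  // expected to reach the metastore as the filter string: d = 2019-01-01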
if (version != "0.12") { @@ -593,6 +594,27 @@ class VersionsSuite extends SparkFunSuite with Logging { assert(client.getPartitionOption("default", "src_part", spec).isEmpty) } + test(s"$version: createPartitions if already exists") { + val partitions = Seq(CatalogTablePartition( + Map("key1" -> "101", "key2" -> "102"), + storageFormat)) + try { + client.createPartitions("default", "src_part", partitions, ignoreIfExists = false) + val errMsg = intercept[PartitionsAlreadyExistException] { + client.createPartitions("default", "src_part", partitions, ignoreIfExists = false) + }.getMessage + assert(errMsg.contains("partitions already exists")) + } finally { + client.dropPartitions( + "default", + "src_part", + partitions.map(_.spec), + ignoreIfNotExists = true, + purge = false, + retainData = false) + } + } + /////////////////////////////////////////////////////////////////////////// // Function related API /////////////////////////////////////////////////////////////////////////// @@ -797,6 +819,7 @@ class VersionsSuite extends SparkFunSuite with Logging { versionSpark.sql( """ |CREATE TABLE tbl(c1 string) + |USING hive |PARTITIONED BY (ds STRING) """.stripMargin) versionSpark.sql("INSERT OVERWRITE TABLE tbl partition (ds='2') SELECT '1'") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala index 87771eed17b1b..70dcfb05c2ba9 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala @@ -825,7 +825,7 @@ abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with Te """ |SELECT corr(b, c) FROM covar_tab WHERE a = 3 """.stripMargin), - Row(Double.NaN) :: Nil) + Row(null) :: Nil) checkAnswer( spark.sql( @@ -834,10 +834,10 @@ abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with Te """.stripMargin), Row(1, null) :: Row(2, null) :: - Row(3, Double.NaN) :: - Row(4, Double.NaN) :: - Row(5, Double.NaN) :: - Row(6, Double.NaN) :: Nil) + Row(3, null) :: + Row(4, null) :: + Row(5, null) :: + Row(6, null) :: Nil) val corr7 = spark.sql("SELECT corr(b, c) FROM covar_tab").collect()(0).getDouble(0) assert(math.abs(corr7 - 0.6633880657639323) < 1e-12) @@ -869,7 +869,7 @@ abstract class AggregationQuerySuite extends QueryTest with SQLTestUtils with Te // one row test val df3 = Seq.tabulate(1)(x => (1 * x, x * x * x - 2)).toDF("a", "b") - checkAnswer(df3.groupBy().agg(covar_samp("a", "b")), Row(Double.NaN)) + checkAnswer(df3.groupBy().agg(covar_samp("a", "b")), Row(null)) checkAnswer(df3.groupBy().agg(covar_pop("a", "b")), Row(0.0)) } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala index dcec8bf5c0cc6..d3398842afb21 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala @@ -22,9 +22,8 @@ import java.io.File import com.google.common.io.Files import org.apache.hadoop.fs.{FileContext, FsConstants, Path} -import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode} +import org.apache.spark.sql.{AnalysisException, QueryTest, Row} import org.apache.spark.sql.catalyst.TableIdentifier -import 
org.apache.spark.sql.catalyst.analysis.NoSuchTableException import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.execution.command.LoadDataCommand import org.apache.spark.sql.hive.test.TestHiveSingleton @@ -33,7 +32,6 @@ import org.apache.spark.sql.test.SQLTestUtils import org.apache.spark.sql.types.StructType class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleton { - import testImplicits._ protected override def beforeAll(): Unit = { super.beforeAll() @@ -58,27 +56,11 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto |STORED AS PARQUET |TBLPROPERTIES('prop1Key'="prop1Val", '`prop2Key`'="prop2Val") """.stripMargin) - sql("CREATE TABLE parquet_tab3(col1 int, `col 2` int) USING hive") sql("CREATE TABLE parquet_tab4 (price int, qty int) partitioned by (year int, month int)") sql("INSERT INTO parquet_tab4 PARTITION(year = 2015, month = 1) SELECT 1, 1") sql("INSERT INTO parquet_tab4 PARTITION(year = 2015, month = 2) SELECT 2, 2") sql("INSERT INTO parquet_tab4 PARTITION(year = 2016, month = 2) SELECT 3, 3") sql("INSERT INTO parquet_tab4 PARTITION(year = 2016, month = 3) SELECT 3, 3") - sql( - """ - |CREATE TABLE parquet_tab5 (price int, qty int) - |PARTITIONED BY (year int, month int, hour int, minute int, sec int, extra int) - """.stripMargin) - sql( - """ - |INSERT INTO parquet_tab5 - |PARTITION(year = 2016, month = 3, hour = 10, minute = 10, sec = 10, extra = 1) SELECT 3, 3 - """.stripMargin) - sql( - """ - |INSERT INTO parquet_tab5 - |PARTITION(year = 2016, month = 4, hour = 10, minute = 10, sec = 10, extra = 1) SELECT 3, 3 - """.stripMargin) sql("CREATE VIEW parquet_view1 as select * from parquet_tab4") } @@ -86,37 +68,13 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto try { sql("DROP TABLE IF EXISTS parquet_tab1") sql("DROP TABLE IF EXISTS parquet_tab2") - sql("DROP TABLE IF EXISTS parquet_tab3") sql("DROP VIEW IF EXISTS parquet_view1") sql("DROP TABLE IF EXISTS parquet_tab4") - sql("DROP TABLE IF EXISTS parquet_tab5") } finally { super.afterAll() } } - test("show tables") { - withTable("show1a", "show2b") { - sql("CREATE TABLE show1a(c1 int)") - sql("CREATE TABLE show2b(c2 int)") - checkAnswer( - sql("SHOW TABLES IN default 'show1*'"), - Row("default", "show1a", false) :: Nil) - checkAnswer( - sql("SHOW TABLES IN default 'show1*|show2*'"), - Row("default", "show1a", false) :: - Row("default", "show2b", false) :: Nil) - checkAnswer( - sql("SHOW TABLES 'show1*|show2*'"), - Row("default", "show1a", false) :: - Row("default", "show2b", false) :: Nil) - assert( - sql("SHOW TABLES").count() >= 2) - assert( - sql("SHOW TABLES IN default").count() >= 2) - } - } - test("show views") { withView("show1a", "show2b", "global_temp.temp1", "temp2") { sql("CREATE VIEW show1a AS SELECT 1 AS id") @@ -415,88 +373,6 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils with TestHiveSingleto } } - - test("show partitions - show everything") { - checkAnswer( - sql("show partitions parquet_tab4"), - Row("year=2015/month=1") :: - Row("year=2015/month=2") :: - Row("year=2016/month=2") :: - Row("year=2016/month=3") :: Nil) - - checkAnswer( - sql("show partitions default.parquet_tab4"), - Row("year=2015/month=1") :: - Row("year=2015/month=2") :: - Row("year=2016/month=2") :: - Row("year=2016/month=3") :: Nil) - } - - test("show partitions - show everything more than 5 part keys") { - checkAnswer( - sql("show partitions parquet_tab5"), - 
Row("year=2016/month=3/hour=10/minute=10/sec=10/extra=1") :: - Row("year=2016/month=4/hour=10/minute=10/sec=10/extra=1") :: Nil) - } - - test("show partitions - filter") { - checkAnswer( - sql("show partitions default.parquet_tab4 PARTITION(year=2015)"), - Row("year=2015/month=1") :: - Row("year=2015/month=2") :: Nil) - - checkAnswer( - sql("show partitions default.parquet_tab4 PARTITION(year=2015, month=1)"), - Row("year=2015/month=1") :: Nil) - - checkAnswer( - sql("show partitions default.parquet_tab4 PARTITION(month=2)"), - Row("year=2015/month=2") :: - Row("year=2016/month=2") :: Nil) - } - - test("show partitions - empty row") { - withTempView("parquet_temp") { - sql( - """ - |CREATE TEMPORARY VIEW parquet_temp (c1 INT, c2 STRING) - |USING org.apache.spark.sql.parquet.DefaultSource - """.stripMargin) - // An empty sequence of row is returned for session temporary table. - intercept[NoSuchTableException] { - sql("SHOW PARTITIONS parquet_temp") - } - - val message1 = intercept[AnalysisException] { - sql("SHOW PARTITIONS parquet_tab3") - }.getMessage - assert(message1.contains("not allowed on a table that is not partitioned")) - - val message2 = intercept[AnalysisException] { - sql("SHOW PARTITIONS parquet_tab4 PARTITION(abcd=2015, xyz=1)") - }.getMessage - assert(message2.contains("Non-partitioning column(s) [abcd, xyz] are specified")) - - val message3 = intercept[AnalysisException] { - sql("SHOW PARTITIONS parquet_view1") - }.getMessage - assert(message3.contains("is not allowed on a view")) - } - } - - test("show partitions - datasource") { - withTable("part_datasrc") { - val df = (1 to 3).map(i => (i, s"val_$i", i * 2)).toDF("a", "b", "c") - df.write - .partitionBy("a") - .format("parquet") - .mode(SaveMode.Overwrite) - .saveAsTable("part_datasrc") - - assert(sql("SHOW PARTITIONS part_datasrc").count() == 3) - } - } - test("SPARK-25918: LOAD DATA LOCAL INPATH should handle a relative path") { val localFS = FileContext.getLocalFSFileContext() val workingDir = localFS.getWorkingDirectory diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala index 7f198632a1cd6..01cf214574eeb 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveComparisonTest.scala @@ -375,7 +375,7 @@ abstract class HiveComparisonTest extends SparkFunSuite with BeforeAndAfterAll { (!hiveQuery.logical.isInstanceOf[DescribeFunction]) && (!hiveQuery.logical.isInstanceOf[DescribeCommandBase]) && (!hiveQuery.logical.isInstanceOf[DescribeRelation]) && - (!hiveQuery.logical.isInstanceOf[DescribeColumnStatement]) && + (!hiveQuery.logical.isInstanceOf[DescribeColumn]) && preparedHive != catalyst) { val hivePrintOut = s"== HIVE - ${preparedHive.size} row(s) ==" +: preparedHive diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala index fbd1fc1ea98df..50b1dd952c61e 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala @@ -19,6 +19,7 @@ package org.apache.spark.sql.hive.execution import java.io.File import java.net.URI +import java.util.Locale import org.apache.hadoop.fs.Path import 
org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER @@ -158,22 +159,6 @@ class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeA testChangeColumn(isDatasourceTable = false) } - test("alter table: rename partition") { - testRenamePartitions(isDatasourceTable = false) - } - - test("alter table: drop partition") { - testDropPartitions(isDatasourceTable = false) - } - - test("alter table: add partition") { - testAddPartitions(isDatasourceTable = false) - } - - test("drop table") { - testDropTable(isDatasourceTable = false) - } - test("alter datasource table add columns - orc") { testAddColumn("orc") } @@ -458,15 +443,15 @@ class HiveDDLSuite withTable("tab1", "tab2") { (("a", "b") :: Nil).toDF().write.json(tempDir.getCanonicalPath) - var e = intercept[AnalysisException] { sql("CREATE TABLE tab1 USING hive") }.getMessage - assert(e.contains("Unable to infer the schema. The schema specification is required to " + - "create the table `default`.`tab1`")) + assertAnalysisError( + "CREATE TABLE tab1 USING hive", + "Unable to infer the schema. The schema specification is required to " + + "create the table `default`.`tab1`") - e = intercept[AnalysisException] { - sql(s"CREATE TABLE tab2 USING hive location '${tempDir.getCanonicalPath}'") - }.getMessage - assert(e.contains("Unable to infer the schema. The schema specification is required to " + - "create the table `default`.`tab2`")) + assertAnalysisError( + s"CREATE TABLE tab2 USING hive location '${tempDir.getCanonicalPath}'", + "Unable to infer the schema. The schema specification is required to " + + "create the table `default`.`tab2`") } } } @@ -588,26 +573,23 @@ class HiveDDLSuite } test("create table: partition column names exist in table definition") { - val e = intercept[AnalysisException] { - sql("CREATE TABLE tbl(a int) PARTITIONED BY (a string)") - } - assert(e.message == "Found duplicate column(s) in the table definition of `default`.`tbl`: `a`") + assertAnalysisError( + "CREATE TABLE tbl(a int) PARTITIONED BY (a string)", + "Found duplicate column(s) in the table definition of `default`.`tbl`: `a`") } test("create partitioned table without specifying data type for the partition columns") { - val e = intercept[AnalysisException] { - sql("CREATE TABLE tbl(a int) PARTITIONED BY (b) STORED AS parquet") - } - assert(e.message.contains("Must specify a data type for each partition column while creating " + - "Hive partitioned table.")) + assertAnalysisError( + "CREATE TABLE tbl(a int) PARTITIONED BY (b) STORED AS parquet", + "partition column b is not defined in table") } test("add/drop partition with location - managed table") { val tab = "tab_with_partitions" withTempDir { tmpDir => val basePath = new File(tmpDir.getCanonicalPath) - val part1Path = new File(basePath + "/part1") - val part2Path = new File(basePath + "/part2") + val part1Path = new File(new File(basePath, "part10"), "part11") + val part2Path = new File(new File(basePath, "part20"), "part21") val dirSet = part1Path :: part2Path :: Nil // Before data insertion, all the directory are empty @@ -651,11 +633,10 @@ class HiveDDLSuite test("SPARK-19129: drop partition with a empty string will drop the whole table") { val df = spark.createDataFrame(Seq((0, "a"), (1, "b"))).toDF("partCol1", "name") df.write.mode("overwrite").partitionBy("partCol1").saveAsTable("partitionedTable") - val e = intercept[AnalysisException] { - spark.sql("alter table partitionedTable drop partition(partCol1='')") - }.getMessage - assert(e.contains("Partition spec is 
invalid. The spec ([partCol1=]) contains an empty " + - "partition column value")) + assertAnalysisError( + "alter table partitionedTable drop partition(partCol1='')", + "Partition spec is invalid. The spec ([partCol1=]) contains an empty " + + "partition column value") } test("add/drop partitions - external table") { @@ -700,11 +681,10 @@ class HiveDDLSuite // After data insertion, all the directory are not empty assert(dirSet.forall(dir => dir.listFiles.nonEmpty)) - val message = intercept[AnalysisException] { - sql(s"ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-09', unknownCol='12')") - } - assert(message.getMessage.contains("unknownCol is not a valid partition column in table " + - "`default`.`exttable_with_partitions`")) + assertAnalysisError( + s"ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-09', unknownCol='12')", + "unknownCol is not a valid partition column in table " + + "`default`.`exttable_with_partitions`") sql( s""" @@ -806,15 +786,18 @@ class HiveDDLSuite sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')") checkProperties(Map()) - val message = intercept[AnalysisException] { - sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')") - }.getMessage - assert(message.contains( - "Attempted to unset non-existent property 'p' in table '`default`.`view1`'")) + assertAnalysisError( + s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')", + "Attempted to unset non-existent property 'p' in table '`default`.`view1`'") } } } + private def assertAnalysisError(sqlText: String, message: String): Unit = { + val e = intercept[AnalysisException](sql(sqlText)) + assert(e.message.contains(message)) + } + private def assertErrorForAlterTableOnView(sqlText: String): Unit = { val message = intercept[AnalysisException](sql(sqlText)).getMessage assert(message.contains("Cannot alter a view with ALTER TABLE. Please use ALTER VIEW instead")) @@ -828,10 +811,9 @@ class HiveDDLSuite test("create table - SET TBLPROPERTIES EXTERNAL to TRUE") { val tabName = "tab1" withTable(tabName) { - val message = intercept[AnalysisException] { - sql(s"CREATE TABLE $tabName (height INT, length INT) TBLPROPERTIES('EXTERNAL'='TRUE')") - }.getMessage - assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'")) + assertAnalysisError( + s"CREATE TABLE $tabName (height INT, length INT) TBLPROPERTIES('EXTERNAL'='TRUE')", + "Cannot set or change the preserved property key: 'EXTERNAL'") } } @@ -842,10 +824,9 @@ class HiveDDLSuite sql(s"CREATE TABLE $tabName (height INT, length INT)") assert( catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED) - val message = intercept[AnalysisException] { - sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('EXTERNAL' = 'TRUE')") - }.getMessage - assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'")) + assertAnalysisError( + s"ALTER TABLE $tabName SET TBLPROPERTIES ('EXTERNAL' = 'TRUE')", + "Cannot set or change the preserved property key: 'EXTERNAL'") // The table type is not changed to external assert( catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED) @@ -875,32 +856,48 @@ class HiveDDLSuite assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RENAME TO $newViewName") - assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName SET TBLPROPERTIES ('p' = 'an')") + assertAnalysisError( + s"ALTER VIEW $tabName SET TBLPROPERTIES ('p' = 'an')", + s"$tabName is a table. 'ALTER VIEW ... SET TBLPROPERTIES' expects a view. 
" + + "Please use ALTER TABLE instead.") assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET TBLPROPERTIES ('p' = 'an')") - assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName UNSET TBLPROPERTIES ('p')") + assertAnalysisError( + s"ALTER VIEW $tabName UNSET TBLPROPERTIES ('p')", + s"$tabName is a table. 'ALTER VIEW ... UNSET TBLPROPERTIES' expects a view. " + + "Please use ALTER TABLE instead.") assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName UNSET TBLPROPERTIES ('p')") assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET LOCATION '/path/to/home'") - assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDE 'whatever'") + assertAnalysisError( + s"ALTER TABLE $oldViewName SET SERDE 'whatever'", + s"$oldViewName is a view. 'ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]' expects a table.") - assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDEPROPERTIES ('x' = 'y')") + assertAnalysisError( + s"ALTER TABLE $oldViewName SET SERDEPROPERTIES ('x' = 'y')", + s"$oldViewName is a view. 'ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]' expects a table.") - assertErrorForAlterTableOnView( - s"ALTER TABLE $oldViewName PARTITION (a=1, b=2) SET SERDEPROPERTIES ('x' = 'y')") + assertAnalysisError( + s"ALTER TABLE $oldViewName PARTITION (a=1, b=2) SET SERDEPROPERTIES ('x' = 'y')", + s"$oldViewName is a view. 'ALTER TABLE ... SET [SERDE|SERDEPROPERTIES]' expects a table.") - assertErrorForAlterTableOnView( - s"ALTER TABLE $oldViewName ADD IF NOT EXISTS PARTITION (a='4', b='8')") + assertAnalysisError( + s"ALTER TABLE $oldViewName RECOVER PARTITIONS", + s"$oldViewName is a view. 'ALTER TABLE ... RECOVER PARTITIONS' expects a table.") - assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName DROP IF EXISTS PARTITION (a='2')") + assertAnalysisError( + s"ALTER TABLE $oldViewName PARTITION (a='1') RENAME TO PARTITION (a='100')", + s"$oldViewName is a view. 'ALTER TABLE ... RENAME TO PARTITION' expects a table.") - assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RECOVER PARTITIONS") - - assertErrorForAlterTableOnView( - s"ALTER TABLE $oldViewName PARTITION (a='1') RENAME TO PARTITION (a='100')") + assertAnalysisError( + s"ALTER TABLE $oldViewName ADD IF NOT EXISTS PARTITION (a='4', b='8')", + s"$oldViewName is a view. 'ALTER TABLE ... ADD PARTITION ...' expects a table.") + assertAnalysisError( + s"ALTER TABLE $oldViewName DROP IF EXISTS PARTITION (a='2')", + s"$oldViewName is a view. 'ALTER TABLE ... DROP PARTITION ...' 
expects a table.") assert(catalog.tableExists(TableIdentifier(tabName))) assert(catalog.tableExists(TableIdentifier(oldViewName))) @@ -977,7 +974,7 @@ class HiveDDLSuite } test("alter table partition - storage information") { - sql("CREATE TABLE boxes (height INT, length INT) PARTITIONED BY (width INT)") + sql("CREATE TABLE boxes (height INT, length INT) STORED AS textfile PARTITIONED BY (width INT)") sql("INSERT OVERWRITE TABLE boxes PARTITION (width=4) SELECT 4, 4") val catalog = spark.sessionState.catalog val expectedSerde = "com.sparkbricks.serde.ColumnarSerDe" @@ -994,7 +991,7 @@ class HiveDDLSuite |""".stripMargin) val newPart = catalog.getPartition(TableIdentifier("boxes"), Map("width" -> "4")) assert(newPart.storage.serde == Some(expectedSerde)) - assert(newPart.storage.properties.filterKeys(expectedSerdeProps.contains) == + assert(newPart.storage.properties.filterKeys(expectedSerdeProps.contains).toMap == expectedSerdeProps) } @@ -1039,10 +1036,9 @@ class HiveDDLSuite test("drop table using drop view") { withTable("tab1") { sql("CREATE TABLE tab1(c1 int)") - val message = intercept[AnalysisException] { - sql("DROP VIEW tab1") - }.getMessage - assert(message.contains("Cannot drop a table with DROP VIEW. Please use DROP TABLE instead")) + assertAnalysisError( + "DROP VIEW tab1", + "tab1 is a table. 'DROP VIEW' expects a view. Please use DROP TABLE instead.") } } @@ -1051,10 +1047,9 @@ class HiveDDLSuite spark.range(10).write.saveAsTable("tab1") withView("view1") { sql("CREATE VIEW view1 AS SELECT * FROM tab1") - val message = intercept[AnalysisException] { - sql("DROP TABLE view1") - }.getMessage - assert(message.contains("Cannot drop a view with DROP TABLE. Please use DROP VIEW instead")) + assertAnalysisError( + "DROP TABLE view1", + "Cannot drop a view with DROP TABLE. Please use DROP VIEW instead") } } } @@ -1192,7 +1187,7 @@ class HiveDDLSuite expectedDBUri, Map.empty)) // the database directory was created - assert(fs.exists(dbPath) && fs.isDirectory(dbPath)) + assert(fs.exists(dbPath) && fs.getFileStatus(dbPath).isDirectory) sql(s"USE $dbName") val tabName = "tab1" @@ -1208,10 +1203,9 @@ class HiveDDLSuite sql(s"USE default") val sqlDropDatabase = s"DROP DATABASE $dbName ${if (cascade) "CASCADE" else "RESTRICT"}" if (tableExists && !cascade) { - val message = intercept[AnalysisException] { - sql(sqlDropDatabase) - }.getMessage - assert(message.contains(s"Database $dbName is not empty. One or more tables exist.")) + assertAnalysisError( + sqlDropDatabase, + s"Database $dbName is not empty. One or more tables exist.") // the database directory was not removed assert(fs.exists(new Path(expectedDBLocation))) } else { @@ -1240,17 +1234,15 @@ class HiveDDLSuite test("drop default database") { Seq("true", "false").foreach { caseSensitive => withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) { - var message = intercept[AnalysisException] { - sql("DROP DATABASE default") - }.getMessage - assert(message.contains("Can not drop default database")) + assertAnalysisError( + "DROP DATABASE default", + "Can not drop default database") // SQLConf.CASE_SENSITIVE does not affect the result // because the Hive metastore is not case sensitive. 
- message = intercept[AnalysisException] { - sql("DROP DATABASE DeFault") - }.getMessage - assert(message.contains("Can not drop default database")) + assertAnalysisError( + "DROP DATABASE DeFault", + "Can not drop default database") } } } @@ -1640,10 +1632,9 @@ class HiveDDLSuite } // When tableExists is not invoked, we still can get an AnalysisException - val e = intercept[AnalysisException] { - sql(s"DESCRIBE $indexTabName") - }.getMessage - assert(e.contains("Hive index table is not supported.")) + assertAnalysisError( + s"DESCRIBE $indexTabName", + "Hive index table is not supported.") } finally { client.runSqlHive(s"DROP INDEX IF EXISTS $indexName ON $tabName") } @@ -1713,20 +1704,17 @@ class HiveDDLSuite sql("CREATE TABLE tbl(a INT) STORED AS parquet") Seq(DATASOURCE_PREFIX, STATISTICS_PREFIX).foreach { forbiddenPrefix => - val e = intercept[AnalysisException] { - sql(s"ALTER TABLE tbl SET TBLPROPERTIES ('${forbiddenPrefix}foo' = 'loser')") - } - assert(e.getMessage.contains(forbiddenPrefix + "foo")) + assertAnalysisError( + s"ALTER TABLE tbl SET TBLPROPERTIES ('${forbiddenPrefix}foo' = 'loser')", + s"${forbiddenPrefix}foo") - val e2 = intercept[AnalysisException] { - sql(s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${forbiddenPrefix}foo')") - } - assert(e2.getMessage.contains(forbiddenPrefix + "foo")) + assertAnalysisError( + s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${forbiddenPrefix}foo')", + s"${forbiddenPrefix}foo") - val e3 = intercept[AnalysisException] { - sql(s"CREATE TABLE tbl2 (a INT) TBLPROPERTIES ('${forbiddenPrefix}foo'='anything')") - } - assert(e3.getMessage.contains(forbiddenPrefix + "foo")) + assertAnalysisError( + s"CREATE TABLE tbl2 (a INT) TBLPROPERTIES ('${forbiddenPrefix}foo'='anything')", + s"${forbiddenPrefix}foo") } } } @@ -1746,10 +1734,9 @@ class HiveDDLSuite assert(spark.table("rectangles").collect().isEmpty) // not supported since the table is not partitioned - val e = intercept[AnalysisException] { - sql("TRUNCATE TABLE rectangles PARTITION (width=1)") - } - assert(e.message.contains("Operation not allowed")) + assertAnalysisError( + "TRUNCATE TABLE rectangles PARTITION (width=1)", + "Operation not allowed") } } } @@ -1787,10 +1774,9 @@ class HiveDDLSuite } // throw exception if the column in partition spec is not a partition column. 
- val e = intercept[AnalysisException] { - sql("TRUNCATE TABLE partTable PARTITION (unknown=1)") - } - assert(e.message.contains("unknown is not a valid partition column")) + assertAnalysisError( + "TRUNCATE TABLE partTable PARTITION (unknown=1)", + "unknown is not a valid partition column") } } @@ -2148,10 +2134,9 @@ class HiveDDLSuite assert(loc.listFiles().length >= 1) checkAnswer(spark.table("t"), Row("1") :: Nil) } else { - val e = intercept[AnalysisException] { - spark.sql("INSERT INTO TABLE t SELECT 1") - }.getMessage - assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b")) + assertAnalysisError( + "INSERT INTO TABLE t SELECT 1", + "java.net.URISyntaxException: Relative path in absolute URI: a:b") } } @@ -2190,15 +2175,13 @@ class HiveDDLSuite Row("1", "2") :: Row("1", "2017-03-03 12:13%3A14") :: Nil) } } else { - val e = intercept[AnalysisException] { - spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1") - }.getMessage - assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b")) - - val e1 = intercept[AnalysisException] { - spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1") - }.getMessage - assert(e1.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b")) + assertAnalysisError( + "INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1", + "java.net.URISyntaxException: Relative path in absolute URI: a:b") + + assertAnalysisError( + "INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1", + "java.net.URISyntaxException: Relative path in absolute URI: a:b") } } } @@ -2245,8 +2228,8 @@ class HiveDDLSuite ) sql("ALTER TABLE tab ADD COLUMNS (c5 char(10))") - assert(spark.table("tab").schema.find(_.name == "c5") - .get.metadata.getString("HIVE_TYPE_STRING") == "char(10)") + assert(spark.sharedState.externalCatalog.getTable("default", "tab") + .schema.find(_.name == "c5").get.dataType == CharType(10)) } } } @@ -2283,30 +2266,26 @@ class HiveDDLSuite sql("CREATE TABLE tab (c1 int) PARTITIONED BY (c2 int) STORED AS PARQUET") if (!caseSensitive) { // duplicating partitioning column name - val e1 = intercept[AnalysisException] { - sql("ALTER TABLE tab ADD COLUMNS (C2 string)") - }.getMessage - assert(e1.contains("Found duplicate column(s)")) + assertAnalysisError( + "ALTER TABLE tab ADD COLUMNS (C2 string)", + "Found duplicate column(s)") // duplicating data column name - val e2 = intercept[AnalysisException] { - sql("ALTER TABLE tab ADD COLUMNS (C1 string)") - }.getMessage - assert(e2.contains("Found duplicate column(s)")) + assertAnalysisError( + "ALTER TABLE tab ADD COLUMNS (C1 string)", + "Found duplicate column(s)") } else { // hive catalog will still complains that c1 is duplicate column name because hive // identifiers are case insensitive. - val e1 = intercept[AnalysisException] { - sql("ALTER TABLE tab ADD COLUMNS (C2 string)") - }.getMessage - assert(e1.contains("HiveException")) + assertAnalysisError( + "ALTER TABLE tab ADD COLUMNS (C2 string)", + "HiveException") // hive catalog will still complains that c1 is duplicate column name because hive // identifiers are case insensitive. 
- val e2 = intercept[AnalysisException] { - sql("ALTER TABLE tab ADD COLUMNS (C1 string)") - }.getMessage - assert(e2.contains("HiveException")) + assertAnalysisError( + "ALTER TABLE tab ADD COLUMNS (C1 string)", + "HiveException") } } } @@ -2328,58 +2307,49 @@ class HiveDDLSuite // Forbid CTAS with null type withTable("t1", "t2", "t3") { - val e1 = intercept[AnalysisException] { - spark.sql("CREATE TABLE t1 USING PARQUET AS SELECT null as null_col") - }.getMessage - assert(e1.contains("Cannot create tables with null type")) + assertAnalysisError( + "CREATE TABLE t1 USING PARQUET AS SELECT null as null_col", + "Cannot create tables with null type") - val e2 = intercept[AnalysisException] { - spark.sql("CREATE TABLE t2 AS SELECT null as null_col") - }.getMessage - assert(e2.contains("Cannot create tables with null type")) + assertAnalysisError( + "CREATE TABLE t2 AS SELECT null as null_col", + "Cannot create tables with null type") - val e3 = intercept[AnalysisException] { - spark.sql("CREATE TABLE t3 STORED AS PARQUET AS SELECT null as null_col") - }.getMessage - assert(e3.contains("Cannot create tables with null type")) + assertAnalysisError( + "CREATE TABLE t3 STORED AS PARQUET AS SELECT null as null_col", + "Cannot create tables with null type") } // Forbid Replace table AS SELECT with null type withTable("t") { val v2Source = classOf[FakeV2Provider].getName - val e = intercept[AnalysisException] { - spark.sql(s"CREATE OR REPLACE TABLE t USING $v2Source AS SELECT null as null_col") - }.getMessage - assert(e.contains("Cannot create tables with null type")) + assertAnalysisError( + s"CREATE OR REPLACE TABLE t USING $v2Source AS SELECT null as null_col", + "Cannot create tables with null type") } // Forbid creating table with VOID type in Spark withTable("t1", "t2", "t3", "t4") { - val e1 = intercept[AnalysisException] { - spark.sql(s"CREATE TABLE t1 (v VOID) USING PARQUET") - }.getMessage - assert(e1.contains("Cannot create tables with null type")) - val e2 = intercept[AnalysisException] { - spark.sql(s"CREATE TABLE t2 (v VOID) USING hive") - }.getMessage - assert(e2.contains("Cannot create tables with null type")) - val e3 = intercept[AnalysisException] { - spark.sql(s"CREATE TABLE t3 (v VOID)") - }.getMessage - assert(e3.contains("Cannot create tables with null type")) - val e4 = intercept[AnalysisException] { - spark.sql(s"CREATE TABLE t4 (v VOID) STORED AS PARQUET") - }.getMessage - assert(e4.contains("Cannot create tables with null type")) + assertAnalysisError( + "CREATE TABLE t1 (v VOID) USING PARQUET", + "Cannot create tables with null type") + assertAnalysisError( + "CREATE TABLE t2 (v VOID) USING hive", + "Cannot create tables with null type") + assertAnalysisError( + "CREATE TABLE t3 (v VOID)", + "Cannot create tables with null type") + assertAnalysisError( + "CREATE TABLE t4 (v VOID) STORED AS PARQUET", + "Cannot create tables with null type") } // Forbid Replace table with VOID type withTable("t") { val v2Source = classOf[FakeV2Provider].getName - val e = intercept[AnalysisException] { - spark.sql(s"CREATE OR REPLACE TABLE t (v VOID) USING $v2Source") - }.getMessage - assert(e.contains("Cannot create tables with null type")) + assertAnalysisError( + s"CREATE OR REPLACE TABLE t (v VOID) USING $v2Source", + "Cannot create tables with null type") } // Make sure spark.catalog.createTable with null type will fail @@ -2613,9 +2583,9 @@ class HiveDDLSuite test("load command for non local invalid path validation") { withTable("tbl") { sql("CREATE TABLE tbl(i INT, j STRING) USING 
hive") - val e = intercept[AnalysisException]( - sql("load data inpath '/doesnotexist.csv' into table tbl")) - assert(e.message.contains("LOAD DATA input path does not exist")) + assertAnalysisError( + "load data inpath '/doesnotexist.csv' into table tbl", + "LOAD DATA input path does not exist") } } @@ -2694,8 +2664,7 @@ class HiveDDLSuite |AS SELECT 1 as a, "a" as b """.stripMargin) }.getMessage - assert(err1.contains("Schema may not be specified in a Create Table As Select " + - "(CTAS) statement")) + assert(err1.contains("Schema may not be specified in a Create Table As Select")) val err2 = intercept[ParseException] { spark.sql( @@ -2706,8 +2675,7 @@ class HiveDDLSuite |AS SELECT 1 as a, "a" as b """.stripMargin) }.getMessage - assert(err2.contains("Create Partitioned Table As Select cannot specify data type for " + - "the partition columns of the target table")) + assert(err2.contains("Partition column types may not be specified in Create Table As Select")) } test("Hive CTAS with dynamic partition") { @@ -2764,69 +2732,44 @@ class HiveDDLSuite test("Create Table LIKE with row format") { val catalog = spark.sessionState.catalog - withTable("sourceHiveTable", "sourceDsTable", "targetHiveTable1", "targetHiveTable2") { + withTable("sourceHiveTable", "sourceDsTable") { sql("CREATE TABLE sourceHiveTable(a INT, b INT) STORED AS PARQUET") sql("CREATE TABLE sourceDsTable(a INT, b INT) USING PARQUET") // row format doesn't work in create targetDsTable - var e = intercept[AnalysisException] { - spark.sql( - """ - |CREATE TABLE targetDsTable LIKE sourceHiveTable USING PARQUET - |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' - """.stripMargin) - }.getMessage - assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'")) + assertAnalysisError( + """ + |CREATE TABLE targetDsTable LIKE sourceHiveTable USING PARQUET + |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + """.stripMargin, + "Operation not allowed: CREATE TABLE LIKE ... USING ... ROW FORMAT SERDE") // row format doesn't work with provider hive - e = intercept[AnalysisException] { - spark.sql( - """ - |CREATE TABLE targetHiveTable LIKE sourceHiveTable USING hive - |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' - |WITH SERDEPROPERTIES ('test' = 'test') - """.stripMargin) - }.getMessage - assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'")) + assertAnalysisError( + """ + |CREATE TABLE targetHiveTable LIKE sourceHiveTable USING hive + |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + |WITH SERDEPROPERTIES ('test' = 'test') + """.stripMargin, + "Operation not allowed: CREATE TABLE LIKE ... USING ... 
ROW FORMAT SERDE") // row format doesn't work without 'STORED AS' - e = intercept[AnalysisException] { - spark.sql( - """ - |CREATE TABLE targetDsTable LIKE sourceDsTable - |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' - |WITH SERDEPROPERTIES ('test' = 'test') - """.stripMargin) - }.getMessage - assert(e.contains("'ROW FORMAT' must be used with 'STORED AS'")) - - // row format works with STORED AS hive format (from hive table) - spark.sql( + assertAnalysisError( """ - |CREATE TABLE targetHiveTable1 LIKE sourceHiveTable STORED AS PARQUET + |CREATE TABLE targetDsTable LIKE sourceDsTable |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' |WITH SERDEPROPERTIES ('test' = 'test') - """.stripMargin) - var table = catalog.getTableMetadata(TableIdentifier("targetHiveTable1")) - assert(table.provider === Some("hive")) - assert(table.storage.inputFormat === - Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat")) - assert(table.storage.serde === Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) - assert(table.storage.properties("test") == "test") + """.stripMargin, + "'ROW FORMAT' must be used with 'STORED AS'") - // row format works with STORED AS hive format (from datasource table) - spark.sql( + // 'INPUTFORMAT' and 'OUTPUTFORMAT' conflict with 'USING' + assertAnalysisError( """ - |CREATE TABLE targetHiveTable2 LIKE sourceDsTable STORED AS PARQUET + |CREATE TABLE targetDsTable LIKE sourceDsTable USING format + |STORED AS INPUTFORMAT 'inFormat' OUTPUTFORMAT 'outFormat' |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' - |WITH SERDEPROPERTIES ('test' = 'test') - """.stripMargin) - table = catalog.getTableMetadata(TableIdentifier("targetHiveTable2")) - assert(table.provider === Some("hive")) - assert(table.storage.inputFormat === - Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat")) - assert(table.storage.serde === Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) - assert(table.storage.properties("test") == "test") + """.stripMargin, + "Operation not allowed: CREATE TABLE LIKE ... USING ... STORED AS") } } @@ -2854,4 +2797,112 @@ class HiveDDLSuite assert(sql("SELECT * FROM t2 WHERE c = 'A'").collect().isEmpty) } } + + test("SPARK-33546: CREATE TABLE LIKE should validate row format & file format") { + val catalog = spark.sessionState.catalog + withTable("sourceHiveTable", "sourceDsTable") { + sql("CREATE TABLE sourceHiveTable(a INT, b INT) STORED AS PARQUET") + sql("CREATE TABLE sourceDsTable(a INT, b INT) USING PARQUET") + + // ROW FORMAT SERDE ... 
STORED AS [SEQUENCEFILE | RCFILE | TEXTFILE] + val allowSerdeFileFormats = Seq("TEXTFILE", "SEQUENCEFILE", "RCFILE") + Seq("sourceHiveTable", "sourceDsTable").foreach { sourceTable => + allowSerdeFileFormats.foreach { format => + withTable("targetTable") { + spark.sql( + s""" + |CREATE TABLE targetTable LIKE $sourceTable + |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + |STORED AS $format + """.stripMargin) + + val expectedSerde = HiveSerDe.sourceToSerDe(format) + val table = catalog.getTableMetadata(TableIdentifier("targetTable", Some("default"))) + assert(table.provider === Some("hive")) + assert(table.storage.inputFormat === Some(expectedSerde.get.inputFormat.get)) + assert(table.storage.outputFormat === Some(expectedSerde.get.outputFormat.get)) + assert(table.storage.serde === + Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) + } + } + + // negative case + hiveFormats.filterNot(allowSerdeFileFormats.contains(_)).foreach { format => + withTable("targetTable") { + assertAnalysisError( + s""" + |CREATE TABLE targetTable LIKE $sourceTable + |ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + |STORED AS $format + """.stripMargin, + s"ROW FORMAT SERDE is incompatible with format '${format.toLowerCase(Locale.ROOT)}'") + } + } + } + + // ROW FORMAT DELIMITED ... STORED AS TEXTFILE + Seq("sourceHiveTable", "sourceDsTable").foreach { sourceTable => + withTable("targetTable") { + spark.sql( + s""" + |CREATE TABLE targetTable LIKE $sourceTable + |ROW FORMAT DELIMITED + |STORED AS TEXTFILE + """.stripMargin) + + val expectedSerde = HiveSerDe.sourceToSerDe("TEXTFILE") + val table = catalog.getTableMetadata(TableIdentifier("targetTable", Some("default"))) + assert(table.provider === Some("hive")) + assert(table.storage.inputFormat === Some(expectedSerde.get.inputFormat.get)) + assert(table.storage.outputFormat === Some(expectedSerde.get.outputFormat.get)) + assert(table.storage.serde === Some(expectedSerde.get.serde.get)) + + // negative case + assertAnalysisError( + s""" + |CREATE TABLE targetTable LIKE $sourceTable + |ROW FORMAT DELIMITED + |STORED AS PARQUET + """.stripMargin, + "ROW FORMAT DELIMITED is only compatible with 'textfile'") + } + } + + // ROW FORMAT ... STORED AS INPUTFORMAT ... OUTPUTFORMAT ... 
+ hiveFormats.foreach { tableType => + val expectedSerde = HiveSerDe.sourceToSerDe(tableType) + Seq("sourceHiveTable", "sourceDsTable").foreach { sourceTable => + withTable("targetTable") { + spark.sql( + s""" + |CREATE TABLE targetTable LIKE $sourceTable + |ROW FORMAT SERDE '${expectedSerde.get.serde.get}' + |STORED AS INPUTFORMAT '${expectedSerde.get.inputFormat.get}' + |OUTPUTFORMAT '${expectedSerde.get.outputFormat.get}' + """.stripMargin) + + val table = catalog.getTableMetadata(TableIdentifier("targetTable", Some("default"))) + assert(table.provider === Some("hive")) + assert(table.storage.inputFormat === Some(expectedSerde.get.inputFormat.get)) + assert(table.storage.outputFormat === Some(expectedSerde.get.outputFormat.get)) + assert(table.storage.serde === Some(expectedSerde.get.serde.get)) + } + } + } + } + } + + test("SPARK-33844: Insert overwrite directory should check schema too") { + withView("v") { + spark.range(1).createTempView("v") + withTempPath { path => + val e = intercept[AnalysisException] { + spark.sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path.getCanonicalPath}' " + + s"STORED AS PARQUET SELECT ID, if(1=1, 1, 0), abs(id), '^-' FROM v") + }.getMessage + assert(e.contains("Attribute name \"(IF((1 = 1), 1, 0))\" contains" + + " invalid character(s) among \" ,;{}()\\n\\t=\". Please use alias to rename it.")) + } + } + } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala index cea7c5686054a..21cc6af398eec 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala @@ -34,7 +34,6 @@ import org.apache.spark.sql.catalyst.expressions.Cast import org.apache.spark.sql.catalyst.parser.ParseException import org.apache.spark.sql.catalyst.plans.logical.Project import org.apache.spark.sql.execution.joins.BroadcastNestedLoopJoinExec -import org.apache.spark.sql.hive._ import org.apache.spark.sql.hive.test.{HiveTestJars, TestHive} import org.apache.spark.sql.hive.test.TestHive._ import org.apache.spark.sql.internal.SQLConf @@ -1220,6 +1219,23 @@ class HiveQuerySuite extends HiveComparisonTest with SQLTestUtils with BeforeAnd } } } + + test("SPARK-33084: Add jar support Ivy URI in SQL") { + val testData = TestHive.getHiveFile("data/files/sample.json").toURI + withTable("t") { + sql("ADD JAR ivy://org.apache.hive.hcatalog:hive-hcatalog-core:2.3.7") + sql( + """CREATE TABLE t(a string, b string) + |ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'""".stripMargin) + sql(s"""LOAD DATA LOCAL INPATH "$testData" INTO TABLE t""") + sql("SELECT * FROM src JOIN t on src.key = t.a") + assert(sql("LIST JARS").filter(_.getString(0).contains( + "org.apache.hive.hcatalog_hive-hcatalog-core-2.3.7.jar")).count() > 0) + assert(sql("LIST JAR"). 
+ filter(_.getString(0).contains( + "org.apache.hive.hcatalog_hive-hcatalog-core-2.3.7.jar")).count() > 0) + } + } } // for SPARK-2180 test diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSQLViewSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSQLViewSuite.scala index da7dfd05f33d6..8aae7a1545b1a 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSQLViewSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSQLViewSuite.scala @@ -17,11 +17,11 @@ package org.apache.spark.sql.hive.execution -import org.apache.spark.sql.{AnalysisException, Row, SaveMode, SparkSession} +import org.apache.spark.sql.{AnalysisException, Row} import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable, CatalogTableType} import org.apache.spark.sql.execution.SQLViewSuite -import org.apache.spark.sql.hive.test.{TestHive, TestHiveSingleton} +import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.types.{NullType, StructType} /** diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala index bb87246acf4ca..3892caa51eca9 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveScriptTransformationSuite.scala @@ -17,18 +17,16 @@ package org.apache.spark.sql.hive.execution -import java.io.File import java.sql.Timestamp -import org.apache.commons.io.FileUtils import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe import org.scalatest.exceptions.TestFailedException import org.apache.spark.{SparkException, TestUtils} +import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference, Expression} import org.apache.spark.sql.execution._ import org.apache.spark.sql.functions._ -import org.apache.spark.sql.hive.HiveUtils import org.apache.spark.sql.hive.test.TestHiveSingleton import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.CalendarInterval @@ -38,8 +36,6 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T import ScriptTransformationIOSchema._ - override def isHive23OrSpark: Boolean = HiveUtils.isHive23 - override def createScriptTransformationExec( input: Seq[Expression], script: String, @@ -158,10 +154,7 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T assert(uncaughtExceptionHandler.exception.isEmpty) } - test("SPARK-25990: TRANSFORM should handle schema less correctly (hive serde)") { - assume(TestUtils.testCommandAvailable("python")) - val scriptFilePath = copyAndGetResourceFile("test_script.py", ".py").getAbsolutePath - + test("SPARK-32388: TRANSFORM should handle schema less correctly (hive serde)") { withTempView("v") { val df = Seq( (1, "1", 1.0, BigDecimal(1.0), new Timestamp(1)), @@ -170,21 +163,157 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T ).toDF("a", "b", "c", "d", "e") // Note column d's data type is Decimal(38, 18) df.createTempView("v") - val query = sql( - s""" - |SELECT TRANSFORM(a, b, c, d, e) - |USING 'python ${scriptFilePath}' - |FROM v - """.stripMargin) + // In hive default serde mode, if we don't define output schema, + // when output column size > 
2 and don't specify serde, + // it will choose take rest columns in second column as output schema + // (key: String, value: String) + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a, b, c, d, e) + | USING 'cat' + |FROM v + """.stripMargin), + identity, + df.select( + 'a.cast("string").as("key"), + concat_ws("\t", + 'b.cast("string"), + 'c.cast("string"), + 'd.cast("string"), + 'e.cast("string")).as("value")).collect()) + + // In hive default serde mode, if we don't define output schema, + // when output column size > 2 and just specify serde, + // it will choose take rest columns in second column as output schema + // (key: String, value: String) + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a, b, c, d, e) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t' + | ) + | USING 'cat' + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t' + | ) + |FROM v + """.stripMargin), + identity, + df.select( + 'a.cast("string").as("key"), + 'b.cast("string").as("value")).collect()) + + + // In hive default serde mode, if we don't define output schema, + // when output column size > 2 and specify serde with + // 'serialization.last.column.takes.rest=true', + // it will choose take rest columns in second column as output schema + // (key: String, value: String) + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a, b, c, d, e) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'true' + | ) + | USING 'cat' + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'true' + | ) + |FROM v + """.stripMargin), + identity, + df.select( + 'a.cast("string").as("key"), + concat_ws("\t", + 'b.cast("string"), + 'c.cast("string"), + 'd.cast("string"), + 'e.cast("string")).as("value")).collect()) + + // In hive default serde mode, if we don't define output schema, + // when output column size > 2 and specify serde + // with 'serialization.last.column.takes.rest=false', + // it will choose first two column as output schema (key: String, value: String) + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a, b, c, d, e) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'false' + | ) + | USING 'cat' + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'false' + | ) + |FROM v + """.stripMargin), + identity, + df.select( + 'a.cast("string").as("key"), + 'b.cast("string").as("value")).collect()) - // In hive default serde mode, if we don't define output schema, it will choose first - // two column as output schema (key: String, value: String) + // In hive default serde mode, if we don't define output schema, + // when output column size = 2 and specify serde, it will these two column as + // output schema (key: String, value: String) checkAnswer( - query, + sql( + s""" + |SELECT TRANSFORM(a, b) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'true' + | ) + | USING 'cat' + | ROW FORMAT SERDE 
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'true' + | ) + |FROM v + """.stripMargin), identity, df.select( 'a.cast("string").as("key"), 'b.cast("string").as("value")).collect()) + + // In hive default serde mode, if we don't define output schema, + // when output column size < 2 and specify serde, it will return null for deficiency + // output schema (key: String, value: String) + checkAnswer( + sql( + s""" + |SELECT TRANSFORM(a) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'true' + | ) + | USING 'cat' + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '\t', + | 'serialization.last.column.takes.rest' = 'true' + | ) + |FROM v + """.stripMargin), + identity, + df.select( + 'a.cast("string").as("key"), + lit(null)).collect()) } } @@ -244,7 +373,7 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T } } - test("SPARK-32400: TRANSFORM doesn't support CalenderIntervalType/UserDefinedType (hive serde)") { + test("SPARK-32400: TRANSFORM doesn't support CalendarIntervalType/UserDefinedType (hive serde)") { assume(TestUtils.testCommandAvailable("/bin/bash")) withTempView("v") { val df = Seq( @@ -282,7 +411,7 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T } test("SPARK-32400: TRANSFORM doesn't support" + - " CalenderIntervalType/UserDefinedType end to end (hive serde)") { + " CalendarIntervalType/UserDefinedType end to end (hive serde)") { assume(TestUtils.testCommandAvailable("/bin/bash")) withTempView("v") { val df = Seq( @@ -310,4 +439,93 @@ class HiveScriptTransformationSuite extends BaseScriptTransformationSuite with T assert(e2.contains("array cannot be converted to Hive TypeInfo")) } } + + test("SPARK-32685: When use specified serde, filed.delim's default value is '\t'") { + val query1 = sql( + """ + |SELECT split(value, "\t") FROM ( + |SELECT TRANSFORM(a, b, c) + |USING 'cat' + |FROM (SELECT 1 AS a, 2 AS b, 3 AS c) t + |) temp; + """.stripMargin) + checkAnswer(query1, identity, Row(Seq("2", "3")) :: Nil) + + val query2 = sql( + """ + |SELECT split(value, "\t") FROM ( + |SELECT TRANSFORM(a, b, c) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + |USING 'cat' + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'serialization.last.column.takes.rest' = 'true' + | ) + |FROM (SELECT 1 AS a, 2 AS b, 3 AS c) t + |) temp; + """.stripMargin) + checkAnswer(query2, identity, Row(Seq("2", "3")) :: Nil) + + val query3 = sql( + """ + |SELECT split(value, "&") FROM ( + |SELECT TRANSFORM(a, b, c) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '&' + | ) + |USING 'cat' + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'serialization.last.column.takes.rest' = 'true', + | 'field.delim' = '&' + | ) + |FROM (SELECT 1 AS a, 2 AS b, 3 AS c) t + |) temp; + """.stripMargin) + checkAnswer(query3, identity, Row(Seq("2", "3")) :: Nil) + + val query4 = sql( + """ + |SELECT split(value, "&") FROM ( + |SELECT TRANSFORM(a, b, c) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + |USING 'cat' + | ROW FORMAT SERDE 
'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'serialization.last.column.takes.rest' = 'true', + | 'field.delim' = '&' + | ) + |FROM (SELECT 1 AS a, 2 AS b, 3 AS c) t + |) temp; + """.stripMargin) + checkAnswer(query4, identity, Row(null) :: Nil) + } + + test("SPARK-32684: Script transform hive serde mode null format is same with hive as '\\N'") { + val query1 = sql( + """ + |SELECT TRANSFORM(null, null, null) + |USING 'cat' + |FROM (SELECT 1 AS a) t + """.stripMargin) + checkAnswer(query1, identity, Row(null, "\\N\t\\N") :: Nil) + + val query2 = sql( + """ + |SELECT TRANSFORM(null, null, null) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = ',' + | ) + |USING 'cat' AS (a) + | ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + | WITH SERDEPROPERTIES ( + | 'field.delim' = '&' + | ) + |FROM (SELECT 1 AS a) t + """.stripMargin) + checkAnswer(query2, identity, Row("\\N,\\N,\\N") :: Nil) + + } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala index ac9ae8c9229db..aae49f70ca93f 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala @@ -135,11 +135,12 @@ class HiveSerDeReadWriteSuite extends QueryTest with SQLTestUtils with TestHiveS } // MAP withTable("hive_serde") { - hiveClient.runSqlHive(s"CREATE TABLE hive_serde (c1 MAP ) STORED AS $fileFormat") - hiveClient.runSqlHive("INSERT INTO TABLE hive_serde SELECT MAP(1, 'a') FROM (SELECT 1) t") - checkAnswer(spark.table("hive_serde"), Row(Map(1 -> "a"))) - spark.sql("INSERT INTO TABLE hive_serde SELECT MAP(2, 'b')") - checkAnswer(spark.table("hive_serde"), Seq(Row(Map(1 -> "a")), Row(Map(2 -> "b")))) + hiveClient.runSqlHive( + s"CREATE TABLE hive_serde (c1 MAP ) STORED AS $fileFormat") + hiveClient.runSqlHive("INSERT INTO TABLE hive_serde SELECT MAP('1', 'a') FROM (SELECT 1) t") + checkAnswer(spark.table("hive_serde"), Row(Map("1" -> "a"))) + spark.sql("INSERT INTO TABLE hive_serde SELECT MAP('2', 'b')") + checkAnswer(spark.table("hive_serde"), Seq(Row(Map("1" -> "a")), Row(Map("2" -> "b")))) } // STRUCT @@ -154,7 +155,7 @@ class HiveSerDeReadWriteSuite extends QueryTest with SQLTestUtils with TestHiveS } } - Seq("PARQUET", "ORC", "TEXTFILE").foreach { fileFormat => + Seq("SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO").foreach { fileFormat => test(s"Read/Write Hive $fileFormat serde table") { // Numeric Types checkNumericTypes(fileFormat, "TINYINT", 2) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala index 24b1e3405379c..d7129bcb37e69 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeSuite.scala @@ -21,11 +21,10 @@ import java.net.URI import org.scalatest.BeforeAndAfterAll -import org.apache.spark.sql.{AnalysisException, SaveMode, SparkSession} +import org.apache.spark.sql.{AnalysisException, SparkSession} import org.apache.spark.sql.catalyst.catalog.CatalogTable import org.apache.spark.sql.catalyst.plans.PlanTest import 
org.apache.spark.sql.execution.command.{CreateTableCommand, DDLUtils} -import org.apache.spark.sql.execution.datasources.CreateTable import org.apache.spark.sql.execution.metric.InputOutputMetricsHelper import org.apache.spark.sql.hive.test.TestHive import org.apache.spark.sql.internal.{HiveSerDe, SQLConf} @@ -71,8 +70,8 @@ class HiveSerDeSuite extends HiveComparisonTest with PlanTest with BeforeAndAfte } private def extractTableDesc(sql: String): (CatalogTable, Boolean) = { - TestHive.sessionState.sqlParser.parsePlan(sql).collect { - case CreateTable(tableDesc, mode, _) => (tableDesc, mode == SaveMode.Ignore) + TestHive.sessionState.analyzer.execute(TestHive.sessionState.sqlParser.parsePlan(sql)).collect { + case CreateTableCommand(tableDesc, ifNotExists) => (tableDesc, ifNotExists) }.head } @@ -89,7 +88,7 @@ class HiveSerDeSuite extends HiveComparisonTest with PlanTest with BeforeAndAfte test("Test the default fileformat for Hive-serde tables") { withSQLConf("hive.default.fileformat" -> "orc") { val (desc, exists) = extractTableDesc( - "CREATE TABLE IF NOT EXISTS fileformat_test (id int)") + "CREATE TABLE IF NOT EXISTS fileformat_test (id int) USING hive") assert(exists) assert(desc.storage.inputFormat == Some("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")) assert(desc.storage.outputFormat == Some("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")) @@ -97,7 +96,8 @@ class HiveSerDeSuite extends HiveComparisonTest with PlanTest with BeforeAndAfte } withSQLConf("hive.default.fileformat" -> "parquet") { - val (desc, exists) = extractTableDesc("CREATE TABLE IF NOT EXISTS fileformat_test (id int)") + val (desc, exists) = extractTableDesc( + "CREATE TABLE IF NOT EXISTS fileformat_test (id int) USING hive") assert(exists) val input = desc.storage.inputFormat val output = desc.storage.outputFormat diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala index 67d7ed0841abb..5b43f82f253ea 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveTableScanSuite.scala @@ -17,7 +17,10 @@ package org.apache.spark.sql.hive.execution +import java.io.{File, IOException} + import org.apache.spark.sql.Row +import org.apache.spark.sql.functions.col import org.apache.spark.sql.hive.test.{TestHive, TestHiveSingleton} import org.apache.spark.sql.hive.test.TestHive._ import org.apache.spark.sql.hive.test.TestHive.implicits._ @@ -110,6 +113,7 @@ class HiveTableScanSuite extends HiveComparisonTest with SQLTestUtils with TestH sql( s""" |CREATE TABLE $table(id string) + |USING hive |PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string) """.stripMargin) sql( @@ -154,6 +158,7 @@ class HiveTableScanSuite extends HiveComparisonTest with SQLTestUtils with TestH sql( s""" |CREATE TABLE $table(id string) + |USING hive |PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string) """.stripMargin) sql( @@ -179,6 +184,7 @@ class HiveTableScanSuite extends HiveComparisonTest with SQLTestUtils with TestH sql( s""" |CREATE TABLE $table (id int) + |USING hive |PARTITIONED BY (a int, b int) """.stripMargin) val scan1 = getHiveTableScanExec(s"SELECT * FROM $table WHERE a = 1 AND b = 2") @@ -187,6 +193,86 @@ class HiveTableScanSuite extends HiveComparisonTest with SQLTestUtils with TestH } } + test("SPARK-32867: When explain, HiveTableRelation show limited message") { + 
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") { + withTable("df") { + spark.range(30) + .select(col("id"), col("id").as("k")) + .write + .partitionBy("k") + .format("hive") + .mode("overwrite") + .saveAsTable("df") + + val scan1 = getHiveTableScanExec("SELECT * FROM df WHERE df.k < 3") + assert(scan1.simpleString(100).replaceAll("#\\d+L", "") == + "Scan hive default.df [id, k]," + + " HiveTableRelation [" + + "`default`.`df`," + + " org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," + + " Data Cols: [id]," + + " Partition Cols: [k]," + + " Pruned Partitions: [(k=0), (k=1), (k=2)]" + + "]," + + " [isnotnull(k), (k < 3)]") + + val scan2 = getHiveTableScanExec("SELECT * FROM df WHERE df.k < 30") + assert(scan2.simpleString(100).replaceAll("#\\d+L", "") == + "Scan hive default.df [id, k]," + + " HiveTableRelation [" + + "`default`.`df`," + + " org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," + + " Data Cols: [id]," + + " Partition Cols: [k]," + + " Pruned Partitions: [(k=0), (k=1), (k=10), (k=11), (k=12), (k=13), (k=14), (k=15)," + + " (k=16), (k=17), (k=18), (k=19), (k..." + + "]," + + " [isnotnull(k), (k < 30)]") + + sql( + """ + |ALTER TABLE df PARTITION (k=10) SET SERDE + |'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'; + """.stripMargin) + + val scan3 = getHiveTableScanExec("SELECT * FROM df WHERE df.k < 30") + assert(scan3.simpleString(100).replaceAll("#\\d+L", "") == + "Scan hive default.df [id, k]," + + " HiveTableRelation [" + + "`default`.`df`," + + " org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," + + " Data Cols: [id]," + + " Partition Cols: [k]," + + " Pruned Partitions: [(k=0), (k=1)," + + " (k=10, org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe)," + + " (k=11), (k=12), (k=1..." + + "]," + + " [isnotnull(k), (k < 30)]") + } + } + } + + test("SPARK-32069: Improve error message on reading unexpected directory") { + withTable("t") { + withTempDir { f => + sql(s"CREATE TABLE t(i LONG) USING hive LOCATION '${f.getAbsolutePath}'") + sql("INSERT INTO t VALUES(1)") + val dir = new File(f.getCanonicalPath + "/data") + dir.mkdir() + sql("set mapreduce.input.fileinputformat.input.dir.recursive=true") + assert(sql("select * from t").collect().head.getLong(0) == 1) + sql("set mapreduce.input.fileinputformat.input.dir.recursive=false") + val e = intercept[IOException] { + sql("SELECT * FROM t").collect() + } + assert(e.getMessage.contains(s"Path: ${dir.getAbsoluteFile} is a directory, " + + s"which is not supported by the record reader " + + s"when `mapreduce.input.fileinputformat.input.dir.recursive` is false.")) + dir.delete() + } + } + } + private def getHiveTableScanExec(query: String): HiveTableScanExec = { sql(query).queryExecution.sparkPlan.collectFirst { case p: HiveTableScanExec => p diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDAFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDAFSuite.scala index 9e33a8ee4cc5c..ed44dcd8d7a29 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDAFSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDAFSuite.scala @@ -161,6 +161,20 @@ class HiveUDAFSuite extends QueryTest checkAnswer(sql("select histogram_numeric(a,2) from abc where a=3"), Row(null)) } } + + test("SPARK-32243: Spark UDAF Invalid arguments number error should throw earlier") { + // func need two arguments + val functionName = "longProductSum" + val functionClass = "org.apache.spark.sql.hive.execution.LongProductSum" + 
withUserDefinedFunction(functionName -> true) { + sql(s"CREATE TEMPORARY FUNCTION $functionName AS '$functionClass'") + val e = intercept[AnalysisException] { + sql(s"SELECT $functionName(100)") + }.getMessage + assert(e.contains( + s"Invalid number of arguments for function $functionName. Expected: 2; Found: 1;")) + } + } } /** diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala index 057f2f4ce01be..9e8046b9ef544 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala @@ -658,6 +658,24 @@ class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils { } } + test("SPARK-32877: add test for Hive UDF complex decimal type") { + withUserDefinedFunction("testArraySum" -> false) { + sql(s"CREATE FUNCTION testArraySum AS '${classOf[ArraySumUDF].getName}'") + checkAnswer( + sql("SELECT testArraySum(array(1, 1.1, 1.2))"), + Seq(Row(3.3))) + + val msg = intercept[AnalysisException] { + sql("SELECT testArraySum(1)") + }.getMessage + assert(msg.contains(s"No handler for UDF/UDAF/UDTF '${classOf[ArraySumUDF].getName}'")) + + val msg2 = intercept[AnalysisException] { + sql("SELECT testArraySum(1, 2)") + }.getMessage + assert(msg2.contains(s"No handler for UDF/UDAF/UDTF '${classOf[ArraySumUDF].getName}'")) + } + } } class TestPair(x: Int, y: Int) extends Writable with Serializable { @@ -741,3 +759,14 @@ class StatelessUDF extends UDF { result } } + +class ArraySumUDF extends UDF { + import scala.collection.JavaConverters._ + def evaluate(values: java.util.List[java.lang.Double]): java.lang.Double = { + var r = 0d + for (v <- values.asScala) { + r += v + } + r + } +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitionsSuite.scala index 06aea084330fa..018df35403be5 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitionsSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruneHiveTablePartitionsSuite.scala @@ -75,7 +75,22 @@ class PruneHiveTablePartitionsSuite extends PrunePartitionSuiteBase { } val scale = 20 val predicate = (1 to scale).map(i => s"(p0 = '$i' AND p1 = '$i')").mkString(" OR ") - assertPrunedPartitions(s"SELECT * FROM t WHERE $predicate", scale) + val expectedStr = { + // left + "(((((((`p0` = 1) && (`p1` = 1)) || ((`p0` = 2) && (`p1` = 2))) ||" + + " ((`p0` = 3) && (`p1` = 3))) || (((`p0` = 4) && (`p1` = 4)) ||" + + " ((`p0` = 5) && (`p1` = 5)))) || (((((`p0` = 6) && (`p1` = 6)) ||" + + " ((`p0` = 7) && (`p1` = 7))) || ((`p0` = 8) && (`p1` = 8))) ||" + + " (((`p0` = 9) && (`p1` = 9)) || ((`p0` = 10) && (`p1` = 10))))) ||" + + // right + " ((((((`p0` = 11) && (`p1` = 11)) || ((`p0` = 12) && (`p1` = 12))) ||" + + " ((`p0` = 13) && (`p1` = 13))) || (((`p0` = 14) && (`p1` = 14)) ||" + + " ((`p0` = 15) && (`p1` = 15)))) || (((((`p0` = 16) && (`p1` = 16)) ||" + + " ((`p0` = 17) && (`p1` = 17))) || ((`p0` = 18) && (`p1` = 18))) ||" + + " (((`p0` = 19) && (`p1` = 19)) || ((`p0` = 20) && (`p1` = 20))))))" + } + assertPrunedPartitions(s"SELECT * FROM t WHERE $predicate", scale, + expectedStr) } } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala index d088061cdc6e5..bc170fcd59026 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala @@ -18,8 +18,10 @@ package org.apache.spark.sql.hive.execution import org.apache.spark.sql.QueryTest -import org.apache.spark.sql.execution.SparkPlan +import org.apache.spark.sql.catalyst.expressions.{AttributeReference, BinaryOperator, Expression, IsNotNull, Literal} +import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan} import org.apache.spark.sql.hive.test.TestHiveSingleton +import org.apache.spark.sql.internal.SQLConf.ADAPTIVE_EXECUTION_ENABLED import org.apache.spark.sql.test.SQLTestUtils abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with TestHiveSingleton { @@ -27,49 +29,87 @@ abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with protected def format: String test("SPARK-28169: Convert scan predicate condition to CNF") { - withTempView("temp") { - withTable("t") { - sql( - s""" - |CREATE TABLE t(i INT, p STRING) - |USING $format - |PARTITIONED BY (p)""".stripMargin) - - spark.range(0, 1000, 1).selectExpr("id as col") - .createOrReplaceTempView("temp") - - for (part <- Seq(1, 2, 3, 4)) { + withSQLConf(ADAPTIVE_EXECUTION_ENABLED.key -> "false") { + withTempView("temp") { + withTable("t") { sql( s""" - |INSERT OVERWRITE TABLE t PARTITION (p='$part') - |SELECT col FROM temp""".stripMargin) - } + |CREATE TABLE t(i INT, p STRING) + |USING $format + |PARTITIONED BY (p)""".stripMargin) - assertPrunedPartitions( - "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2) - assertPrunedPartitions( - "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4) - assertPrunedPartitions( - "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2) - assertPrunedPartitions( - "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3) - assertPrunedPartitions( - "SELECT * FROM t", 4) - assertPrunedPartitions( - "SELECT * FROM t WHERE p = '1' AND i = 2", 1) - assertPrunedPartitions( - """ - |SELECT i, COUNT(1) FROM ( - |SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1) - |) tmp GROUP BY i - """.stripMargin, 2) + spark.range(0, 1000, 1).selectExpr("id as col") + .createOrReplaceTempView("temp") + + for (part <- Seq(1, 2, 3, 4)) { + sql( + s""" + |INSERT OVERWRITE TABLE t PARTITION (p='$part') + |SELECT col FROM temp""".stripMargin) + } + + assertPrunedPartitions( + "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2, + "((`p` = '1') || (`p` = '2'))") + assertPrunedPartitions( + "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4, + "") + assertPrunedPartitions( + "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2, + "((`p` = '1') || (`p` = '3'))") + assertPrunedPartitions( + "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3, + "((`p` = '1') || ((`p` = '2') || (`p` = '3')))") + assertPrunedPartitions( + "SELECT * FROM t", 4, + "") + assertPrunedPartitions( + "SELECT * FROM t WHERE p = '1' AND i = 2", 1, + "(`p` = '1')") + assertPrunedPartitions( + """ + |SELECT i, COUNT(1) FROM ( + |SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1) + |) tmp GROUP BY i + """.stripMargin, 2, "((`p` = '1') || (`p` = '2'))") + } } } } - protected def assertPrunedPartitions(query: String, expected: Long): Unit = { - val plan = 
sql(query).queryExecution.sparkPlan - assert(getScanExecPartitionSize(plan) == expected) + private def getCleanStringRepresentation(exp: Expression): String = exp match { + case attr: AttributeReference => + attr.sql.replaceAll("spark_catalog.default.t.", "") + case l: Literal => + l.sql + case e: BinaryOperator => + s"(${getCleanStringRepresentation(e.left)} ${e.symbol} " + + s"${getCleanStringRepresentation(e.right)})" + } + + protected def assertPrunedPartitions( + query: String, + expectedPartitionCount: Long, + expectedPushedDownFilters: String): Unit = { + val qe = sql(query).queryExecution + val plan = qe.sparkPlan + assert(getScanExecPartitionSize(plan) == expectedPartitionCount) + + val pushedDownPartitionFilters = qe.executedPlan.collectFirst { + case scan: FileSourceScanExec => scan.partitionFilters + case scan: HiveTableScanExec => scan.partitionPruningPred + }.map(exps => exps.filterNot(e => e.isInstanceOf[IsNotNull])) + val pushedFilters = pushedDownPartitionFilters.map(filters => { + filters.foldLeft("")((currentStr, exp) => { + if (currentStr == "") { + s"${getCleanStringRepresentation(exp)}" + } else { + s"$currentStr AND ${getCleanStringRepresentation(exp)}" + } + }) + }) + + assert(pushedFilters == Some(expectedPushedDownFilters)) } protected def getScanExecPartitionSize(plan: SparkPlan): Long diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala index 431790e1fbb6d..3370695245fd0 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala @@ -45,7 +45,6 @@ import org.apache.spark.sql.internal.StaticSQLConf.GLOBAL_TEMP_DATABASE import org.apache.spark.sql.test.SQLTestUtils import org.apache.spark.sql.types._ import org.apache.spark.tags.SlowHiveTest -import org.apache.spark.util.Utils case class Nested1(f1: Nested2) case class Nested2(f2: Nested3) @@ -229,7 +228,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi checkAnswer(sql(s"SHOW functions $db.temp_abs"), Row("temp_abs")) checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs")) checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs")) - checkAnswer(sql("SHOW functions `a function doens't exist`"), Nil) + checkAnswer(sql("SHOW functions `a function doesn't exist`"), Nil) checkAnswer(sql("SHOW functions `temp_weekofyea*`"), Row("temp_weekofyear")) // this probably will failed if we add more function with `sha` prefixing. 
@@ -713,8 +712,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi |AS SELECT key, value FROM mytable1 """.stripMargin) }.getMessage - assert(e.contains("Create Partitioned Table As Select cannot specify data type for " + - "the partition columns of the target table")) + assert(e.contains("Partition column types may not be specified in Create Table As Select")) } } } @@ -770,7 +768,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi sql("SELECT * FROM nested").collect().toSeq) intercept[AnalysisException] { - sql("CREATE TABLE test_ctas_1234 AS SELECT * from notexists").collect() + sql("CREATE TABLE test_ctas_1234 AS SELECT * from nonexistent").collect() } } } @@ -1741,12 +1739,12 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi |SELECT 'blarr' """.stripMargin) - // project list is the same order of paritioning columns in table definition + // project list is the same order of partitioning columns in table definition checkAnswer( sql(s"SELECT p1, p2, p3, p4, p5, c1 FROM $table"), Row("a", "b", "c", "d", "e", "blarr") :: Nil) - // project list does not have the same order of paritioning columns in table definition + // project list does not have the same order of partitioning columns in table definition checkAnswer( sql(s"SELECT p2, p3, p4, p1, p5, c1 FROM $table"), Row("b", "c", "d", "a", "e", "blarr") :: Nil) @@ -2028,6 +2026,7 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi sql( """ |CREATE TABLE part_table (c STRING) + |STORED AS textfile |PARTITIONED BY (d STRING) """.stripMargin) sql(s"LOAD DATA LOCAL INPATH '$path/part-r-000011' " + @@ -2206,39 +2205,63 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi } } - test("SPARK-21912 ORC/Parquet table should not create invalid column names") { + test("SPARK-21912 Parquet table should not create invalid column names") { Seq(" ", ",", ";", "{", "}", "(", ")", "\n", "\t", "=").foreach { name => - Seq("ORC", "PARQUET").foreach { source => - withTable("t21912") { - val m = intercept[AnalysisException] { - sql(s"CREATE TABLE t21912(`col$name` INT) USING $source") - }.getMessage - assert(m.contains(s"contains invalid character(s)")) + val source = "PARQUET" + withTable("t21912") { + val m = intercept[AnalysisException] { + sql(s"CREATE TABLE t21912(`col$name` INT) USING $source") + }.getMessage + assert(m.contains(s"contains invalid character(s)")) - val m1 = intercept[AnalysisException] { - sql(s"CREATE TABLE t21912 STORED AS $source AS SELECT 1 `col$name`") - }.getMessage - assert(m1.contains(s"contains invalid character(s)")) + val m1 = intercept[AnalysisException] { + sql(s"CREATE TABLE t21912 STORED AS $source AS SELECT 1 `col$name`") + }.getMessage + assert(m1.contains(s"contains invalid character(s)")) + + val m2 = intercept[AnalysisException] { + sql(s"CREATE TABLE t21912 USING $source AS SELECT 1 `col$name`") + }.getMessage + assert(m2.contains(s"contains invalid character(s)")) - val m2 = intercept[AnalysisException] { - sql(s"CREATE TABLE t21912 USING $source AS SELECT 1 `col$name`") + withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false") { + val m3 = intercept[AnalysisException] { + sql(s"CREATE TABLE t21912(`col$name` INT) USING hive OPTIONS (fileFormat '$source')") }.getMessage - assert(m2.contains(s"contains invalid character(s)")) + assert(m3.contains(s"contains invalid character(s)")) + } - withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> 
"false") { - val m3 = intercept[AnalysisException] { - sql(s"CREATE TABLE t21912(`col$name` INT) USING hive OPTIONS (fileFormat '$source')") - }.getMessage - assert(m3.contains(s"contains invalid character(s)")) - } + sql(s"CREATE TABLE t21912(`col` INT) USING $source") + val m4 = intercept[AnalysisException] { + sql(s"ALTER TABLE t21912 ADD COLUMNS(`col$name` INT)") + }.getMessage + assert(m4.contains(s"contains invalid character(s)")) + } + } + } - sql(s"CREATE TABLE t21912(`col` INT) USING $source") - val m4 = intercept[AnalysisException] { - sql(s"ALTER TABLE t21912 ADD COLUMNS(`col$name` INT)") - }.getMessage - assert(m4.contains(s"contains invalid character(s)")) + test("SPARK-32889: ORC table column name supports special characters") { + // " " "," is not allowed. + Seq("$", ";", "{", "}", "(", ")", "\n", "\t", "=").foreach { name => + val source = "ORC" + Seq(s"CREATE TABLE t32889(`$name` INT) USING $source", + s"CREATE TABLE t32889 STORED AS $source AS SELECT 1 `$name`", + s"CREATE TABLE t32889 USING $source AS SELECT 1 `$name`", + s"CREATE TABLE t32889(`$name` INT) USING hive OPTIONS (fileFormat '$source')") + .foreach { command => + withTable("t32889") { + sql(command) + assertResult(name)( + sessionState.catalog.getTableMetadata(TableIdentifier("t32889")).schema.fields(0).name) } } + + withTable("t32889") { + sql(s"CREATE TABLE t32889(`col` INT) USING $source") + sql(s"ALTER TABLE t32889 ADD COLUMNS(`$name` INT)") + assertResult(name)( + sessionState.catalog.getTableMetadata(TableIdentifier("t32889")).schema.fields(1).name) + } } } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala index 1f1a5568b0201..50f13efccc915 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/UDAQuerySuite.scala @@ -17,23 +17,15 @@ package org.apache.spark.sql.hive.execution -import java.lang.{Double => jlDouble, Integer => jlInt, Long => jlLong} - -import scala.collection.JavaConverters._ -import scala.util.Random - -import test.org.apache.spark.sql.MyDoubleAvg -import test.org.apache.spark.sql.MyDoubleSum +import java.lang.{Double => jlDouble, Long => jlLong} import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder import org.apache.spark.sql.catalyst.expressions.GenericInternalRow -import org.apache.spark.sql.catalyst.expressions.UnsafeRow -import org.apache.spark.sql.expressions.{Aggregator} +import org.apache.spark.sql.expressions.Aggregator import org.apache.spark.sql.functions._ import org.apache.spark.sql.hive.test.TestHiveSingleton -import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SQLTestUtils import org.apache.spark.sql.types._ diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/WindowQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/WindowQuerySuite.scala index 15712a18ce751..6bf7bd6cbb90e 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/WindowQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/WindowQuerySuite.scala @@ -62,7 +62,6 @@ class WindowQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleto // Moved because: // - Spark uses a different default stddev (sample instead of pop) // - Tiny numerical differences in stddev results. 
- // - Different StdDev behavior when n=1 (NaN instead of 0) checkAnswer(sql(s""" |select p_mfgr,p_name, p_size, |rank() over(distribute by p_mfgr sort by p_name) as r, @@ -88,22 +87,22 @@ class WindowQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleto Row("Manufacturer#1", "almond antique salmon chartreuse burlywood", 6, 4, 3, 0.6666666666666666, 0.6, 2, 4, 11.0, 15.448840301675292, 2, 6, 2), Row("Manufacturer#1", "almond aquamarine burnished black steel", 28, 5, 4, 0.8333333333333334, 0.8, 3, 5, 14.4, 15.388307249337076, 2, 28, 34), Row("Manufacturer#1", "almond aquamarine pink moccasin thistle", 42, 6, 5, 1.0, 1.0, 3, 6, 19.0, 17.787636155487327, 2, 42, 6), - Row("Manufacturer#2", "almond antique violet chocolate turquoise", 14, 1, 1, 0.2, 0.0, 1, 1, 14.0, Double.NaN, 4, 14, 14), + Row("Manufacturer#2", "almond antique violet chocolate turquoise", 14, 1, 1, 0.2, 0.0, 1, 1, 14.0, null, 4, 14, 14), Row("Manufacturer#2", "almond antique violet turquoise frosted", 40, 2, 2, 0.4, 0.25, 1, 2, 27.0, 18.384776310850235, 4, 40, 14), Row("Manufacturer#2", "almond aquamarine midnight light salmon", 2, 3, 3, 0.6, 0.5, 2, 3, 18.666666666666668, 19.42506971244462, 4, 2, 14), Row("Manufacturer#2", "almond aquamarine rose maroon antique", 25, 4, 4, 0.8, 0.75, 2, 4, 20.25, 16.17353805861084, 4, 25, 40), Row("Manufacturer#2", "almond aquamarine sandy cyan gainsboro", 18, 5, 5, 1.0, 1.0, 3, 5, 19.8, 14.042791745233567, 4, 18, 2), - Row("Manufacturer#3", "almond antique chartreuse khaki white", 17, 1, 1, 0.2, 0.0, 1, 1, 17.0,Double.NaN, 2, 17, 17), + Row("Manufacturer#3", "almond antique chartreuse khaki white", 17, 1, 1, 0.2, 0.0, 1, 1, 17.0, null, 2, 17, 17), Row("Manufacturer#3", "almond antique forest lavender goldenrod", 14, 2, 2, 0.4, 0.25, 1, 2, 15.5, 2.1213203435596424, 2, 14, 17), Row("Manufacturer#3", "almond antique metallic orange dim", 19, 3, 3, 0.6, 0.5, 2, 3, 16.666666666666668, 2.516611478423583, 2, 19, 17), Row("Manufacturer#3", "almond antique misty red olive", 1, 4, 4, 0.8, 0.75, 2, 4, 12.75, 8.098353742170895, 2, 1, 14), Row("Manufacturer#3", "almond antique olive coral navajo", 45, 5, 5, 1.0, 1.0, 3, 5, 19.2, 16.037456157383566, 2, 45, 19), - Row("Manufacturer#4", "almond antique gainsboro frosted violet", 10, 1, 1, 0.2, 0.0, 1, 1, 10.0, Double.NaN, 0, 10, 10), + Row("Manufacturer#4", "almond antique gainsboro frosted violet", 10, 1, 1, 0.2, 0.0, 1, 1, 10.0, null, 0, 10, 10), Row("Manufacturer#4", "almond antique violet mint lemon", 39, 2, 2, 0.4, 0.25, 1, 2, 24.5, 20.506096654409877, 0, 39, 10), Row("Manufacturer#4", "almond aquamarine floral ivory bisque", 27, 3, 3, 0.6, 0.5, 2, 3, 25.333333333333332, 14.571661996262929, 0, 27, 10), Row("Manufacturer#4", "almond aquamarine yellow dodger mint", 7, 4, 4, 0.8, 0.75, 2, 4, 20.75, 15.01943185787443, 0, 7, 39), Row("Manufacturer#4", "almond azure aquamarine papaya violet", 12, 5, 5, 1.0, 1.0, 3, 5, 19.0, 13.583077707206124, 0, 12, 27), - Row("Manufacturer#5", "almond antique blue firebrick mint", 31, 1, 1, 0.2, 0.0, 1, 1, 31.0, Double.NaN, 1, 31, 31), + Row("Manufacturer#5", "almond antique blue firebrick mint", 31, 1, 1, 0.2, 0.0, 1, 1, 31.0, null, 1, 31, 31), Row("Manufacturer#5", "almond antique medium spring khaki", 6, 2, 2, 0.4, 0.25, 1, 2, 18.5, 17.67766952966369, 1, 6, 31), Row("Manufacturer#5", "almond antique sky peru orange", 2, 3, 3, 0.6, 0.5, 2, 3, 13.0, 15.716233645501712, 1, 2, 31), Row("Manufacturer#5", "almond aquamarine dodger light gainsboro", 46, 4, 4, 0.8, 0.75, 2, 4, 21.25, 20.902551678363736, 1, 46, 
6), diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableAddPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableAddPartitionSuite.scala new file mode 100644 index 0000000000000..f8fe23f643cda --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableAddPartitionSuite.scala @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.execution.command.v1 + +/** + * The class contains tests for the `ALTER TABLE .. ADD PARTITION` command to check + * V1 Hive external table catalog. + */ +class AlterTableAddPartitionSuite + extends v1.AlterTableAddPartitionSuiteBase + with CommandSuiteBase diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala new file mode 100644 index 0000000000000..5cac27f0d254a --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.execution.command.v1 + +/** + * The class contains tests for the `ALTER TABLE .. DROP PARTITION` command to check + * V1 Hive external table catalog. 
+ */ +class AlterTableDropPartitionSuite + extends v1.AlterTableDropPartitionSuiteBase + with CommandSuiteBase diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableRenamePartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableRenamePartitionSuite.scala new file mode 100644 index 0000000000000..5cd5122a2a7fa --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableRenamePartitionSuite.scala @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.execution.command.v1 + +/** + * The class contains tests for the `ALTER TABLE .. RENAME PARTITION` command to check + * V1 Hive external table catalog. + */ +class AlterTableRenamePartitionSuite + extends v1.AlterTableRenamePartitionSuiteBase + with CommandSuiteBase diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/CommandSuiteBase.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/CommandSuiteBase.scala new file mode 100644 index 0000000000000..a1c808647c891 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/CommandSuiteBase.scala @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec +import org.apache.spark.sql.connector.catalog.CatalogManager +import org.apache.spark.sql.hive.test.TestHiveSingleton + +/** + * The trait contains settings and utility functions. It can be mixed to the test suites for + * datasource v1 Hive external catalog. This trait complements the common trait + * `org.apache.spark.sql.execution.command.DDLCommandTestUtils` with utility functions and + * settings for all unified datasource V1 and V2 test suites. 
+ */ +trait CommandSuiteBase extends TestHiveSingleton { + def version: String = "Hive V1" // The prefix is added to test names + def catalog: String = CatalogManager.SESSION_CATALOG_NAME + def defaultUsing: String = "USING HIVE" // The clause is used in creating tables under testing + + def checkLocation( + t: String, + spec: TablePartitionSpec, + expected: String): Unit = { + val tablePath = t.split('.') + val tableName = tablePath.last + val ns = tablePath.init.mkString(".") + val partSpec = spec.map { case (key, value) => s"$key = $value"}.mkString(", ") + val information = + spark.sql(s"SHOW TABLE EXTENDED IN $ns LIKE '$tableName' PARTITION($partSpec)") + .select("information") + .first().getString(0) + val location = information.split("\\r?\\n").filter(_.startsWith("Location:")).head + assert(location.endsWith(expected)) + } +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DropTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DropTableSuite.scala new file mode 100644 index 0000000000000..c1f17d1280641 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DropTableSuite.scala @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.execution.command.v1 + +/** + * The class contains tests for the `DROP TABLE` command to check V1 Hive external table catalog. + */ +class DropTableSuite extends v1.DropTableSuiteBase with CommandSuiteBase diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowNamespacesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowNamespacesSuite.scala new file mode 100644 index 0000000000000..eba2569c07736 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowNamespacesSuite.scala @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.execution.command.v1 +import org.apache.spark.sql.internal.SQLConf + +/** + * The class contains tests for the `SHOW NAMESPACES` and `SHOW DATABASES` commands to check + * V1 Hive external table catalog. + */ +class ShowNamespacesSuite extends v1.ShowNamespacesSuiteBase with CommandSuiteBase { + test("case sensitivity") { + Seq(true, false).foreach { caseSensitive => + withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { + withNamespace(s"$catalog.AAA", s"$catalog.bbb") { + sql(s"CREATE NAMESPACE $catalog.AAA") + sql(s"CREATE NAMESPACE $catalog.bbb") + runShowNamespacesSql( + s"SHOW NAMESPACES IN $catalog", + Seq("aaa", "bbb") ++ builtinTopNamespaces) + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog LIKE 'AAA'", Seq("aaa")) + runShowNamespacesSql(s"SHOW NAMESPACES IN $catalog LIKE 'aaa'", Seq("aaa")) + } + } + } + } +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowPartitionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowPartitionsSuite.scala new file mode 100644 index 0000000000000..ded53cc3ea7f0 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowPartitionsSuite.scala @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.Row +import org.apache.spark.sql.execution.command.v1 + +/** + * The class contains tests for the `SHOW PARTITIONS` command to check + * V1 Hive external table catalog. + */ +class ShowPartitionsSuite extends v1.ShowPartitionsSuiteBase with CommandSuiteBase { + test("SPARK-33904: null and empty string as partition values") { + withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") { + withNamespaceAndTable("ns", "tbl") { t => + createNullPartTable(t, "hive") + runShowPartitionsSql( + s"SHOW PARTITIONS $t", + Row("part=__HIVE_DEFAULT_PARTITION__") :: Nil) + checkAnswer(spark.table(t), + Row(0, "__HIVE_DEFAULT_PARTITION__") :: + Row(1, "__HIVE_DEFAULT_PARTITION__") :: Nil) + } + } + } +} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowTablesSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowTablesSuite.scala new file mode 100644 index 0000000000000..7b3652a86034a --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowTablesSuite.scala @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.hive.execution.command + +import org.apache.spark.sql.execution.command.v1 + +/** + * The class contains tests for the `SHOW TABLES` command to check V1 Hive external table catalog. + */ +class ShowTablesSuite extends v1.ShowTablesSuiteBase with CommandSuiteBase diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcFilterSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcFilterSuite.scala deleted file mode 100644 index 5fc41067f661d..0000000000000 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcFilterSuite.scala +++ /dev/null @@ -1,484 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.spark.sql.hive.orc - -import java.nio.charset.StandardCharsets -import java.sql.{Date, Timestamp} - -import scala.collection.JavaConverters._ - -import org.apache.hadoop.hive.ql.io.sarg.{PredicateLeaf, SearchArgument} - -import org.apache.spark.sql.{Column, DataFrame} -import org.apache.spark.sql.catalyst.dsl.expressions._ -import org.apache.spark.sql.catalyst.expressions._ -import org.apache.spark.sql.catalyst.planning.PhysicalOperation -import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, HadoopFsRelation, LogicalRelation} -import org.apache.spark.sql.execution.datasources.orc.OrcTest -import org.apache.spark.sql.hive.HiveUtils -import org.apache.spark.sql.hive.test.TestHiveSingleton -import org.apache.spark.sql.types._ - -/** - * A test suite that tests Hive ORC filter API based filter pushdown optimization. 
- */ -class HiveOrcFilterSuite extends OrcTest with TestHiveSingleton { - - override val orcImp: String = "hive" - - private def checkFilterPredicate( - df: DataFrame, - predicate: Predicate, - checker: (SearchArgument) => Unit): Unit = { - val output = predicate.collect { case a: Attribute => a }.distinct - val query = df - .select(output.map(e => Column(e)): _*) - .where(Column(predicate)) - - var maybeRelation: Option[HadoopFsRelation] = None - val maybeAnalyzedPredicate = query.queryExecution.optimizedPlan.collect { - case PhysicalOperation(_, filters, LogicalRelation(orcRelation: HadoopFsRelation, _, _, _)) => - maybeRelation = Some(orcRelation) - filters - }.flatten.reduceLeftOption(_ && _) - assert(maybeAnalyzedPredicate.isDefined, "No filter is analyzed from the given query") - - val (_, selectedFilters, _) = - DataSourceStrategy.selectFilters(maybeRelation.get, maybeAnalyzedPredicate.toSeq) - assert(selectedFilters.nonEmpty, "No filter is pushed down") - - val maybeFilter = OrcFilters.createFilter(query.schema, selectedFilters.toArray) - assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $selectedFilters") - checker(maybeFilter.get) - } - - private def checkFilterPredicate - (predicate: Predicate, filterOperator: PredicateLeaf.Operator) - (implicit df: DataFrame): Unit = { - def checkComparisonOperator(filter: SearchArgument) = { - val operator = filter.getLeaves.asScala - assert(operator.map(_.getOperator).contains(filterOperator)) - } - checkFilterPredicate(df, predicate, checkComparisonOperator) - } - - private def checkFilterPredicateWithDiffHiveVersion - (predicate: Predicate, stringExpr: String) - (implicit df: DataFrame): Unit = { - def checkLogicalOperator(filter: SearchArgument) = { - if (HiveUtils.isHive23) { - assert(filter.toString == stringExpr.replace("\n", ", ")) - } else { - assert(filter.toString == stringExpr) - } - } - checkFilterPredicate(df, predicate, checkLogicalOperator) - } - - private def assertResultWithDiffHiveVersion(expected : String)(c : scala.Any) = { - if (HiveUtils.isHive23) { - assertResult(expected.replace("\n", ", "))(c) - } else { - assertResult(expected)(c) - } - } - - private def checkNoFilterPredicate - (predicate: Predicate) - (implicit df: DataFrame): Unit = { - val output = predicate.collect { case a: Attribute => a }.distinct - val query = df - .select(output.map(e => Column(e)): _*) - .where(Column(predicate)) - - var maybeRelation: Option[HadoopFsRelation] = None - val maybeAnalyzedPredicate = query.queryExecution.optimizedPlan.collect { - case PhysicalOperation(_, filters, LogicalRelation(orcRelation: HadoopFsRelation, _, _, _)) => - maybeRelation = Some(orcRelation) - filters - }.flatten.reduceLeftOption(_ && _) - assert(maybeAnalyzedPredicate.isDefined, "No filter is analyzed from the given query") - - val (_, selectedFilters, _) = - DataSourceStrategy.selectFilters(maybeRelation.get, maybeAnalyzedPredicate.toSeq) - assert(selectedFilters.nonEmpty, "No filter is pushed down") - - val maybeFilter = OrcFilters.createFilter(query.schema, selectedFilters.toArray) - assert(maybeFilter.isEmpty, s"Could generate filter predicate for $selectedFilters") - } - - test("filter pushdown - integer") { - withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < 2, 
PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - long") { - withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < 2, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - float") { - withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < 2, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - double") { - withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === 1, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < 2, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS) - 
checkFilterPredicate($"_1" >= 4, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(1) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(1) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(2) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(3) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(1) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(4) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - string") { - withOrcDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === "1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> "1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < "2", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > "3", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= "1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= "4", PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal("1") === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal("1") <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal("2") > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal("3") < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal("1") >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal("4") <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - boolean") { - withOrcDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === true, PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> true, PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < true, PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > false, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= false, PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= false, PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(false) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(false) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(false) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(true) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(true) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(true) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - decimal") { - withOrcDataFrame((1 to 4).map(i => Tuple1.apply(BigDecimal.valueOf(i)))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === BigDecimal.valueOf(1), PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> BigDecimal.valueOf(1), PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < BigDecimal.valueOf(2), PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > BigDecimal.valueOf(3), PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= BigDecimal.valueOf(1), PredicateLeaf.Operator.LESS_THAN_EQUALS) - 
checkFilterPredicate($"_1" >= BigDecimal.valueOf(4), PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate( - Literal(BigDecimal.valueOf(1)) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(1)) <=> $"_1", PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(2)) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate( - Literal(BigDecimal.valueOf(3)) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(1)) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate( - Literal(BigDecimal.valueOf(4)) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - timestamp") { - val timeString = "2015-08-20 14:57:00" - val timestamps = (1 to 4).map { i => - val milliseconds = Timestamp.valueOf(timeString).getTime + i * 3600 - new Timestamp(milliseconds) - } - withOrcDataFrame(timestamps.map(Tuple1(_))) { implicit df => - checkFilterPredicate($"_1".isNull, PredicateLeaf.Operator.IS_NULL) - - checkFilterPredicate($"_1" === timestamps(0), PredicateLeaf.Operator.EQUALS) - checkFilterPredicate($"_1" <=> timestamps(0), PredicateLeaf.Operator.NULL_SAFE_EQUALS) - - checkFilterPredicate($"_1" < timestamps(1), PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate($"_1" > timestamps(2), PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" <= timestamps(0), PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate($"_1" >= timestamps(3), PredicateLeaf.Operator.LESS_THAN) - - checkFilterPredicate(Literal(timestamps(0)) === $"_1", PredicateLeaf.Operator.EQUALS) - checkFilterPredicate(Literal(timestamps(0)) <=> $"_1", - PredicateLeaf.Operator.NULL_SAFE_EQUALS) - checkFilterPredicate(Literal(timestamps(1)) > $"_1", PredicateLeaf.Operator.LESS_THAN) - checkFilterPredicate(Literal(timestamps(2)) < $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(timestamps(0)) >= $"_1", PredicateLeaf.Operator.LESS_THAN_EQUALS) - checkFilterPredicate(Literal(timestamps(3)) <= $"_1", PredicateLeaf.Operator.LESS_THAN) - } - } - - test("filter pushdown - combinations with logical operators") { - withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df => - // Because `ExpressionTree` is not accessible at Hive 1.2.x, this should be checked - // in string form in order to check filter creation including logical operators - // such as `and`, `or` or `not`. So, this function uses `SearchArgument.toString()` - // to produce string expression and then compare it to given string expression below. - // This might have to be changed after Hive version is upgraded. 
- checkFilterPredicateWithDiffHiveVersion( - $"_1".isNotNull, - """leaf-0 = (IS_NULL _1) - |expr = (not leaf-0)""".stripMargin.trim - ) - checkFilterPredicateWithDiffHiveVersion( - $"_1" =!= 1, - """leaf-0 = (IS_NULL _1) - |leaf-1 = (EQUALS _1 1) - |expr = (and (not leaf-0) (not leaf-1))""".stripMargin.trim - ) - checkFilterPredicateWithDiffHiveVersion( - !($"_1" < 4), - """leaf-0 = (IS_NULL _1) - |leaf-1 = (LESS_THAN _1 4) - |expr = (and (not leaf-0) (not leaf-1))""".stripMargin.trim - ) - checkFilterPredicateWithDiffHiveVersion( - $"_1" < 2 || $"_1" > 3, - """leaf-0 = (LESS_THAN _1 2) - |leaf-1 = (LESS_THAN_EQUALS _1 3) - |expr = (or leaf-0 (not leaf-1))""".stripMargin.trim - ) - checkFilterPredicateWithDiffHiveVersion( - $"_1" < 2 && $"_1" > 3, - """leaf-0 = (IS_NULL _1) - |leaf-1 = (LESS_THAN _1 2) - |leaf-2 = (LESS_THAN_EQUALS _1 3) - |expr = (and (not leaf-0) leaf-1 (not leaf-2))""".stripMargin.trim - ) - } - } - - test("no filter pushdown - non-supported types") { - implicit class IntToBinary(int: Int) { - def b: Array[Byte] = int.toString.getBytes(StandardCharsets.UTF_8) - } - // ArrayType - withOrcDataFrame((1 to 4).map(i => Tuple1(Array(i)))) { implicit df => - checkNoFilterPredicate($"_1".isNull) - } - // BinaryType - withOrcDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df => - checkNoFilterPredicate($"_1" <=> 1.b) - } - // DateType - if (!HiveUtils.isHive23) { - val stringDate = "2015-01-01" - withOrcDataFrame(Seq(Tuple1(Date.valueOf(stringDate)))) { implicit df => - checkNoFilterPredicate($"_1" === Date.valueOf(stringDate)) - } - } - // MapType - withOrcDataFrame((1 to 4).map(i => Tuple1(Map(i -> i)))) { implicit df => - checkNoFilterPredicate($"_1".isNotNull) - } - } - - test("SPARK-12218 and SPARK-25699 Converting conjunctions into ORC SearchArguments") { - import org.apache.spark.sql.sources._ - // The `LessThan` should be converted while the `StringContains` shouldn't - val schema = new StructType( - Array( - StructField("a", IntegerType, nullable = true), - StructField("b", StringType, nullable = true))) - assertResultWithDiffHiveVersion( - """leaf-0 = (LESS_THAN a 10) - |expr = leaf-0 - """.stripMargin.trim - ) { - OrcFilters.createFilter(schema, Array( - LessThan("a", 10), - StringContains("b", "prefix") - )).get.toString - } - - // The `LessThan` should be converted while the whole inner `And` shouldn't - assertResultWithDiffHiveVersion( - """leaf-0 = (LESS_THAN a 10) - |expr = leaf-0 - """.stripMargin.trim - ) { - OrcFilters.createFilter(schema, Array( - LessThan("a", 10), - Not(And( - GreaterThan("a", 1), - StringContains("b", "prefix") - )) - )).get.toString - } - - // Safely remove unsupported `StringContains` predicate and push down `LessThan` - assertResultWithDiffHiveVersion( - """leaf-0 = (LESS_THAN a 10) - |expr = leaf-0 - """.stripMargin.trim - ) { - OrcFilters.createFilter(schema, Array( - And( - LessThan("a", 10), - StringContains("b", "prefix") - ) - )).get.toString - } - - // Safely remove unsupported `StringContains` predicate, push down `LessThan` and `GreaterThan`. 
- assertResultWithDiffHiveVersion( - """leaf-0 = (LESS_THAN a 10) - |leaf-1 = (LESS_THAN_EQUALS a 1) - |expr = (and leaf-0 (not leaf-1)) - """.stripMargin.trim - ) { - OrcFilters.createFilter(schema, Array( - And( - And( - LessThan("a", 10), - StringContains("b", "prefix") - ), - GreaterThan("a", 1) - ) - )).get.toString - } - } - - test("SPARK-27699 Converting disjunctions into ORC SearchArguments") { - import org.apache.spark.sql.sources._ - // The `LessThan` should be converted while the `StringContains` shouldn't - val schema = new StructType( - Array( - StructField("a", IntegerType, nullable = true), - StructField("b", StringType, nullable = true))) - - // The predicate `StringContains` predicate is not able to be pushed down. - assertResultWithDiffHiveVersion("leaf-0 = (LESS_THAN_EQUALS a 10)\nleaf-1 = (LESS_THAN a 1)\n" + - "expr = (or (not leaf-0) leaf-1)") { - OrcFilters.createFilter(schema, Array( - Or( - GreaterThan("a", 10), - And( - StringContains("b", "prefix"), - LessThan("a", 1) - ) - ) - )).get.toString - } - - assertResultWithDiffHiveVersion("leaf-0 = (LESS_THAN_EQUALS a 10)\nleaf-1 = (LESS_THAN a 1)\n" + - "expr = (or (not leaf-0) leaf-1)") { - OrcFilters.createFilter(schema, Array( - Or( - And( - GreaterThan("a", 10), - StringContains("b", "foobar") - ), - And( - StringContains("b", "prefix"), - LessThan("a", 1) - ) - ) - )).get.toString - } - - assert(OrcFilters.createFilter(schema, Array( - Or( - StringContains("b", "foobar"), - And( - StringContains("b", "prefix"), - LessThan("a", 1) - ) - ) - )).isEmpty) - } -} diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala index 12ee5bea7c2f9..fcf7febe33121 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcQuerySuite.scala @@ -168,9 +168,6 @@ class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton { } } - // Since Hive 1.2.1 library code path still has this problem, users may hit this - // when spark.sql.hive.convertMetastoreOrc=false. However, after SPARK-22279, - // Apache Spark with the default configuration doesn't hit this bug. test("SPARK-22267 Spark SQL incorrectly reads ORC files when column order is different") { Seq("native", "hive").foreach { orcImpl => withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> orcImpl) { @@ -179,10 +176,12 @@ class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton { Seq(1 -> 2).toDF("c1", "c2").write.orc(path) checkAnswer(spark.read.orc(path), Row(1, 2)) - withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> "true") { // default since 2.3.0 - withTable("t") { - sql(s"CREATE EXTERNAL TABLE t(c2 INT, c1 INT) STORED AS ORC LOCATION '$path'") - checkAnswer(spark.table("t"), Row(2, 1)) + Seq(true, false).foreach { convertMetastoreOrc => + withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> convertMetastoreOrc.toString) { + withTable("t") { + sql(s"CREATE EXTERNAL TABLE t(c2 INT, c1 INT) STORED AS ORC LOCATION '$path'") + checkAnswer(spark.table("t"), Row(2, 1)) + } } } } @@ -190,9 +189,6 @@ class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton { } } - // Since Hive 1.2.1 library code path still has this problem, users may hit this - // when spark.sql.hive.convertMetastoreOrc=false. However, after SPARK-22279, - // Apache Spark with the default configuration doesn't hit this bug. 
test("SPARK-19809 NullPointerException on zero-size ORC file") { Seq("native", "hive").foreach { orcImpl => withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> orcImpl) { @@ -201,8 +197,10 @@ class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton { sql(s"CREATE TABLE spark_19809(a int) STORED AS ORC LOCATION '$dir'") Files.touch(new File(s"${dir.getCanonicalPath}", "zero.orc")) - withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> "true") { // default since 2.3.0 - checkAnswer(spark.table("spark_19809"), Seq.empty) + Seq(true, false).foreach { convertMetastoreOrc => + withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> convertMetastoreOrc.toString) { + checkAnswer(spark.table("spark_19809"), Seq.empty) + } } } } @@ -224,7 +222,6 @@ class HiveOrcQuerySuite extends OrcQueryTest with TestHiveSingleton { } test("SPARK-26437 Can not query decimal type when value is 0") { - assume(HiveUtils.isHive23, "bad test: This bug fixed by HIVE-13083(Hive 2.0.1)") withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> "false") { withTable("spark_26437") { sql("CREATE TABLE spark_26437 STORED AS ORCFILE AS SELECT 0.00 AS c1") diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcSourceSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcSourceSuite.scala index 91fd8a47339fc..e94e0b39c859c 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcSourceSuite.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/HiveOrcSourceSuite.scala @@ -149,12 +149,7 @@ class HiveOrcSourceSuite extends OrcSuite with TestHiveSingleton { test("Check BloomFilter creation") { Seq(true, false).foreach { convertMetastore => withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> s"$convertMetastore") { - if (HiveUtils.isHive23) { - testBloomFilterCreation(org.apache.orc.OrcProto.Stream.Kind.BLOOM_FILTER_UTF8) - } else { - // Before ORC-101 - testBloomFilterCreation(org.apache.orc.OrcProto.Stream.Kind.BLOOM_FILTER) - } + testBloomFilterCreation(org.apache.orc.OrcProto.Stream.Kind.BLOOM_FILTER_UTF8) } } } @@ -162,7 +157,7 @@ class HiveOrcSourceSuite extends OrcSuite with TestHiveSingleton { test("Enforce direct encoding column-wise selectively") { Seq(true, false).foreach { convertMetastore => withSQLConf(HiveUtils.CONVERT_METASTORE_ORC.key -> s"$convertMetastore") { - testSelectiveDictionaryEncoding(isSelective = false, isHive23 = HiveUtils.isHive23) + testSelectiveDictionaryEncoding(isSelective = false, isHiveOrc = true) } } } @@ -322,7 +317,6 @@ class HiveOrcSourceSuite extends OrcSuite with TestHiveSingleton { } test("SPARK-31580: Read a file written before ORC-569") { - assume(HiveUtils.isHive23) // Hive 1.2 doesn't use Apache ORC // Test ORC file came from ORC-621 val df = readResourceOrcFile("test-data/TestStringDictionary.testRowIndex.orc") assert(df.where("str < 'row 001000'").count() === 1000) diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala index a2518e70a013b..cbba9be32b77c 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala @@ -23,7 +23,6 @@ import java.util.{Set => JavaSet} import scala.collection.JavaConverters._ import scala.collection.mutable -import scala.language.implicitConversions import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path @@ -38,11 +37,12 @@ import org.apache.spark.internal.config.UI._ import 
org.apache.spark.sql.{DataFrame, Dataset, SparkSession, SQLContext} import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation import org.apache.spark.sql.catalyst.catalog.ExternalCatalogWithListener +import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, OneRowRelation} +import org.apache.spark.sql.connector.catalog.CatalogManager import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._ import org.apache.spark.sql.execution.{QueryExecution, SQLExecution} -import org.apache.spark.sql.execution.command.CacheTableCommand import org.apache.spark.sql.hive._ import org.apache.spark.sql.hive.client.HiveClient import org.apache.spark.sql.internal.{SessionState, SharedState, SQLConf, WithTestConf} @@ -58,6 +58,7 @@ object TestHive new SparkConf() .set("spark.sql.test", "") .set(SQLConf.CODEGEN_FALLBACK.key, "false") + .set(SQLConf.CODEGEN_FACTORY_MODE.key, CodegenObjectFactoryMode.CODEGEN_ONLY.toString) .set(HiveUtils.HIVE_METASTORE_BARRIER_PREFIXES.key, "org.apache.spark.sql.hive.execution.PairSerDe") .set(WAREHOUSE_PATH.key, TestHiveContext.makeWarehouseDir().toURI.getPath) @@ -128,11 +129,11 @@ class TestHiveContext( * If loadTestTables is false, no test tables are loaded. Note that this flag can only be true * when running in the JVM, i.e. it needs to be false when calling from Python. */ - def this(sc: SparkContext, loadTestTables: Boolean = true) { + def this(sc: SparkContext, loadTestTables: Boolean = true) = { this(new TestHiveSparkSession(HiveUtils.withHiveExternalCatalog(sc), loadTestTables)) } - def this(sc: SparkContext, hiveClient: HiveClient) { + def this(sc: SparkContext, hiveClient: HiveClient) = { this(new TestHiveSparkSession(HiveUtils.withHiveExternalCatalog(sc), hiveClient, loadTestTables = false)) @@ -176,7 +177,7 @@ private[hive] class TestHiveSparkSession( private val loadTestTables: Boolean) extends SparkSession(sc) with Logging { self => - def this(sc: SparkContext, loadTestTables: Boolean) { + def this(sc: SparkContext, loadTestTables: Boolean) = { this( sc, existingSharedState = None, @@ -184,7 +185,7 @@ private[hive] class TestHiveSparkSession( loadTestTables) } - def this(sc: SparkContext, hiveClient: HiveClient, loadTestTables: Boolean) { + def this(sc: SparkContext, hiveClient: HiveClient, loadTestTables: Boolean) = { this( sc, existingSharedState = Some(new TestHiveSharedState(sc, Some(hiveClient))), @@ -222,7 +223,7 @@ private[hive] class TestHiveSparkSession( @transient override lazy val sessionState: SessionState = { - new TestHiveSessionStateBuilder(this, parentSessionState).build() + new TestHiveSessionStateBuilder(this, parentSessionState, Map.empty).build() } lazy val metadataHive: HiveClient = { @@ -326,20 +327,22 @@ private[hive] class TestHiveSparkSession( } if (loadTestTables) { + def createTableSQL(tblName: String): String = { + s"CREATE TABLE $tblName (key INT, value STRING) STORED AS textfile" + } // The test tables that are defined in the Hive QTestUtil. 
// /itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java // https://github.com/apache/hive/blob/branch-0.13/data/scripts/q_test_init.sql @transient val hiveQTestUtilTables: Seq[TestTable] = Seq( TestTable("src", - "CREATE TABLE src (key INT, value STRING) STORED AS TEXTFILE".cmd, + createTableSQL("src").cmd, s"LOAD DATA LOCAL INPATH '${quoteHiveFile("data/files/kv1.txt")}' INTO TABLE src".cmd), TestTable("src1", - "CREATE TABLE src1 (key INT, value STRING) STORED AS TEXTFILE".cmd, + createTableSQL("src1").cmd, s"LOAD DATA LOCAL INPATH '${quoteHiveFile("data/files/kv3.txt")}' INTO TABLE src1".cmd), TestTable("srcpart", () => { - "CREATE TABLE srcpart (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)" - .cmd.apply() + s"${createTableSQL("srcpart")} PARTITIONED BY (ds STRING, hr STRING)".cmd.apply() for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) { s""" |LOAD DATA LOCAL INPATH '${quoteHiveFile("data/files/kv1.txt")}' @@ -348,8 +351,7 @@ private[hive] class TestHiveSparkSession( } }), TestTable("srcpart1", () => { - "CREATE TABLE srcpart1 (key INT, value STRING) PARTITIONED BY (ds STRING, hr INT)" - .cmd.apply() + s"${createTableSQL("srcpart1")} PARTITIONED BY (ds STRING, hr INT)".cmd.apply() for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- 11 to 12) { s""" |LOAD DATA LOCAL INPATH '${quoteHiveFile("data/files/kv1.txt")}' @@ -494,7 +496,10 @@ private[hive] class TestHiveSparkSession( def getLoadedTables: collection.mutable.HashSet[String] = sharedState.loadedTables def loadTestTable(name: String): Unit = { - if (!sharedState.loadedTables.contains(name)) { + // LOAD DATA does not work on temporary views. Since temporary views are resolved first, + // skip loading if there exists a temporary view with the given name. + if (sessionState.catalog.getTempView(name).isEmpty && + !sharedState.loadedTables.contains(name)) { // Marks the table as loaded first to prevent infinite mutually recursive table loading. sharedState.loadedTables += name logDebug(s"Loading test table $name") @@ -582,24 +587,22 @@ private[hive] class TestHiveQueryExecution( logicalPlan: LogicalPlan) extends QueryExecution(sparkSession, logicalPlan) with Logging { - def this(sparkSession: TestHiveSparkSession, sql: String) { + def this(sparkSession: TestHiveSparkSession, sql: String) = { this(sparkSession, sparkSession.sessionState.sqlParser.parsePlan(sql)) } - def this(sql: String) { + def this(sql: String) = { this(TestHive.sparkSession, sql) } override lazy val analyzed: LogicalPlan = sparkSession.withActive { - val describedTables = logical match { - case CacheTableCommand(tbl, _, _, _) => tbl :: Nil - case _ => Nil - } - // Make sure any test tables referenced are loaded. 
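[Editor's note, not part of the patch] A hedged illustration of the effect of the new temporary-view guard in loadTestTable above; the session handle and view definition are assumptions, and the behaviour is inferred from the guard itself rather than from tests in this patch.

  // Sketch: with the guard, a temporary view that shadows a test table name is left
  // untouched instead of being clobbered by the table's LOAD DATA commands.
  val session = TestHive.sparkSession                          // TestHiveSparkSession
  session.range(1).toDF("key").createOrReplaceTempView("src")  // temp view shadows test table "src"
  session.loadTestTable("src")                                 // no-op: getTempView("src") is non-empty
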
- val referencedTables = - describedTables ++ - logical.collect { case UnresolvedRelation(ident, _) => ident.asTableIdentifier } + val referencedTables = logical.collect { + case UnresolvedRelation(ident, _, _) => + if (ident.length > 1 && ident.head.equalsIgnoreCase(CatalogManager.SESSION_CATALOG_NAME)) { + ident.tail.asTableIdentifier + } else ident.asTableIdentifier + } val resolver = sparkSession.sessionState.conf.resolver val referencedTestTables = referencedTables.flatMap { tbl => val testTableOpt = sparkSession.testTables.keys.find(resolver(_, tbl.table)) @@ -648,8 +651,9 @@ private[hive] object TestHiveContext { private[sql] class TestHiveSessionStateBuilder( session: SparkSession, - state: Option[SessionState]) - extends HiveSessionStateBuilder(session, state) + state: Option[SessionState], + options: Map[String, String]) + extends HiveSessionStateBuilder(session, state, options) with WithTestConf { override def overrideConfs: Map[String, String] = TestHiveContext.overrideConfs @@ -658,7 +662,7 @@ private[sql] class TestHiveSessionStateBuilder( new TestHiveQueryExecution(session.asInstanceOf[TestHiveSparkSession], plan) } - override protected def newBuilder: NewBuilder = new TestHiveSessionStateBuilder(_, _) + override protected def newBuilder: NewBuilder = new TestHiveSessionStateBuilder(_, _, Map.empty) } private[hive] object HiveTestJars { @@ -677,7 +681,7 @@ private[hive] object HiveTestJars { val fileName = urlString.split("/").last val targetFile = new File(hiveTestJarsDir, fileName) if (!targetFile.exists()) { - Utils.doFetchFile(urlString, hiveTestJarsDir, fileName, new SparkConf, null, null) + Utils.doFetchFile(urlString, hiveTestJarsDir, fileName, new SparkConf, null) } targetFile } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanWithHiveSupportSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanWithHiveSupportSuite.scala new file mode 100644 index 0000000000000..30eb93cb5c3e8 --- /dev/null +++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanWithHiveSupportSuite.scala @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.spark.sql.sources
+
+import org.apache.spark.sql.hive.test.TestHiveSingleton
+import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
+
+class DisableUnnecessaryBucketedScanWithHiveSupportSuite
+  extends DisableUnnecessaryBucketedScanSuite
+  with TestHiveSingleton {
+
+  protected override def beforeAll(): Unit = {
+    super.beforeAll()
+    assert(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "hive")
+  }
+}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
index cbea74103343e..b65a00457c72c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/HadoopFsRelationTest.scala
@@ -155,6 +155,7 @@ abstract class HadoopFsRelationTest extends QueryTest with SQLTestUtils with Tes
       withSQLConf(
         SQLConf.DATETIME_JAVA8API_ENABLED.key -> java8Api.toString,
         SQLConf.LEGACY_PARQUET_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString,
+        SQLConf.LEGACY_PARQUET_INT96_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString,
         SQLConf.LEGACY_AVRO_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString) {
         val dataGenerator = RandomDataGenerator.forType(
           dataType = dataType,
diff --git a/streaming/pom.xml b/streaming/pom.xml
index 53b49dd320e94..bd8d352092e73 100644
--- a/streaming/pom.xml
+++ b/streaming/pom.xml
@@ -21,7 +21,7 @@
     <groupId>org.apache.spark</groupId>
    <artifactId>spark-parent_2.12</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
diff --git a/streaming/src/main/scala/org/apache/spark/status/api/v1/streaming/ApiStreamingRootResource.scala b/streaming/src/main/scala/org/apache/spark/status/api/v1/streaming/ApiStreamingRootResource.scala
index a2571b910f615..99d59e4a1447a 100644
--- a/streaming/src/main/scala/org/apache/spark/status/api/v1/streaming/ApiStreamingRootResource.scala
+++ b/streaming/src/main/scala/org/apache/spark/status/api/v1/streaming/ApiStreamingRootResource.scala
@@ -23,9 +23,7 @@ import javax.ws.rs.core.MediaType
 
 import org.apache.spark.status.api.v1.NotFoundException
 import org.apache.spark.streaming.Time
-import org.apache.spark.streaming.ui.StreamingJobProgressListener
 import org.apache.spark.streaming.ui.StreamingJobProgressListener._
-import org.apache.spark.ui.SparkUI
 
 @Produces(Array(MediaType.APPLICATION_JSON))
 private[v1] class ApiStreamingRootResource extends BaseStreamingAppResource {
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/DStreamGraph.scala b/streaming/src/main/scala/org/apache/spark/streaming/DStreamGraph.scala
index 37cc1b8a6d2ab..43aaa7e1eeaec 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/DStreamGraph.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/DStreamGraph.scala
@@ -19,7 +19,7 @@ package org.apache.spark.streaming
 
 import java.io.{IOException, ObjectInputStream, ObjectOutputStream}
 
-import scala.collection.mutable.ArrayBuffer
+import scala.collection.mutable
 import scala.collection.parallel.immutable.ParVector
 
 import org.apache.spark.internal.Logging
@@ -29,8 +29,8 @@ import org.apache.spark.util.Utils
 
 final private[streaming] class DStreamGraph extends Serializable with Logging {
 
-  private val inputStreams = new ArrayBuffer[InputDStream[_]]()
-  private val outputStreams = new ArrayBuffer[DStream[_]]()
+  private var inputStreams = mutable.ArraySeq.empty[InputDStream[_]]
+  private var outputStreams = mutable.ArraySeq.empty[DStream[_]]
 
   @volatile private var inputStreamNameAndID: Seq[(String, Int)] = Nil
@@ -91,14 +91,14 @@ final private[streaming] class DStreamGraph extends Serializable with Logging {
   def addInputStream(inputStream: InputDStream[_]): Unit = {
     this.synchronized {
       inputStream.setGraph(this)
-      inputStreams += inputStream
+      inputStreams = inputStreams :+ inputStream
     }
   }
 
   def addOutputStream(outputStream: DStream[_]): Unit = {
     this.synchronized {
       outputStream.setGraph(this)
-      outputStreams += outputStream
+      outputStreams = outputStreams :+ outputStream
     }
   }
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/State.scala b/streaming/src/main/scala/org/apache/spark/streaming/State.scala
index 734c6ef42696e..c4cd1a9dc336b 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/State.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/State.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.streaming
 
-import scala.language.implicitConversions
-
 import org.apache.spark.annotation.Experimental
 
 /**
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/python/PythonDStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/python/PythonDStream.scala
index 570663c6f6ad3..7a8e3f1d2ccf4 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/python/PythonDStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/python/PythonDStream.scala
@@ -163,7 +163,7 @@ private[python] object PythonTransformFunctionSerializer {
 private[streaming] object PythonDStream {
 
   /**
-   * can not access PythonTransformFunctionSerializer.register() via Py4j
+   * cannot access PythonTransformFunctionSerializer.register() via Py4j
    * Py4JError: PythonTransformFunctionSerializerregister does not exist in the JVM
    */
   def registerSerializer(ser: PythonTransformFunctionSerializer): Unit = {
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala b/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
index e037f26088347..ca4f3670d5ad7 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/dstream/DStream.scala
@@ -960,7 +960,7 @@ object DStream {
   /** Get the creation site of a DStream from the stack trace of when the DStream is created. */
   private[streaming] def getCreationSite(): CallSite = {
     /** Filtering function that excludes non-user classes for a streaming application */
-    def streamingExclustionFunction(className: String): Boolean = {
+    def streamingExclusionFunction(className: String): Boolean = {
       def doesMatch(r: Regex): Boolean = r.findFirstIn(className).isDefined
       val isSparkClass = doesMatch(SPARK_CLASS_REGEX)
       val isSparkExampleClass = doesMatch(SPARK_EXAMPLES_CLASS_REGEX)
@@ -972,6 +972,6 @@ object DStream {
       // non-Spark and non-Scala class, as the rest would streaming application classes.
       (isSparkClass || isScalaClass) && !isSparkExampleClass && !isSparkStreamingTestClass
     }
-    org.apache.spark.util.Utils.getCallSite(streamingExclustionFunction)
+    org.apache.spark.util.Utils.getCallSite(streamingExclusionFunction)
   }
 }
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala b/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala
index f677c492d561f..6494e512713f8 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/rdd/WriteAheadLogBackedBlockRDD.scala
@@ -96,7 +96,7 @@ class WriteAheadLogBackedBlockRDD[T: ClassTag](
   @transient private val hadoopConfig = sc.hadoopConfiguration
   private val broadcastedHadoopConf = new SerializableConfiguration(hadoopConfig)
 
-  override def isValid(): Boolean = true
+  override def isValid: Boolean = true
 
   override def getPartitions: Array[Partition] = {
     assertValid()
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
index 12ed8015117e5..7a561ecb4990f 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/receiver/ReceivedBlockHandler.scala
@@ -234,7 +234,7 @@ private[streaming] class CountingIterator[T](iterator: Iterator[T]) extends Iter
 
   private def isFullyConsumed: Boolean = !iterator.hasNext
 
-  def hasNext(): Boolean = iterator.hasNext
+  def hasNext: Boolean = iterator.hasNext
 
   def count(): Option[Long] = {
     if (isFullyConsumed) Some(_count) else None
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceivedBlockTracker.scala b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceivedBlockTracker.scala
index d038021e93e73..4ac1c62822e7a 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceivedBlockTracker.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceivedBlockTracker.scala
@@ -21,7 +21,6 @@ import java.nio.ByteBuffer
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable
-import scala.language.implicitConversions
 import scala.util.control.NonFatal
 
 import org.apache.hadoop.conf.Configuration
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala
index 2e5000159bcb7..d1f9dfb791355 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLog.scala
@@ -293,7 +293,7 @@ private[streaming] object FileBasedWriteAheadLog {
         val startTime = startTimeStr.toLong
         val stopTime = stopTimeStr.toLong
         Some(LogInfo(startTime, stopTime, file.toString))
-      case None =>
+      case None | Some(_) =>
         None
       }
     }.sortBy { _.startTime }
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala b/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala
index 146577214de17..ef040681adf37 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/util/HdfsUtils.scala
@@ -39,7 +39,7 @@ private[streaming] object HdfsUtils {
         throw new IllegalStateException("File exists and there is no append support!")
       }
     } else {
-      // we dont' want to use hdfs erasure coding, as that lacks support for append and hflush
+      // we don't want to use hdfs erasure coding, as that lacks support for append and hflush
       SparkHadoopUtil.createFile(dfs, dfsPath, false)
     }
   }
@@ -58,7 +58,7 @@ private[streaming] object HdfsUtils {
       // If we are really unlucky, the file may be deleted as we're opening the stream.
      // This can happen as clean up is performed by daemon threads that may be left over from
       // previous runs.
-      if (!dfs.isFile(dfsPath)) null else throw e
+      if (!dfs.getFileStatus(dfsPath).isFile) null else throw e
     }
   }
@@ -92,6 +92,10 @@ private[streaming] object HdfsUtils {
   def checkFileExists(path: String, conf: Configuration): Boolean = {
     val hdpPath = new Path(path)
     val fs = getFileSystemForPath(hdpPath, conf)
-    fs.isFile(hdpPath)
+    try {
+      fs.getFileStatus(hdpPath).isFile
+    } catch {
+      case _: FileNotFoundException => false
+    }
   }
 }
diff --git a/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java b/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java
index c7cde5674f547..8a57b0c58b228 100644
--- a/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java
+++ b/streaming/src/test/java/test/org/apache/spark/streaming/JavaAPISuite.java
@@ -1595,7 +1595,7 @@ public void testContextGetOrCreate() throws InterruptedException {
   /* TEST DISABLED: Pending a discussion about checkpoint() semantics with TD
   @SuppressWarnings("unchecked")
   @Test
-  public void testCheckpointofIndividualStream() throws InterruptedException {
+  public void testCheckpointOfIndividualStream() throws InterruptedException {
     List<List<String>> inputData = Arrays.asList(
         Arrays.asList("this", "is"),
         Arrays.asList("a", "test"),
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/MapWithStateSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/MapWithStateSuite.scala
index b2b8d2f41fc80..3ffaa62bd75ac 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/MapWithStateSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/MapWithStateSuite.scala
@@ -541,12 +541,12 @@ class MapWithStateSuite extends SparkFunSuite with LocalStreamingContext
     // Setup the stream computation
     val ssc = new StreamingContext(sc, Seconds(1))
     val inputStream = new TestInputStream(ssc, input, numPartitions = 2)
-    val trackeStateStream = inputStream.map(x => (x, 1)).mapWithState(mapWithStateSpec)
+    val trackedStateStream = inputStream.map(x => (x, 1)).mapWithState(mapWithStateSpec)
     val collectedOutputs = new ConcurrentLinkedQueue[Seq[T]]
-    val outputStream = new TestOutputStream(trackeStateStream, collectedOutputs)
+    val outputStream = new TestOutputStream(trackedStateStream, collectedOutputs)
     val collectedStateSnapshots = new ConcurrentLinkedQueue[Seq[(K, S)]]
     val stateSnapshotStream = new TestOutputStream(
-      trackeStateStream.stateSnapshots(), collectedStateSnapshots)
+      trackedStateStream.stateSnapshots(), collectedStateSnapshots)
     outputStream.register()
     stateSnapshotStream.register()
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala
index 6b332206e8f6d..9d4b67bccecaf 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala
@@ -22,7 +22,6 @@ import scala.util.Random
 import org.apache.spark.{SparkConf, SparkEnv}
 import org.apache.spark.rdd.BlockRDD
 import org.apache.spark.storage.{StorageLevel, StreamBlockId}
-import org.apache.spark.streaming.StreamingConf.RECEIVER_WAL_ENABLE_CONF_KEY
 import org.apache.spark.streaming.dstream.ReceiverInputDStream
 import org.apache.spark.streaming.rdd.WriteAheadLogBackedBlockRDD
 import org.apache.spark.streaming.receiver.{BlockManagerBasedStoreResult, Receiver, WriteAheadLogBasedStoreResult}
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
index 4eff464dcdafb..1d6637861511f 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/StreamingContextSuite.scala
@@ -293,8 +293,7 @@ class StreamingContextSuite
     }
   }
 
-  // TODO (SPARK-31728): re-enable it
-  ignore("stop gracefully") {
+  test("stop gracefully") {
     val conf = new SparkConf().setMaster(master).setAppName(appName)
     conf.set("spark.dummyTimeConfig", "3600s")
     val sc = new SparkContext(conf)
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala b/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala
index 55c2950261a07..7ce4343acbdac 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/TestSuiteBase.scala
@@ -17,11 +17,10 @@
 
 package org.apache.spark.streaming
 
-import java.io.{File, IOException, ObjectInputStream}
+import java.io.{IOException, ObjectInputStream}
 import java.util.concurrent.{ConcurrentLinkedQueue, TimeUnit}
 
 import scala.collection.JavaConverters._
-import scala.language.implicitConversions
 import scala.reflect.ClassTag
 
 import org.scalatest.BeforeAndAfterEach
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/rdd/MapWithStateRDDSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/rdd/MapWithStateRDDSuite.scala
index 58ce3a93251a9..f06b1feb8c0cd 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/rdd/MapWithStateRDDSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/rdd/MapWithStateRDDSuite.scala
@@ -320,7 +320,7 @@ class MapWithStateRDDSuite extends SparkFunSuite with RDDCheckpointTester with B
       makeStateRDDWithLongLineageDataRDD, reliableCheckpoint = true, rddCollectFunc _)
 
     /** Generate MapWithStateRDD with parent state RDD having a long lineage */
-    def makeStateRDDWithLongLineageParenttateRDD(
+    def makeStateRDDWithLongLineageParentStateRDD(
        longLineageRDD: RDD[Int]): MapWithStateRDD[Int, Int, Int, Int] = {
 
       // Create a MapWithStateRDD that has a long lineage using the data RDD with a long lineage
@@ -337,9 +337,9 @@ class MapWithStateRDDSuite extends SparkFunSuite with RDDCheckpointTester with B
     }
 
     testRDD(
-      makeStateRDDWithLongLineageParenttateRDD, reliableCheckpoint = true, rddCollectFunc _)
+      makeStateRDDWithLongLineageParentStateRDD, reliableCheckpoint = true, rddCollectFunc _)
     testRDDPartitions(
-      makeStateRDDWithLongLineageParenttateRDD, reliableCheckpoint = true, rddCollectFunc _)
+      makeStateRDDWithLongLineageParentStateRDD, reliableCheckpoint = true, rddCollectFunc _)
   }
 
   test("checkpointing empty state RDD") {
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/receiver/BlockGeneratorSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/receiver/BlockGeneratorSuite.scala
index cd867aa8132bc..31456b0b95b18 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/receiver/BlockGeneratorSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/receiver/BlockGeneratorSuite.scala
@@ -25,7 +25,6 @@ import scala.collection.mutable
 import org.scalatest.BeforeAndAfter
 import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
 import org.scalatest.concurrent.Eventually._
-import org.scalatest.matchers.must.Matchers
 import org.scalatest.matchers.should.Matchers._
 import org.scalatest.time.SpanSugar._
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManagerSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManagerSuite.scala
index f1870718c6730..c2b039244d01f 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManagerSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/scheduler/ExecutorAllocationManagerSuite.scala
@@ -17,9 +17,9 @@
 
 package org.apache.spark.streaming.scheduler
 
-import org.mockito.ArgumentMatchers.{eq => meq}
+import org.mockito.ArgumentMatchers.{any, eq => meq}
 import org.mockito.Mockito.{never, reset, times, verify, when}
-import org.scalatest.{BeforeAndAfterEach, PrivateMethodTester}
+import org.scalatest.PrivateMethodTester
 import org.scalatest.concurrent.Eventually.{eventually, timeout}
 import org.scalatest.time.SpanSugar._
 import org.scalatestplus.mockito.MockitoSugar
@@ -101,12 +101,12 @@ class ExecutorAllocationManagerSuite extends TestSuiteBase
         val decomInfo = ExecutorDecommissionInfo("spark scale down", None)
         if (decommissioning) {
           verify(allocationClient, times(1)).decommissionExecutor(
-            meq(expectedExec.get), meq(decomInfo), meq(true))
+            meq(expectedExec.get), meq(decomInfo), meq(true), any())
           verify(allocationClient, never).killExecutor(meq(expectedExec.get))
         } else {
           verify(allocationClient, times(1)).killExecutor(meq(expectedExec.get))
           verify(allocationClient, never).decommissionExecutor(
-            meq(expectedExec.get), meq(decomInfo), meq(true))
+            meq(expectedExec.get), meq(decomInfo), meq(true), any())
         }
       } else {
         if (decommissioning) {
diff --git a/tools/pom.xml b/tools/pom.xml
index 6e806413ef261..8fe8ab358d60c 100644
--- a/tools/pom.xml
+++ b/tools/pom.xml
@@ -20,7 +20,7 @@
     <groupId>org.apache.spark</groupId>
     <artifactId>spark-parent_2.12</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
diff --git a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
index f9bc499961ad7..a6fee8616df11 100644
--- a/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
+++ b/tools/src/main/scala/org/apache/spark/tools/GenerateMIMAIgnore.scala
@@ -24,6 +24,7 @@ import scala.reflect.runtime.universe.runtimeMirror
 import scala.util.Try
 
 import org.clapper.classutil.ClassFinder
+import org.objectweb.asm.Opcodes
 
 /**
  * A tool for generating classes to be excluded during binary checking with MIMA. It is expected
@@ -146,7 +147,7 @@ object GenerateMIMAIgnore {
    * and subpackages both from directories and jars present on the classpath.
    */
  private def getClasses(packageName: String): Set[String] = {
-    val finder = ClassFinder()
+    val finder = ClassFinder(maybeOverrideAsmVersion = Some(Opcodes.ASM7))
    finder
      .getClasses
      .map(_.name)
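
Side note on the HdfsUtils.scala hunks above: the patch replaces calls to `FileSystem.isFile` with `FileSystem.getFileStatus(path).isFile`, which throws `FileNotFoundException` for a missing path rather than returning false, so `checkFileExists` now handles that case explicitly. The following is only a minimal standalone sketch of that same pattern, not code from the patch; the object name, helper name, and example path are invented for illustration.

import java.io.FileNotFoundException

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

// Hypothetical example mirroring the getFileStatus-based check used in HdfsUtils.checkFileExists.
object FileStatusCheckExample {
  // True only if `path` exists and is a regular file; a missing path surfaces as
  // FileNotFoundException from getFileStatus, which we translate to false.
  def isRegularFile(fs: FileSystem, path: Path): Boolean = {
    try {
      fs.getFileStatus(path).isFile
    } catch {
      case _: FileNotFoundException => false
    }
  }

  def main(args: Array[String]): Unit = {
    val conf = new Configuration()
    val path = new Path("/tmp/example.txt") // illustrative path only
    val fs = path.getFileSystem(conf)
    println(s"$path is a regular file: ${isRegularFile(fs, path)}")
  }
}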